fix:conflicts from 3.0

This commit is contained in:
wangmm0220 2023-10-16 11:35:56 +08:00
commit 473142fab7
261 changed files with 11227 additions and 13401 deletions

5
SECURITY.md Normal file
View File

@ -0,0 +1,5 @@
# Security Policy
## Reporting a Vulnerability
Please submit vulnerability reports (CVEs) to https://github.com/taosdata/TDengine/security/advisories.

View File

@ -399,7 +399,7 @@ if(${BUILD_WITH_COS})
INCLUDE_DIRECTORIES($ENV{HOME}/.cos-local.1/include)
MESSAGE("$ENV{HOME}/.cos-local.1/include")
set(CMAKE_BUILD_TYPE debug)
set(CMAKE_BUILD_TYPE Release)
set(ORIG_CMAKE_PROJECT_NAME ${CMAKE_PROJECT_NAME})
set(CMAKE_PROJECT_NAME cos_c_sdk)

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -4,11 +4,11 @@ description: This document introduces the major features, competitive advantages
toc_max_heading_level: 2
---
TDengine is an [open source](https://tdengine.com/tdengine/open-source-time-series-database/), [high-performance](https://tdengine.com/tdengine/high-performance-time-series-database/), [cloud native](https://tdengine.com/tdengine/cloud-native-time-series-database/) [time-series database](https://tdengine.com/tsdb/) optimized for the Internet of Things (IoT), Connected Cars, and Industrial IoT. Its code, including its cluster feature, is open source under GNU AGPL v3.0. Besides the database engine, it provides [caching](../develop/cache), [stream processing](../develop/stream), [data subscription](../develop/tmq) and other functionalities to reduce system complexity and the cost of development and operation.
TDengine is a big data platform designed and optimized for IoT (Internet of Things) and the Industrial Internet. It can safely and effectively converge, store, process and distribute the high volumes of data (TB or even PB scale) generated every day by large numbers of devices and data acquisition units, monitor and alert on business operation status in real time, and provide real-time business insights. The core component of TDengine is TDengine OSS, a high-performance, open-source, cloud-native and simplified time series database.
This section introduces the major features, competitive advantages, typical use cases and benchmarks to help you get a high-level overview of TDengine.
## Major Features
## Major Features of TDengine OSS
The major features are listed below:
@ -132,3 +132,9 @@ As a high-performance, scalable and SQL supported time-series database, TDengine
- [Introduction to Time-Series Database](https://tdengine.com/tsdb/)
- [Introduction to TDengine competitive advantages](https://tdengine.com/tdengine/)
## Products
TDengine offers two products: TDengine Enterprise and TDengine Cloud. For details, please refer to
- [TDengine Enterprise](https://www.taosdata.com/tdengine-pro)
- [TDengine Cloud](https://cloud.taosdata.com/?utm_source=menu&utm_medium=webcn)

View File

@ -221,7 +221,7 @@ curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive
&& tar -xzf php-tdengine.tar.gz -C php-tdengine --strip-components=1
```
> Version number `v1.0.2` is only an example; it can be replaced with any newer version, please check the available versions from [TDengine PHP Connector Releases](https://github.com/Yurunsoft/php-tdengine/releases).
> Version number `v1.0.2` is only an example; it can be replaced with any newer version.
**Non-Swoole Environment:**

View File

@ -55,7 +55,7 @@ At most 4096 columns are allowed in a STable. If there are more than 4096 of met
## Create Table
A specific table needs to be created for each data collection point. Similar to an RDBMS, a table name and schema are required to create a table. Additionally, one or more tags can be created for each table. To create a table, a STable needs to be used as a template and values need to be specified for the tags. For example, for the meters in [Table 1](/tdinternal/arch#model_table1), the table can be created using the SQL statement below.
A specific table needs to be created for each data collection point. Similar to an RDBMS, a table name and schema are required to create a table. Additionally, one or more tags can be created for each table. To create a table, a STable needs to be used as a template and values need to be specified for the tags. For example, for the smart meters table, the table can be created using the SQL statement below.
```sql
CREATE TABLE d1001 USING meters TAGS ("California.SanFrancisco", 2);
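-- A hedged sketch (the additional table names and tag values below are only examples):
-- several subtables can be created in one statement by repeating the USING clause.
CREATE TABLE IF NOT EXISTS d1002 USING meters TAGS ("California.SanFrancisco", 3)
                           d1003 USING meters TAGS ("California.LosAngeles", 2);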

View File

@ -352,10 +352,10 @@ You configure the following parameters when creating a consumer:
| `td.connect.port` | string | Port of the server side | |
| `group.id` | string | Consumer group ID; consumers with the same ID are in the same group | **Required**. Maximum length: 192. Each topic can create up to 100 consumer groups. |
| `client.id` | string | Client ID | Maximum length: 192. |
| `auto.offset.reset` | enum | Initial offset for the consumer group | `earliest`: subscribe from the earliest data, this is the default behavior; `latest`: subscribe from the latest data; or `none`: can't subscribe without committed offset|
| `auto.offset.reset` | enum | Initial offset for the consumer group | `earliest`: subscribe from the earliest data, the default behavior for versions earlier than 3.2.0.0; `latest`: subscribe from the latest data, the default behavior for versions 3.2.0.0 and later; or `none`: can't subscribe without a committed offset|
| `enable.auto.commit` | boolean | Commit automatically; true: user application doesn't need to explicitly commit; false: user application need to handle commit by itself | Default value is true |
| `auto.commit.interval.ms` | integer | Interval for automatic commits, in milliseconds |
| `msg.with.table.name` | boolean | Specify whether to deserialize table names from messages | default value: false
| `msg.with.table.name` | boolean | Specify whether to deserialize table names from messages. Not applicable when subscribing to a column list (`tbname` can be written as a column in the subquery statement during column subscriptions). This parameter has been deprecated since version 3.2.0.0 and is always treated as true. | default value: false
| `enable.replay` | boolean | Specify whether the data replay function is enabled | default value: false |
The method of specifying these parameters depends on the language used:
@ -459,7 +459,19 @@ from taos.tmq import Consumer
# Syntax: `consumer = Consumer(configs)`
#
# Example:
consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
consumer = Consumer(
{
"group.id": "local",
"client.id": "1",
"enable.auto.commit": "true",
"auto.commit.interval.ms": "1000",
"td.connect.ip": "127.0.0.1",
"td.connect.user": "root",
"td.connect.pass": "taosdata",
"auto.offset.reset": "earliest",
"msg.with.table.name": "true",
}
)
```
</TabItem>

View File

@ -12,7 +12,7 @@ The FQDN of all hosts must be setup properly. For e.g. FQDNs may have to be conf
### Step 1
If any previous version of TDengine has been installed and configured on any host, the installation needs to be removed and the data needs to be cleaned up. For details about uninstalling please refer to [Install and Uninstall](../../operation/pkg-install). To clean up the data, please use `rm -rf /var/lib/taos/\*` assuming the `dataDir` is configured as `/var/lib/taos`.
If any previous version of TDengine has been installed and configured on any host, the installation needs to be removed and the data needs to be cleaned up. To clean up the data, please use `rm -rf /var/lib/taos/\*` assuming the `dataDir` is configured as `/var/lib/taos`.
:::note
FQDN information is written to a file. If you have started TDengine without configuring or changing the FQDN, ensure that data is backed up or no longer needed before running the `rm -rf /var/lib/taos/\*` command.

View File

@ -24,7 +24,7 @@ SELECT [hints] [DISTINCT] select_list
hints: /*+ [hint([hint_param_list])] [hint([hint_param_list])] */
hint:
BATCH_SCAN | NO_BATCH_SCAN
BATCH_SCAN | NO_BATCH_SCAN | SORT_FOR_GROUP
select_list:
select_expr [, select_expr] ...
@ -87,15 +87,17 @@ Hints are a means of user control over query optimization for individual stateme
The list of currently supported Hints is as follows:
| **Hint** | **Params** | **Comment** | **Scope** |
| :-----------: | -------------- | -------------------------- | -------------------------- |
| BATCH_SCAN | None | Batch table scan | JOIN statements for supertables |
| NO_BATCH_SCAN | None | Sequential table scan | JOIN statements for supertables |
| **Hint** | **Params** | **Comment** | **Scope** |
| :-----------: | -------------- | -------------------------- | -----------------------------------|
| BATCH_SCAN | None | Batch table scan | JOIN statements for supertables |
| NO_BATCH_SCAN | None | Sequential table scan | JOIN statements for supertables |
| SORT_FOR_GROUP| None | Use sort for partitioning | When the PARTITION BY list contains a normal column |
For example:
```sql
SELECT /*+ BATCH_SCAN() */ a.ts FROM stable1 a, stable2 b where a.tag0 = b.tag0 and a.ts = b.ts;
SELECT /*+ SORT_FOR_GROUP() */ count(*), c1 FROM stable1 PARTITION BY c1;
```
## Lists

View File

@ -54,6 +54,7 @@ LIKE is used together with wildcards to match strings. Its usage is described as
MATCH and NMATCH are used together with regular expressions to match strings. Their usage is described as follows:
- Use POSIX regular expression syntax. For more information, see Regular Expressions.
- The `MATCH` operator returns true when the regular expression is matched. The `NMATCH` operator returns true when the regular expression is not matched.
- Regular expressions can be used only against table names (i.e. `tbname`) and tags/columns of binary/nchar types.
- The maximum length of a regular expression string is 128 bytes. The configuration parameter `maxRegexStringLen` can be used to set the maximum allowed length of a regular expression. It is a client-side configuration parameter and takes effect after restarting the client.
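For example, a minimal sketch (the `meters` supertable, its `current` column, and its `location` tag are assumed from the data-model examples elsewhere in these docs):

```sql
-- Match subtables whose names start with "d100" followed by a single digit
SELECT ts, current FROM meters WHERE tbname MATCH '^d100[0-9]$';

-- Exclude rows whose location tag contains "California"
SELECT ts, current FROM meters WHERE location NMATCH 'California.*';
```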

View File

@ -180,6 +180,7 @@ The following list shows all reserved keywords:
- MAX_DELAY
- BWLIMIT
- MAXROWS
- MAX_SPEED
- MERGE
- META
- MINROWS

View File

@ -26,75 +26,85 @@ This document introduces the tables of INFORMATION_SCHEMA and their structure.
## INS_DNODES
Provides information about dnodes. Similar to SHOW DNODES.
Provides information about dnodes. Similar to SHOW DNODES. Users whose SYSINFO attribute is 0 can't view this table.
| # | **Column** | **Data Type** | **Description** |
| --- | :------------: | ------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- |
| 1 | vnodes | SMALLINT | Current number of vnodes on the dnode. It should be noted that `vnodes` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 2 | support_vnodes | SMALLINT | Maximum number of vnodes on the dnode |
| 3 | status | BINARY(10) | Current status |
| 4 | note | BINARY(256) | Reason for going offline or other information |
| 3 | status | VARCHAR(10) | Current status |
| 4 | note | VARCHAR(256) | Reason for going offline or other information |
| 5 | id | SMALLINT | Dnode ID |
| 6 | endpoint | BINARY(134) | Dnode endpoint |
| 6 | endpoint | VARCHAR(134) | Dnode endpoint |
| 7 | create_time | TIMESTAMP | Creation time |
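As a quick illustration of the keyword-escaping note above, a hedged query sketch against this table might look like:

```sql
-- `vnodes` must be escaped with backquotes because it is a reserved keyword
SELECT id, endpoint, `vnodes`, support_vnodes, status
FROM information_schema.ins_dnodes;
```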
## INS_MNODES
Provides information about mnodes. Similar to SHOW MNODES.
Provides information about mnodes. Similar to SHOW MNODES. Users whose SYSINFO attribute is 0 can't view this table.
| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------- | ------------------------------------------ |
| 1 | id | SMALLINT | Mnode ID |
| 2 | endpoint | BINARY(134) | Mnode endpoint |
| 3 | role | BINARY(10) | Current role |
| 2 | endpoint | VARCHAR(134) | Mnode endpoint |
| 3 | role | VARCHAR(10) | Current role |
| 4 | role_time | TIMESTAMP | Time at which the current role was assumed |
| 5 | create_time | TIMESTAMP | Creation time |
## INS_QNODES
Provides information about qnodes. Similar to SHOW QNODES.
Provides information about qnodes. Similar to SHOW QNODES. Users whose SYSINFO attribute is 0 can't view this table.
| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------- | --------------- |
| 1 | id | SMALLINT | Qnode ID |
| 2 | endpoint | BINARY(134) | Qnode endpoint |
| 2 | endpoint | VARCHAR(134) | Qnode endpoint |
| 3 | create_time | TIMESTAMP | Creation time |
## INS_SNODES
Provides information about snodes. Similar to SHOW SNODES. Users whose SYSINFO attribute is 0 can't view this table.
| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------- | --------------- |
| 1 | id | SMALLINT | Snode ID |
| 2 | endpoint | VARCHAR(134) | Snode endpoint |
| 3 | create_time | TIMESTAMP | Creation time |
## INS_CLUSTER
Provides information about the cluster.
Provides information about the cluster. Users whose SYSINFO attribute is 0 can't view this table.
| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------- | --------------- |
| 1 | id | BIGINT | Cluster ID |
| 2 | name | BINARY(134) | Cluster name |
| 2 | name | VARCHAR(134) | Cluster name |
| 3 | create_time | TIMESTAMP | Creation time |
## INS_DATABASES
Provides information about user-created databases. Similar to SHOW DATABASES.
| # | **Column** | **Data Type** | **Description** |
| # | **Column** | **Data Type** | **Description** |
| --- | :------------------: | ---------------- | ------------------------------------------------ |
| 1| name| BINARY(32)| Database name |
| 1 | name | VARCHAR(64) | Database name |
| 2 | create_time | TIMESTAMP | Creation time |
| 3 | ntables | INT | Number of standard tables and subtables (not including supertables) |
| 4 | vgroups | INT | Number of vgroups. It should be noted that `vgroups` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 6 | replica | INT | Number of replicas. It should be noted that `replica` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 7 | strict | BINARY(4) | Obsoleted |
| 8 | duration | INT | Duration for storage of single files. It should be noted that `duration` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 9 | keep | INT | Data retention period. It should be noted that `keep` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 7 | strict | VARCHAR(4) | Obsoleted |
| 8 | duration | VARCHAR(10) | Duration for storage of single files. It should be noted that `duration` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 9 | keep | VARCHAR(32) | Data retention period. It should be noted that `keep` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 10 | buffer | INT | Write cache size per vnode, in MB. It should be noted that `buffer` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 11 | pagesize | INT | Page size for vnode metadata storage engine, in KB. It should be noted that `pagesize` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 12 | pages | INT | Number of pages per vnode metadata storage engine. It should be noted that `pages` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 13 | minrows | INT | Minimum number of records per file block. It should be noted that `minrows` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 14 | maxrows | INT | Maximum number of records per file block. It should be noted that `maxrows` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 15 | comp | INT | Compression method. It should be noted that `comp` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 16 | precision | BINARY(2) | Time precision. It should be noted that `precision` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 17 | status | BINARY(10) | Current database status |
| 18 | retentions | BINARY (60) | Aggregation interval and retention period. It should be noted that `retentions` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 16 | precision | VARCHAR(2) | Time precision. It should be noted that `precision` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 17 | status | VARCHAR(10) | Current database status |
| 18 | retentions | VARCHAR(60) | Aggregation interval and retention period. It should be noted that `retentions` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 19 | single_stable | BOOL | Whether the database can contain multiple supertables. It should be noted that `single_stable` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 20 | cachemodel | BINARY(60) | Caching method for the newest data. It should be noted that `cachemodel` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 20 | cachemodel | VARCHAR(60) | Caching method for the newest data. It should be noted that `cachemodel` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 21 | cachesize | INT | Memory per vnode used for caching the newest data. It should be noted that `cachesize` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 22 | wal_level | INT | WAL level. It should be noted that `wal_level` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 23 | wal_fsync_period | INT | Interval at which WAL is written to disk. It should be noted that `wal_fsync_period` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
@ -111,15 +121,15 @@ Provides information about user-defined functions.
| # | **Column** | **Data Type** | **Description** |
| --- | :-----------: | ------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| 1 | name | BINARY(64) | Function name |
| 2 | comment | BINARY(255) | Function description. It should be noted that `comment` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 1 | name | VARCHAR(64) | Function name |
| 2 | comment | VARCHAR(255) | Function description. It should be noted that `comment` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 3 | aggregate | INT | Whether the UDF is an aggregate function. It should be noted that `aggregate` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 4 | output_type | BINARY(31) | Output data type |
| 4 | output_type | VARCHAR(31) | Output data type |
| 5 | create_time | TIMESTAMP | Creation time |
| 6 | code_len | INT | Length of the source code |
| 7 | bufsize | INT | Buffer size |
| 8 | func_language | BINARY(31) | UDF programming language |
| 9 | func_body | BINARY(16384) | UDF function body |
| 8 | func_language | VARCHAR(31) | UDF programming language |
| 9 | func_body | VARCHAR(16384) | UDF function body |
| 10 | func_version | INT | UDF function version, starting from 0 and increasing by 1 each time it is updated |
## INS_INDEXES
@ -128,12 +138,12 @@ Provides information about user-created indices. Similar to SHOW INDEX.
| # | **Column** | **Data Type** | **Description** |
| --- | :--------------: | ------------- | --------------------------------------------------------------------- |
| 1 | db_name | BINARY(32) | Database containing the table with the specified index |
| 2 | table_name | BINARY(192) | Table containing the specified index |
| 3 | index_name | BINARY(192) | Index name |
| 4 | db_name | BINARY(64) | Index column |
| 5 | index_type | BINARY(10) | SMA or tag index |
| 6 | index_extensions | BINARY(256) | Other information For SMA/tag indices, this shows a list of functions |
| 1 | db_name | VARCHAR(32) | Database containing the table with the specified index |
| 2 | table_name | VARCHAR(192) | Table containing the specified index |
| 3 | index_name | VARCHAR(192) | Index name |
| 4 | column_name | VARCHAR(64) | Indexed column |
| 5 | index_type | VARCHAR(10) | SMA or tag index |
| 6 | index_extensions | VARCHAR(256) | Other information. For SMA/tag indices, this shows a list of functions |
## INS_STABLES
@ -141,16 +151,16 @@ Provides information about supertables.
| # | **Column** | **Data Type** | **Description** |
| --- | :-----------: | ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| 1 | stable_name | BINARY(192) | Supertable name |
| 2 | db_name | BINARY(64) | All databases in the supertable |
| 1 | stable_name | VARCHAR(192) | Supertable name |
| 2 | db_name | VARCHAR(64) | Database containing the supertable |
| 3 | create_time | TIMESTAMP | Creation time |
| 4 | columns | INT | Number of columns |
| 5 | tags | INT | Number of tags. It should be noted that `tags` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 6 | last_update | TIMESTAMP | Last updated time |
| 7 | table_comment | BINARY(1024) | Table description |
| 8 | watermark | BINARY(64) | Window closing time. It should be noted that `watermark` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 9 | max_delay | BINARY(64) | Maximum delay for pushing stream processing results. It should be noted that `max_delay` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 10 | rollup | BINARY(128) | Rollup aggregate function. It should be noted that `rollup` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 7 | table_comment | VARCHAR(1024) | Table description |
| 8 | watermark | VARCHAR(64) | Window closing time. It should be noted that `watermark` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 9 | max_delay | VARCHAR(64) | Maximum delay for pushing stream processing results. It should be noted that `max_delay` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 10 | rollup | VARCHAR(128) | Rollup aggregate function. It should be noted that `rollup` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
## INS_TABLES
@ -158,37 +168,37 @@ Provides information about standard tables and subtables.
| # | **Column** | **Data Type** | **Description** |
| --- | :-----------: | ------------- | ---------------------------------------------------------------------------------------------------------------------------------- |
| 1 | table_name | BINARY(192) | Table name |
| 2 | db_name | BINARY(64) | Database name |
| 1 | table_name | VARCHAR(192) | Table name |
| 2 | db_name | VARCHAR(64) | Database name |
| 3 | create_time | TIMESTAMP | Creation time |
| 4 | columns | INT | Number of columns |
| 5 | stable_name | BINARY(192) | Supertable name |
| 5 | stable_name | VARCHAR(192) | Supertable name |
| 6 | uid | BIGINT | Table ID |
| 7 | vgroup_id | INT | Vgroup ID |
| 8 | ttl | INT | Table time-to-live. It should be noted that `ttl` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 9 | table_comment | BINARY(1024) | Table description |
| 10 | type | BINARY(20) | Table type |
| 9 | table_comment | VARCHAR(1024) | Table description |
| 10 | type | VARCHAR(20) | Table type |
## INS_TAGS
| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------- | --------------- |
| 1 | table_name | BINARY(192) | Table name |
| 2 | db_name | BINARY(64) | Database name |
| 3 | stable_name | BINARY(192) | Supertable name |
| 4 | tag_name | BINARY(64) | Tag name |
| 5 | tag_type | BINARY(64) | Tag type |
| 6 | tag_value | BINARY(16384) | Tag value |
| 1 | table_name | VARCHAR(192) | Table name |
| 2 | db_name | VARCHAR(64) | Database name |
| 3 | stable_name | VARCHAR(192) | Supertable name |
| 4 | tag_name | VARCHAR(64) | Tag name |
| 5 | tag_type | VARCHAR(64) | Tag type |
| 6 | tag_value | VARCHAR(16384) | Tag value |
## INS_COLUMNS
| # | **Column** | **Data Type** | **Description** |
| --- | :-----------: | ------------- | ---------------- |
| 1 | table_name | BINARY(192) | Table name |
| 2 | db_name | BINARY(64) | Database name |
| 3 | table_type | BINARY(21) | Table type |
| 4 | col_name | BINARY(64) | Column name |
| 5 | col_type | BINARY(32) | Column type |
| 1 | table_name | VARCHAR(192) | Table name |
| 2 | db_name | VARCHAR(64) | Database name |
| 3 | table_type | VARCHAR(21) | Table type |
| 4 | col_name | VARCHAR(64) | Column name |
| 5 | col_type | VARCHAR(32) | Column type |
| 6 | col_length | INT | Column length |
| 7 | col_precision | INT | Column precision |
| 8 | col_scale | INT | Column scale |
@ -196,51 +206,51 @@ Provides information about standard tables and subtables.
## INS_USERS
Provides information about TDengine users.
Provides information about TDengine users. Users whose SYSINFO attribute is 0 can't view this table.
| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------- | ---------------- |
| 1 | user_name | BINARY(23) | User name |
| 2 | privilege | BINARY(256) | User permissions |
| 1 | user_name | VARCHAR(23) | User name |
| 2 | privilege | VARCHAR(256) | User permissions |
| 3 | create_time | TIMESTAMP | Creation time |
## INS_GRANTS
Provides information about TDengine Enterprise Edition permissions.
Provides information about TDengine Enterprise Edition permissions. Users whose SYSINFO attribute is 0 can't view this table.
| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| 1 | version | BINARY(9) | Whether the deployment is a licensed or trial version |
| 2 | cpu_cores | BINARY(9) | CPU cores included in license |
| 3 | dnodes | BINARY(10) | Dnodes included in license. It should be noted that `dnodes` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 4 | streams | BINARY(10) | Streams included in license. It should be noted that `streams` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 5 | users | BINARY(10) | Users included in license. It should be noted that `users` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 6 | accounts | BINARY(10) | Accounts included in license. It should be noted that `accounts` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 7 | storage | BINARY(21) | Storage space included in license. It should be noted that `storage` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 8 | connections | BINARY(21) | Client connections included in license. It should be noted that `connections` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 9 | databases | BINARY(11) | Databases included in license. It should be noted that `databases` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 10 | speed | BINARY(9) | Write speed specified in license (data points per second) |
| 11 | querytime | BINARY(9) | Total query time specified in license |
| 12 | timeseries | BINARY(21) | Number of metrics included in license |
| 13 | expired | BINARY(5) | Whether the license has expired |
| 14 | expire_time | BINARY(19) | When the trial period expires |
| 1 | version | VARCHAR(9) | Whether the deployment is a licensed or trial version |
| 2 | cpu_cores | VARCHAR(9) | CPU cores included in license |
| 3 | dnodes | VARCHAR(10) | Dnodes included in license. It should be noted that `dnodes` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 4 | streams | VARCHAR(10) | Streams included in license. It should be noted that `streams` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 5 | users | VARCHAR(10) | Users included in license. It should be noted that `users` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 6 | accounts | VARCHAR(10) | Accounts included in license. It should be noted that `accounts` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 7 | storage | VARCHAR(21) | Storage space included in license. It should be noted that `storage` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 8 | connections | VARCHAR(21) | Client connections included in license. It should be noted that `connections` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 9 | databases | VARCHAR(11) | Databases included in license. It should be noted that `databases` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 10 | speed | VARCHAR(9) | Write speed specified in license (data points per second) |
| 11 | querytime | VARCHAR(9) | Total query time specified in license |
| 12 | timeseries | VARCHAR(21) | Number of metrics included in license |
| 13 | expired | VARCHAR(5) | Whether the license has expired |
| 14 | expire_time | VARCHAR(19) | When the trial period expires |
## INS_VGROUPS
Provides information about vgroups.
Provides information about vgroups. Users whose SYSINFO attribute is 0 can't view this table.
| # | **Column** | **Data Type** | **Description** |
| --- | :--------: | ------------- | ----------------------------------------------------------------------------------------------------------------------------------- |
| 1 | vgroup_id | INT | Vgroup ID |
| 2 | db_name | BINARY(32) | Database name |
| 2 | db_name | VARCHAR(32) | Database name |
| 3 | tables | INT | Tables in vgroup. It should be noted that `tables` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 4 | status | BINARY(10) | Vgroup status |
| 4 | status | VARCHAR(10) | Vgroup status |
| 5 | v1_dnode | INT | Dnode ID of first vgroup member |
| 6 | v1_status | BINARY(10) | Status of first vgroup member |
| 6 | v1_status | VARCHAR(10) | Status of first vgroup member |
| 7 | v2_dnode | INT | Dnode ID of second vgroup member |
| 8 | v2_status | BINARY(10) | Status of second vgroup member |
| 8 | v2_status | VARCHAR(10) | Status of second vgroup member |
| 9 | v3_dnode | INT | Dnode ID of third vgroup member |
| 10 | v3_status | BINARY(10) | Status of third vgroup member |
| 10 | v3_status | VARCHAR(10) | Status of third vgroup member |
| 11 | nfiles | INT | Number of data and metadata files in the vgroup |
| 12 | file_size | INT | Size of the data and metadata files in the vgroup |
| 13 | tsma | TINYINT | Whether time-range-wise SMA is enabled. 1 means enabled; 0 means disabled. |
@ -251,55 +261,57 @@ Provides system configuration information.
| # | **Column** | **Data Type** | **Description** |
| --- | :--------: | ------------- | ----------------------------------------------------------------------------------------------------------------------- |
| 1 | name | BINARY(32) | Parameter |
| 2 | value | BINARY(64) | Value. It should be noted that `value` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 1 | name | VARCHAR(32) | Parameter |
| 2 | value | VARCHAR(64) | Value. It should be noted that `value` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
## INS_DNODE_VARIABLES
Provides dnode configuration information.
Provides dnode configuration information. Users whose SYSINFO attribute is 0 can't view this table.
| # | **Column** | **Data Type** | **Description** |
| --- | :--------: | ------------- | ----------------------------------------------------------------------------------------------------------------------- |
| 1 | dnode_id | INT | Dnode ID |
| 2 | name | BINARY(32) | Parameter |
| 3 | value | BINARY(64) | Value. It should be noted that `value` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 2 | name | VARCHAR(32) | Parameter |
| 3 | value | VARCHAR(64) | Value. It should be noted that `value` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
## INS_TOPICS
| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------- | -------------------------------------- |
| 1 | topic_name | BINARY(192) | Topic name |
| 2 | db_name | BINARY(64) | Database for the topic |
| 1 | topic_name | VARCHAR(192) | Topic name |
| 2 | db_name | VARCHAR(64) | Database for the topic |
| 3 | create_time | TIMESTAMP | Creation time |
| 4 | sql | BINARY(1024) | SQL statement used to create the topic |
| 4 | sql | VARCHAR(1024) | SQL statement used to create the topic |
## INS_SUBSCRIPTIONS
| # | **Column** | **Data Type** | **Description** |
| --- | :------------: | ------------- | --------------------------- |
| 1 | topic_name | BINARY(204) | Subscribed topic |
| 2 | consumer_group | BINARY(193) | Subscribed consumer group |
| 1 | topic_name | VARCHAR(204) | Subscribed topic |
| 2 | consumer_group | VARCHAR(193) | Subscribed consumer group |
| 3 | vgroup_id | INT | Vgroup ID for the consumer |
| 4 | consumer_id | BIGINT | Consumer ID |
| 5 | offset | BINARY(64) | Consumption progress |
| 5 | offset | VARCHAR(64) | Consumption progress |
| 6 | rows | BIGINT | Number of consumption items |
## INS_STREAMS
| # | **Column** | **Data Type** | **Description** |
| --- | :----------: | ------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| 1 | stream_name | BINARY(64) | Stream name |
| 1 | stream_name | VARCHAR(64) | Stream name |
| 2 | create_time | TIMESTAMP | Creation time |
| 3 | sql | BINARY(1024) | SQL statement used to create the stream |
| 4 | status | BINARY(20) | Current status |
| 5 | source_db | BINARY(64) | Source database |
| 6 | target_db | BINARY(64) | Target database |
| 7 | target_table | BINARY(192) | Target table |
| 3 | sql | VARCHAR(1024) | SQL statement used to create the stream |
| 4 | status | VARCHAR(20) | Current status |
| 5 | source_db | VARCHAR(64) | Source database |
| 6 | target_db | VARCHAR(64) | Target database |
| 7 | target_table | VARCHAR(192) | Target table |
| 8 | watermark | BIGINT | Watermark (see stream processing documentation). It should be noted that `watermark` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 9 | trigger | INT | Method of triggering the result push (see stream processing documentation). It should be noted that `trigger` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
## INS_USER_PRIVILEGES
Users whose SYSINFO attribute is 0 can't view this table.
| # | **Column** | **Data Type** | **Description** |
| --- | :----------: | ------------ | -------------------------------------------|
| 1 | user_name | VARCHAR(24) | Username |

View File

@ -73,10 +73,10 @@ Shows the SQL statement used to create the specified table. This statement can b
## SHOW DATABASES
```sql
SHOW DATABASES;
SHOW [USER | SYSTEM] DATABASES;
```
Shows all user-created databases.
Shows all databases. The `USER` qualifier specifies only user-created databases. The `SYSTEM` qualifier specifies only system databases.
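For example, a usage sketch of these qualifiers:

```sql
SHOW USER DATABASES;
SHOW SYSTEM DATABASES;
```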
## SHOW DNODES
@ -183,10 +183,10 @@ Shows all subscriptions in the system.
## SHOW TABLES
```sql
SHOW [db_name.]TABLES [LIKE 'pattern'];
SHOW [NORMAL | CHILD] [db_name.]TABLES [LIKE 'pattern'];
```
Shows all standard tables and subtables in the current database. You can use LIKE for fuzzy matching.
Shows all standard tables and subtables in the current database. You can use LIKE for fuzzy matching. The `NORMAL` qualifier specifies standard tables. The `CHILD` qualifier specifies subtables.
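Following the syntax above, a couple of hedged usage sketches (the database name `test` and the table-name pattern are only examples):

```sql
SHOW NORMAL TABLES;
SHOW CHILD test.TABLES LIKE 'd100%';
```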
## SHOW TABLE DISTRIBUTED

View File

@ -1,178 +0,0 @@
---
title: Install and Uninstall
description: This document describes how to install, upgrade, and uninstall TDengine.
---
import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";
This document gives more information about installing, uninstalling, and upgrading TDengine.
## Install
For details about installing TDengine, please refer to the [Installation Guide](../../get-started/package/).
## Uninstall
<Tabs>
<TabItem label="Uninstall by apt-get" value="aptremove">
A TDengine package installed with apt-get can be uninstalled as below:
```bash
$ sudo apt-get remove tdengine
Reading package lists... Done
Building dependency tree
Reading state information... Done
The following packages will be REMOVED:
tdengine
0 upgraded, 0 newly installed, 1 to remove and 18 not upgraded.
After this operation, 68.3 MB disk space will be freed.
Do you want to continue? [Y/n] y
(Reading database ... 135625 files and directories currently installed.)
Removing tdengine (3.0.0.0) ...
TDengine is removed successfully!
```
If you have installed taos-tools, please uninstall it before uninstalling TDengine. The uninstall command is as follows:
```
$ sudo apt remove taostools
Reading package lists... Done
Building dependency tree
Reading state information... Done
The following packages will be REMOVED:
taostools
0 upgraded, 0 newly installed, 1 to remove and 0 not upgraded.
After this operation, 68.3 MB disk space will be freed.
Do you want to continue? [Y/n]
(Reading database ... 147973 files and directories currently installed.)
Removing taostools (2.1.2) ...
```
</TabItem>
<TabItem label="Uninstall Deb" value="debuninst">
Deb package of TDengine can be uninstalled as below:
```
$ sudo dpkg -r tdengine
(Reading database ... 137504 files and directories currently installed.)
Removing tdengine (3.0.0.0) ...
TDengine is removed successfully!
```
Deb package of taosTools can be uninstalled as below:
```
$ sudo dpkg -r taostools
(Reading database ... 147973 files and directories currently installed.)
Removing taostools (2.1.2) ...
```
</TabItem>
<TabItem label="Uninstall RPM" value="rpmuninst">
RPM package of TDengine can be uninstalled as below:
```
$ sudo rpm -e tdengine
TDengine is removed successfully!
```
RPM package of taosTools can be uninstalled as below:
```
sudo rpm -e taostools
taosToole is removed successfully!
```
</TabItem>
<TabItem label="Uninstall tar.gz" value="taruninst">
tar.gz package of TDengine can be uninstalled as below:
```
$ rmtaos
TDengine is removed successfully!
```
tar.gz package of taosTools can be uninstalled as below:
```
$ rmtaostools
Start to uninstall taos tools ...
taos tools is uninstalled successfully!
```
</TabItem>
<TabItem label="Windows uninstall" value="windows">
Run C:\TDengine\unins000.exe to uninstall TDengine on a Windows system.
</TabItem>
<TabItem label="Mac uninstall" value="mac">
TDengine can be uninstalled as below:
```
$ rmtaos
TDengine is removed successfully!
```
</TabItem>
</Tabs>
:::info
- We strongly recommend against using multiple kinds of TDengine installation packages on a single host. The packages may affect each other and cause errors.
- After deb package is installed, if the installation directory is removed manually, uninstall or reinstall will not work. This issue can be resolved by using the command below which cleans up TDengine package information.
```
$ sudo rm -f /var/lib/dpkg/info/tdengine*
```
You can then reinstall if needed.
- After rpm package is installed, if the installation directory is removed manually, uninstall or reinstall will not work. This issue can be resolved by using the command below which cleans up TDengine package information.
```
$ sudo rpm -e --noscripts tdengine
```
You can then reinstall if needed.
:::
Uninstalling and Modifying Files
- When TDengine is uninstalled, the configuration /etc/taos/taos.cfg, data directory /var/lib/taos, and log directory /var/log/taos are kept. They can be deleted manually, but with caution, because the data cannot be recovered afterwards. Please follow your data integrity, security, backup, and other relevant SOPs before deleting any data.
- When reinstalling TDengine, if the default configuration file /etc/taos/taos.cfg exists, it will be kept and the configuration file in the installation package will be renamed to taos.cfg.orig and stored at /usr/local/taos/cfg to be used as configuration sample. Otherwise the configuration file in the installation package will be installed to /etc/taos/taos.cfg and used.
## Upgrade
There are two aspects to an upgrade: upgrading the installation package and upgrading a running server.
To upgrade a package, follow the steps mentioned previously to first uninstall the old version then install the new version.
Upgrading a running server is much more complex. First, check the version numbers of the old and new versions. The version number of TDengine consists of 4 sections; only if the first 2 sections match can the old version be upgraded to the new version. The steps for upgrading a running server are as below:
- Stop inserting data
- Make sure all data is persisted to disk; you can use the `flush database` command (see the sketch after this list)
- Stop the cluster of TDengine
- Uninstall old version and install new version
- Start the cluster of TDengine
- Execute simple queries, such as the ones executed prior to installing the new package, to make sure there is no data loss
- Run some simple data insertion statements to make sure the cluster works well
- Restore business services
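A minimal sketch of the flush step above (the database name `power` is only an example):

```sql
-- Force all in-memory data of the database to be written to disk before stopping the cluster
FLUSH DATABASE power;
```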
:::warning
TDengine doesn't guarantee any lower version is compatible with the data generated by a higher version, so it's never recommended to downgrade the version.
:::

View File

@ -41,8 +41,6 @@ An existing Grafana Notification Channel can be specified with parameter `-E`, t
Launch `TDinsight.sh` with the command above and restart Grafana, then open Dashboard `http://localhost:3000/d/tdinsight`.
For more use cases and restrictions please refer to [TDinsight](/reference/tdinsight/).
## log database
The data of the TDinsight dashboard is stored in the `log` database by default (you can change this in taosKeeper's config file; for more information, please refer to the [taosKeeper document](/reference/taosKeeper)). taosKeeper creates the `log` database on startup.
@ -106,22 +104,22 @@ The data of tdinsight dashboard is stored in `log` database (default. You can ch
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|uptime|FLOAT||dnode uptime|
|uptime|FLOAT||dnode uptime in `days`|
|cpu\_engine|FLOAT||cpu usage of tdengine. read from `/proc/<taosd_pid>/stat`|
|cpu\_system|FLOAT||cpu usage of server. read from `/proc/stat`|
|cpu\_cores|FLOAT||cpu cores of server|
|mem\_engine|INT||memory usage of tdengine. read from `/proc/<taosd_pid>/status`|
|mem\_system|INT||available memory on the server|
|mem\_system|INT||available memory on the server in `KB`|
|mem\_total|INT||total memory of server in `KB`|
|disk\_engine|INT|||
|disk\_used|BIGINT||usage of data dir in `bytes`|
|disk\_total|BIGINT||the capacity of data dir in `bytes`|
|net\_in|FLOAT||network throughput rate in kb/s. read from `/proc/net/dev`|
|net\_out|FLOAT||network throughput rate in kb/s. read from `/proc/net/dev`|
|io\_read|FLOAT||io throughput rate in kb/s. read from `/proc/<taosd_pid>/io`|
|io\_write|FLOAT||io throughput rate in kb/s. read from `/proc/<taosd_pid>/io`|
|io\_read\_disk|FLOAT||io throughput rate of disk in kb/s. read from `/proc/<taosd_pid>/io`|
|io\_write\_disk|FLOAT||io throughput rate of disk in kb/s. read from `/proc/<taosd_pid>/io`|
|net\_in|FLOAT||network throughput rate in byte/s. read from `/proc/net/dev`|
|net\_out|FLOAT||network throughput rate in byte/s. read from `/proc/net/dev`|
|io\_read|FLOAT||io throughput rate in byte/s. read from `/proc/<taosd_pid>/io`|
|io\_write|FLOAT||io throughput rate in byte/s. read from `/proc/<taosd_pid>/io`|
|io\_read\_disk|FLOAT||io throughput rate of disk in byte/s. read from `/proc/<taosd_pid>/io`|
|io\_write\_disk|FLOAT||io throughput rate of disk in byte/s. read from `/proc/<taosd_pid>/io`|
|req\_select|INT||number of select queries received per dnode|
|req\_select\_rate|FLOAT||number of select queries received per dnode divided by monitor interval.|
|req\_insert|INT||number of insert queries received per dnode|
@ -150,9 +148,9 @@ The data of tdinsight dashboard is stored in `log` database (default. You can ch
|ts|TIMESTAMP||timestamp|
|name|NCHAR||data directory. default is `/var/lib/taos`|
|level|INT||level for multi-level storage|
|avail|BIGINT||available space for data directory|
|used|BIGINT||used space for data directory|
|total|BIGINT||total space for data directory|
|avail|BIGINT||available space for data directory in `bytes`|
|used|BIGINT||used space for data directory in `bytes`|
|total|BIGINT||total space for data directory in `bytes`|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
@ -165,9 +163,9 @@ The data of tdinsight dashboard is stored in `log` database (default. You can ch
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|name|NCHAR||log directory. default is `/var/log/taos/`|
|avail|BIGINT||available space for log directory|
|used|BIGINT||used space for data directory|
|total|BIGINT||total space for data directory|
|avail|BIGINT||available space for log directory in `bytes`|
|used|BIGINT||used space for data directory in `bytes`|
|total|BIGINT||total space for data directory in `bytes`|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
@ -180,9 +178,9 @@ The data of tdinsight dashboard is stored in `log` database (default. You can ch
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|name|NCHAR||temp directory. default is `/tmp/`|
|avail|BIGINT||available space for temp directory|
|used|BIGINT||used space for temp directory|
|total|BIGINT||total space for temp directory|
|avail|BIGINT||available space for temp directory in `bytes`|
|used|BIGINT||used space for temp directory in `bytes`|
|total|BIGINT||total space for temp directory in `bytes`|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|

View File

@ -31,11 +31,13 @@ We recommend using the latest version of `taospy`, regardless of the version of
|Python Connector Version|major changes|
|:-------------------:|:----:|
|2.7.12|1. added support for the `varbinary` type (not yet supported in STMT)<br/> 2. improved query performance (thanks to contributor [hadrianl](https://github.com/taosdata/taos-connector-python/pull/209))|
|2.7.9|support for getting assignment and seek function on subscription|
|2.7.8|add `execute_many` method|
|Python Websocket Connector Version|major changes|
|:----------------------------:|:-----:|
|0.2.9|bug fixes|
|0.2.5|1. support for getting assignment and seek function on subscription <br/> 2. support schemaless <br/> 3. support STMT|
|0.2.4|support `unsubscribe` on subscription|
@ -1023,10 +1025,6 @@ Due to the current imperfection of Python's nanosecond support (see link below),
1. https://stackoverflow.com/questions/10611328/parsing-datetime-strings-containing-nanoseconds
2. https://www.python.org/dev/peps/pep-0564/
## Important Update
[**Release Notes**](https://github.com/taosdata/taos-connector-python/releases)
## API Reference
- [taos](https://docs.taosdata.com/api/taospy/taos/)

View File

@ -52,8 +52,6 @@ curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive
&& tar -xzf php-tdengine.tar.gz -C php-tdengine --strip-components=1
```
> Version number `v1.0.2` is only an example; it can be replaced with any newer version, please find the available versions in [TDengine PHP Connector Releases](https://github.com/Yurunsoft/php-tdengine/releases).
**Non-Swoole Environment:**
```shell

View File

@ -4,7 +4,6 @@ import PkgListV3 from "/components/PkgListV3";
<PkgListV3 type={1} sys="Linux" />
[All Downloads](../../releases/tdengine)
2. Unzip

View File

@ -4,8 +4,6 @@ import PkgListV3 from "/components/PkgListV3";
<PkgListV3 type={8} sys="macOS" />
[All Downloads](../../releases/tdengine)
2. Execute the installer, select the default value as prompted, and complete the installation. If the installation is blocked, you can right-click or ctrl-click on the installation package and select `Open`.
3. configure taos.cfg

View File

@ -3,8 +3,6 @@ import PkgListV3 from "/components/PkgListV3";
1. Download the client installation package
<PkgListV3 type={4} sys="Windows" />
[All Downloads](../../releases/tdengine)
2. Execute the installer, select the default value as prompted, and complete the installation
3. Installation path

View File

@ -31,7 +31,7 @@ taosAdapter provides the following features.
### Install taosAdapter
If you use the TDengine server, you don't need additional steps to install taosAdapter. You can download taosAdapter from [TDengine 3.0 released versions](../../releases/tdengine) to download the TDengine server installation package. If you need to deploy taosAdapter separately on another server other than the TDengine server, you should install the full TDengine server package on that server to install taosAdapter. If you need to build taosAdapter from source code, you can refer to the [Building taosAdapter]( https://github.com/taosdata/taosadapter/blob/3.0/BUILD.md) documentation.
If you use the TDengine server, you don't need additional steps to install taosAdapter. If you need to deploy taosAdapter separately on a server other than the TDengine server, you should install the full TDengine server package on that server in order to install taosAdapter. If you need to build taosAdapter from source code, you can refer to the [Building taosAdapter](https://github.com/taosdata/taosadapter/blob/3.0/BUILD.md) documentation.
### Start/Stop taosAdapter
@ -180,7 +180,7 @@ See [example/config/taosadapter.toml](https://github.com/taosdata/taosadapter/bl
node_export is an exporter for machine metrics. Please visit [https://github.com/prometheus/node_exporter](https://github.com/prometheus/node_exporter) for more information.
- Support for Prometheus remote_read and remote_write
remote_read and remote_write are interfaces for Prometheus data read and write from/to other data storage solution. Please visit [https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/#remote-apis](https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/#remote-apis) for more information.
- Get table's VGroup ID. For more information about VGroup, please refer to [primary-logic-unit](/tdinternal/arch/#primary-logic-unit).
- Get table's VGroup ID.
## Interfaces
@ -246,7 +246,7 @@ node_export is an exporter of hardware and OS metrics exposed by the \*NIX kerne
### Get table's VGroup ID
You can call `http://<fqdn>:6041/rest/vgid?db=<db>&table=<table>` to get table's VGroup ID. For more information about VGroup, please refer to [primary-logic-unit](/tdinternal/arch/#primary-logic-unit).
You can call `http://<fqdn>:6041/rest/vgid?db=<db>&table=<table>` to get table's VGroup ID.
## Memory usage optimization methods

View File

@ -13,7 +13,7 @@ taosBenchmark (formerly taosdemo ) is a tool for testing the performance of TDen
There are two ways to install taosBenchmark:
- Installing the official TDengine installer will automatically install taosBenchmark. Please refer to [TDengine installation](../../operation/pkg-install) for details.
- Installing the official TDengine installer will automatically install taosBenchmark.
- Compile taos-tools separately and install them. Please refer to the [taos-tools](https://github.com/taosdata/taos-tools) repository for details.
@ -397,6 +397,7 @@ The configuration parameters for specifying super table tag columns and data col
### Query scenario configuration parameters
`filetype` must be set to `query` in the query scenario.
`query_times` is the number of times each query is run.
To control slow queries in the query scenario, set the `kill_slow_query_threshold` and `kill_slow_query_interval` parameters: the threshold is the execution time, in seconds, after which taosBenchmark kills a running query; the interval is the sleep time, in seconds, between checks, so that continuously checking for slow queries does not consume excessive CPU.

View File

@ -103,7 +103,7 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
use letter and number only. Default is NOT.
-n, --no-escape No escape char '`'. Default is using it.
-Q, --dot-replace Replace dot character with underline character in
the table name.
the table name.(Version 2.5.3)
-T, --thread-num=THREAD_NUM Number of thread for dump in file. Default is
8.
-C, --cloud=CLOUD_DSN specify a DSN to access TDengine cloud service
@ -113,6 +113,10 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
-?, --help Give this help list
--usage Give a short usage message
-V, --version Print program version
-W, --rename=RENAME-LIST Rename database name with new name during
importing data. RENAME-LIST:
"db1=newDB1|db2=newDB2" means rename db1 to newDB1
and rename db2 to newDB2 (Version 2.5.4)
Mandatory or optional arguments to long options are also mandatory or optional
for any corresponding short options.

View File

@ -16,7 +16,7 @@ taosKeeper is a tool for TDengine that exports monitoring metrics. With taosKeep
There are two ways to install taosKeeper:
Methods of installing taosKeeper:
- Installing the official TDengine installer will automatically install taosKeeper. Please refer to [TDengine installation](../../operation/pkg-install) for details.
- Installing the official TDengine installer will automatically install taosKeeper.
- You can compile taosKeeper separately and install it. Please refer to the [taosKeeper](https://github.com/taosdata/taoskeeper) repository for details.
## Configuration and Launch

View File

@ -21,7 +21,7 @@ TDengine Source Connector is used to read data from TDengine in real-time and se
1. Linux operating system
2. Java 8 and Maven installed
3. Git/curl/vi is installed
4. TDengine is installed and started. If not, please refer to [Installation and Uninstallation](../../operation/pkg-install)
4. TDengine is installed and started.
## Install Kafka

View File

@ -10,76 +10,60 @@ description: How to use Seeq and TDengine to perform time series data analysis
Seeq is advanced analytics software for the manufacturing industry and the Industrial Internet of Things (IIoT). Seeq supports the use of machine learning innovations within process manufacturing organizations. These capabilities enable organizations to deploy their own or third-party machine learning algorithms into advanced analytics applications used by frontline process engineers and subject matter experts, thus extending the efforts of a single data scientist to many frontline workers.
With the TDengine Java connector, Seeq effortlessly supports querying time series data provided by TDengine and offers functionalities such as data visualization, analysis, and forecasting.
TDengine can be added as a data source in Seeq via the JDBC connector. Once the data source is configured, Seeq can read data from TDengine and offer functionalities such as data visualization, analysis, and forecasting.
### Install Seeq
## Prerequisite
Please download Seeq Server and Seeq Data Lab software installation package from the [Seeq official website](https://www.seeq.com/customer-download).
1. Install Seeq Server and Seeq Data Lab software
2. Install TDengine or register TDengine Cloud service
### Install and start Seeq Server
```
tar xvzf seeq-server-xxx.tar.gz
cd seeq-server-installer
sudo ./install
sudo seeq service enable
sudo seeq start
```
### Install and start Seeq Data Lab Server
Seeq Data Lab needs to be installed on a separate server from Seeq Server and connected to Seeq Server through configuration. For detailed installation and configuration instructions, please refer to [the official documentation](https://support.seeq.com/space/KB/1034059842).
```
tar xvf seeq-data-lab-<version>-64bit-linux.tar.gz
sudo seeq-data-lab-installer/install -f /opt/seeq/seeq-data-lab -g /var/opt/seeq -u seeq
sudo seeq config set Network/DataLab/Hostname localhost
sudo seeq config set Network/DataLab/Port 34231 # the port of the Data Lab server (usually 34231)
sudo seeq config set Network/Hostname <value> # the host IP or URL of the main Seeq Server
# If the main Seeq server is configured to listen over HTTPS
sudo seeq config set Network/Webserver/SecurePort 443 # the secure port of the main Seeq Server (usually 443)
# If the main Seeq server is NOT configured to listen over HTTPS
sudo seeq config set Network/Webserver/Port <value>
#On the main Seeq server, open a Seeq Command Prompt and set the hostname of the Data Lab server:
sudo seeq config set Network/DataLab/Hostname <value> # the host IP (not URL) of the Data Lab server
sudo seeq config set Network/DataLab/Port 34231 # the port of the Data Lab server (usually 34231)
```
### Install TDengine on-premise instance
See [Quick Install from Package](../../get-started).
### Or use TDengine Cloud
Register for a [TDengine Cloud](https://cloud.tdengine.com) account and log in to your account.
## Make Seeq be able to access TDengine
1. Get data location configuration
## Install TDengine JDBC connector
1. Get Seeq data location configuration
```
sudo seeq config get Folders/Data
```
2. Download TDengine Java connector from maven.org. Please use the latest version (Current is 3.2.5, https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/3.2.5/taos-jdbcdriver-3.2.5-dist.jar).
2. Download the latest TDengine Java connector from maven.org (the current version is [3.2.5](https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/3.2.5/taos-jdbcdriver-3.2.5-dist.jar)) and copy the JAR file into the_directory_found_in_step_1/plugins/lib/ (a consolidated shell sketch of these steps appears after the data source configuration example below)
3. Restart Seeq server
```
sudo seeq restart
```
4. Input License
## Add TDengine into Seeq's data source
1. Open Seeq, login as admin, go to Administration, click "Add Data Source"
2. For connector, choose SQL connector v2
3. Inside "Additional Configuration" input box, copy and paste the following
Use a browser to access ip:34216 and input the license according to the guide.
```
{
"QueryDefinitions": []
"Type": "GENERIC",
"Hostname": null,
"Port": 0,
"DatabaseName": null,
"Username": null,
"Password": null,
"InitialSql": null,
"TimeZone": null,
"PrintRows": false,
"UseWindowsAuth": false,
"SqlFetchBatchSize": 100000,
"UseSSL": false,
"JdbcProperties": null,
"GenericDatabaseConfig": {
"DatabaseJdbcUrl": "jdbc:TAOS-RS://localhost:6030/?user=root&password=taosdata",
"SqlDriverClassName": "com.taosdata.jdbc.rs.RestfulDriver",
"ResolutionInNanoseconds": 1000,
"ZonedColumnTypes": []
}
}
```
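Taken together, the connector installation steps above can be summarized in the following shell sketch. The `/opt/seeq/data` folder is only an assumption; use the path actually reported by `seeq config get Folders/Data` on your server.

```
# Locate the Seeq data folder (step 1)
sudo seeq config get Folders/Data

# Download the TDengine Java connector and copy it into Seeq's plugin library (step 2)
curl -L -o taos-jdbcdriver-3.2.5-dist.jar \
  https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/3.2.5/taos-jdbcdriver-3.2.5-dist.jar
sudo cp taos-jdbcdriver-3.2.5-dist.jar /opt/seeq/data/plugins/lib/

# Restart Seeq so the new driver is picked up (step 3)
sudo seeq restart
```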
## How to use Seeq to analyze time-series data that TDengine serves
Note: You need to replace DatabaseJdbcUrl with your own setting. Please log in to TDengine Cloud or open taosExplorer for the enterprise edition, then click Programming -> Java to find yours. For the "QueryDefinitions", please follow the examples below to write your own.
This chapter demonstrates how to use Seeq software in conjunction with TDengine for time series data analysis.
## Use Seeq to analyze time-series data stored inside TDengine
This chapter demonstrates how to use Seeq with TDengine for time series data analysis.
### Scenario Overview
@ -150,8 +134,8 @@ Please login with Seeq administrator and create a few data sources as following.
"Hostname": null,
"Port": 0,
"DatabaseName": null,
"Username": "root",
"Password": "taosdata",
"Username": null,
"Password": null,
"InitialSql": null,
"TimeZone": null,
"PrintRows": false,
@ -210,8 +194,8 @@ Please login with Seeq administrator and create a few data sources as following.
"Hostname": null,
"Port": 0,
"DatabaseName": null,
"Username": "root",
"Password": "taosdata",
"Username": null,
"Password": null,
"InitialSql": null,
"TimeZone": null,
"PrintRows": false,
@ -269,8 +253,8 @@ Please login with Seeq administrator and create a few data sources as following.
"Hostname": null,
"Port": 0,
"DatabaseName": null,
"Username": "root",
"Password": "taosdata",
"Username": null,
"Password": null,
"InitialSql": null,
"TimeZone": null,
"PrintRows": false,
@ -289,13 +273,13 @@ Please login with Seeq administrator and create a few data sources as following.
#### Launch Seeq Workbench
Please login to Seeq server with IP:port and create a new Seeq Workbench, then select data sources and choose the correct tools to do data visualization and analysis. Please refer to [the official documentation](https://support.seeq.com/space/KB/146440193/Seeq+Workbench) for the details.
Please log in to the Seeq server and create a new Seeq Workbench, then select data sources and choose the appropriate tools for data visualization and analysis. Please refer to [the official documentation](https://support.seeq.com/space/KB/146440193/Seeq+Workbench) for the details.
![Seeq Workbench](./seeq/seeq-demo-workbench.webp)
#### Use Seeq Data Lab Server for advanced data analysis
Please login to the Seeq service with IP:port and create a new Seeq Data Lab. Then you can use advanced tools including Python environment and machine learning add-ons for more complex analysis.
Please log in to the Seeq service and create a new Seeq Data Lab. Then you can use advanced tools including the Python environment and machine learning add-ons for more complex analysis.
```Python
from seeq import spy
@ -370,13 +354,15 @@ Please note that when using TDengine Cloud, you need to specify the database nam
#### The data source of TDengine Cloud example
This data source contains data from a smart meter in the public database smartmeters.
```
{
"QueryDefinitions": [
{
"Name": "CloudVoltage",
"Type": "SIGNAL",
"Sql": "SELECT ts, voltage FROM test.meters",
"Sql": "SELECT ts, voltage FROM smartmeters.d1000",
"Enabled": true,
"TestMode": false,
"TestQueriesDuringSync": true,
@ -409,8 +395,8 @@ Please note that when using TDengine Cloud, you need to specify the database nam
"Hostname": null,
"Port": 0,
"DatabaseName": null,
"Username": "root",
"Password": "taosdata",
"Username": null,
"Password": null,
"InitialSql": null,
"TimeZone": null,
"PrintRows": false,
@ -419,7 +405,7 @@ Please note that when using TDengine Cloud, you need to specify the database nam
"UseSSL": false,
"JdbcProperties": null,
"GenericDatabaseConfig": {
"DatabaseJdbcUrl": "jdbc:TAOS-RS://gw.cloud.taosdata.com?useSSL=true&token=41ac9d61d641b6b334e8b76f45f5a8XXXXXXXXXX",
"DatabaseJdbcUrl": "jdbc:TAOS-RS://gw.us-west-2.aws.cloud.tdengine.com?useSSL=true&token=42b874395452d36f38dd6bf4317757611b213683",
"SqlDriverClassName": "com.taosdata.jdbc.rs.RestfulDriver",
"ResolutionInNanoseconds": 1000,
"ZonedColumnTypes": []
@ -433,8 +419,8 @@ Please note that when using TDengine Cloud, you need to specify the database nam
## Conclusion
By integrating Seeq and TDengine, it is possible to leverage the efficient storage and querying performance of TDengine while also benefiting from Seeq's powerful data visualization and analysis capabilities provided to users.
By integrating Seeq and TDengine, you can leverage the efficient storage and querying performance of TDengine while also benefiting from the powerful data visualization and analysis capabilities that Seeq provides.
This integration allows users to take advantage of TDengine's high-performance time-series data storage and retrieval, ensuring efficient handling of large volumes of data. At the same time, Seeq provides advanced analytics features such as data visualization, anomaly detection, correlation analysis, and predictive modeling, enabling users to gain valuable insights and make data-driven decisions.
This integration allows users to take advantage of TDengine's high-performance time-series data storage and querying, ensuring efficient handling of large volumes of data. At the same time, Seeq provides advanced analytics features such as data visualization, anomaly detection, correlation analysis, and predictive modeling, enabling users to gain valuable insights and make data-driven decisions.
Together, Seeq and TDengine provide a comprehensive solution for time series data analysis in diverse industries such as manufacturing, IIoT, and power systems. The combination of efficient data storage and advanced analytics empowers users to unlock the full potential of their time series data, driving operational improvements, and enabling predictive and prescriptive analytics applications.

View File

@ -4,20 +4,14 @@ description: 简要介绍 TDengine 的主要功能
toc_max_heading_level: 2
---
TDengine 是一款开源、高性能、云原生的[时序数据库](https://tdengine.com/tsdb/)且针对物联网、车联网、工业互联网、金融、IT 运维等场景进行了优化。TDengine 的代码,包括集群功能,都在 GNU AGPL v3.0 下开源。除核心的时序数据库功能外TDengine 还提供[缓存](../develop/cache/)、[数据订阅](../develop/tmq)、[流式计算](../develop/stream)等其它功能以降低系统复杂度及研发和运维成本
TDengine 是一款专为物联网、工业互联网等场景设计并优化的大数据平台,它能安全高效地将大量设备、数据采集器每天产生的高达 TB 甚至 PB 级的数据进行汇聚、存储、分析和分发,对业务运行状态进行实时监测、预警,提供实时的商业洞察。其核心模块是高性能、集群开源、云原生、极简的时序数据库 TDengine OSS
本章节介绍 TDengine 的主要产品和功能、竞争优势、适用场景、与其他数据库的对比测试等等,让大家对 TDengine 有个整体的了解。
## 主要产品
TDengine 有三个主要产品TDengine Enterprise (即 TDengine 企业版TDengine Cloud和 TDengine OSS关于它们的具体定义请参考
- [TDengine 企业版](https://www.taosdata.com/tdengine-pro)
- [TDengine 云服务](https://cloud.taosdata.com/?utm_source=menu&utm_medium=webcn)
- [TDengine 开源版](https://www.taosdata.com/tdengine-oss)
本节介绍 TDengine OSS 的主要产品和功能、竞争优势、适用场景、与其他数据库的对比测试等等,让大家对 TDengine OSS 有个整体了解
## 主要功能
TDengine 的主要功能如下:
TDengine OSS 的主要功能如下:
1. 写入数据,支持
- [SQL 写入](../develop/insert-data/sql-writing)
@ -150,3 +144,10 @@ TDengine 的主要功能如下:
- [TDengine VS InfluxDB ,写入性能大 PK ](https://www.taosdata.com/2021/11/05/3248.html)
- [TDengine 和 InfluxDB 查询性能对比测试报告](https://www.taosdata.com/2022/02/22/5969.html)
- [TDengine 与 InfluxDB、OpenTSDB、Cassandra、MySQL、ClickHouse 等数据库的对比测试报告](https://www.taosdata.com/downloads/TDengine_Testing_Report_cn.pdf)
## 主要产品
TDengine 有两个主要产品TDengine Enterprise (即 TDengine 企业版)和 TDengine Cloud关于它们的具体定义请参考
- [TDengine 企业版](https://www.taosdata.com/tdengine-pro)
- [TDengine 云服务](https://cloud.taosdata.com/?utm_source=menu&utm_medium=webcn)

View File

@ -4,7 +4,7 @@ description: '快速设置 TDengine 环境并体验其高效写入和查询'
---
import xiaot from './xiaot.webp'
import xiaot_new from './xiaot-03.webp'
import xiaot_new from './xiaot-20231007.png'
import channel from './channel.webp'
import official_account from './official-account.webp'

Binary file not shown.

After

Width:  |  Height:  |  Size: 112 KiB

View File

@ -63,17 +63,17 @@ import CDemo from "./_sub_c.mdx";
typedef void(tmq_commit_cb(tmq_t *tmq, int32_t code, void *param));
typedef enum tmq_conf_res_t {
TMQ_CONF_UNKNOWN = -2,
TMQ_CONF_INVALID = -1,
TMQ_CONF_OK = 0,
} tmq_conf_res_t;
TMQ_CONF_UNKNOWN = -2,
TMQ_CONF_INVALID = -1,
TMQ_CONF_OK = 0,
} tmq_conf_res_t;
typedef struct tmq_topic_assignment {
int32_t vgId;
int64_t currentOffset;
int64_t begin;
int64_t end;
} tmq_topic_assignment;
int32_t vgId;
int64_t currentOffset;
int64_t begin;
int64_t end;
} tmq_topic_assignment;
DLL_EXPORT tmq_conf_t *tmq_conf_new();
DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value);
@ -106,7 +106,7 @@ import CDemo from "./_sub_c.mdx";
DLL_EXPORT const char *tmq_get_db_name(TAOS_RES *res);
DLL_EXPORT int32_t tmq_get_vgroup_id(TAOS_RES *res);
DLL_EXPORT int64_t tmq_get_vgroup_offset(TAOS_RES* res);
DLL_EXPORT const char *tmq_err2str(int32_t code);DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param);
DLL_EXPORT const char *tmq_err2str(int32_t code);
```
下面介绍一下它们的具体用法(超级表和子表结构请参考“数据建模”一节),完整的示例代码请见下面 C 语言的示例代码。
@ -351,10 +351,10 @@ CREATE TOPIC topic_name [with meta] AS DATABASE db_name;
| `td.connect.port` | integer | 服务端的端口号 | |
| `group.id` | string | 消费组 ID同一消费组共享消费进度 | <br />**必填项**。最大长度192。<br />每个topic最多可建立100个 consumer group |
| `client.id` | string | 客户端 ID | 最大长度192。 |
| `auto.offset.reset` | enum | 消费组订阅的初始位置 | <br />`earliest`: default;从头开始订阅; <br/>`latest`: 仅从最新数据开始订阅; <br/>`none`: 没有提交的 offset 无法订阅 |
| `auto.offset.reset` | enum | 消费组订阅的初始位置 | <br />`earliest`: default(version < 3.2.0.0);从头开始订阅; <br/>`latest`: default(version >= 3.2.0.0);仅从最新数据开始订阅; <br/>`none`: 没有提交的 offset 无法订阅 |
| `enable.auto.commit` | boolean | 是否启用消费位点自动提交true: 自动提交客户端应用无需commitfalse客户端应用需要自行commit | 默认值为 true |
| `auto.commit.interval.ms` | integer | 消费记录自动提交消费位点时间间隔,单位为毫秒 | 默认值为 5000 |
| `msg.with.table.name` | boolean | 是否允许从消息中解析表名, 不适用于列订阅(列订阅时可将 tbname 作为列写入 subquery 语句) |默认关闭 |
| `msg.with.table.name` | boolean | 是否允许从消息中解析表名, 不适用于列订阅(列订阅时可将 tbname 作为列写入 subquery 语句)从3.2.0.0版本该参数废弃恒为true |默认关闭 |
| `enable.replay` | boolean | 是否开启数据回放功能 |默认关闭 |
对于不同编程语言,其设置方式如下:
@ -460,7 +460,19 @@ from taos.tmq import Consumer
# Syntax: `consumer = Consumer(configs)`
#
# Example:
consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
consumer = Consumer(
{
"group.id": "local",
"client.id": "1",
"enable.auto.commit": "true",
"auto.commit.interval.ms": "1000",
"td.connect.ip": "127.0.0.1",
"td.connect.user": "root",
"td.connect.pass": "taosdata",
"auto.offset.reset": "earliest",
"msg.with.table.name": "true",
}
)
```
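以下是一个示意性的消费循环,仅供参考:其中 `topic_meters` 是假设的 topic 名称,请替换为实际创建的 topic。

```python
# 示意代码:订阅并轮询消费topic_meters 仅为假设的 topic 名称
consumer.subscribe(["topic_meters"])
try:
    while True:
        res = consumer.poll(1)           # 最长等待 1 秒
        if not res:
            continue
        err = res.error()
        if err is not None:
            raise err
        for block in res.value():        # 每个 block 为一批行数据
            print(block.fetchall())
finally:
    consumer.unsubscribe()
    consumer.close()
```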
</TabItem>

View File

@ -33,11 +33,13 @@ Python 连接器的源码托管在 [GitHub](https://github.com/taosdata/taos-con
|Python Connector 版本|主要变化|
|:-------------------:|:----:|
|2.7.12|1. 新增 varbinary 类型支持STMT暂不支持 varbinary <br/> 2. query 性能提升(感谢贡献者[hadrianl](https://github.com/taosdata/taos-connector-python/pull/209)|
|2.7.9|数据订阅支持获取消费进度和重置消费进度|
|2.7.8|新增 `execute_many`|
|Python Websocket Connector 版本|主要变化|
|:----------------------------:|:-----:|
|0.2.9|已知问题修复|
|0.2.5|1. 数据订阅支持获取消费进度和重置消费进度 <br/> 2. 支持 schemaless <br/> 3. 支持 STMT|
|0.2.4|数据订阅新增取消订阅方法|

View File

@ -24,7 +24,7 @@ SELECT [hints] [DISTINCT] select_list
hints: /*+ [hint([hint_param_list])] [hint([hint_param_list])] */
hint:
BATCH_SCAN | NO_BATCH_SCAN
BATCH_SCAN | NO_BATCH_SCAN | SORT_FOR_GROUP
select_list:
select_expr [, select_expr] ...
@ -87,15 +87,17 @@ Hints 是用户控制单个语句查询优化的一种手段,当 Hint 不适
目前支持的 Hints 列表如下:
| **Hint** | **参数** | **说明** | **适用范围** |
| :-----------: | -------------- | -------------------------- | -------------------------- |
| BATCH_SCAN | 无 | 采用批量读表的方式 | 超级表 JOIN 语句 |
| NO_BATCH_SCAN | 无 | 采用顺序读表的方式 | 超级表 JOIN 语句 |
| **Hint** | **参数** | **说明** | **适用范围** |
| :-----------: | -------------- | -------------------------- | -----------------------------|
| BATCH_SCAN | 无 | 采用批量读表的方式 | 超级表 JOIN 语句 |
| NO_BATCH_SCAN | 无 | 采用顺序读表的方式 | 超级表 JOIN 语句 |
| SORT_FOR_GROUP| 无 | 采用sort方式进行分组 | partition by 列表有普通列时 |
举例:
```sql
SELECT /*+ BATCH_SCAN() */ a.ts FROM stable1 a, stable2 b where a.tag0 = b.tag0 and a.ts = b.ts;
SELECT /*+ SORT_FOR_GROUP() */ count(*), c1 FROM stable1 PARTITION BY c1;
```
## 列表

View File

@ -54,6 +54,7 @@ LIKE 条件使用通配符字符串进行匹配检查,规则如下:
MATCH 条件和 NMATCH 条件使用正则表达式进行匹配,规则如下:
- 支持符合 POSIX 规范的正则表达式,具体规范内容可参见 Regular Expressions。
- MATCH 和正则表达式匹配时,返回 TRUENMATCH 和正则表达式不匹配时,返回 TRUE。
- 只能针对子表名(即 tbname、字符串类型的标签值进行正则表达式过滤不支持普通列的过滤。
- 正则匹配字符串长度不能超过 128 字节。可以通过参数 maxRegexStringLen 设置和调整最大允许的正则匹配字符串,该参数是客户端配置参数,需要重启客户端才能生效
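例如,下面是一个按子表名进行正则过滤的示意查询,其中超级表 `meters` 仅为假设的示例表:

```sql
SELECT * FROM meters WHERE tbname MATCH '^d100[0-9]$';
```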

View File

@ -180,6 +180,7 @@ description: TDengine 保留关键字的详细列表
- MAX_DELAY
- BWLIMIT
- MAXROWS
- MAX_SPEED
- MERGE
- META
- MINROWS

View File

@ -26,7 +26,7 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
## INS_DNODES
提供 dnode 的相关信息。也可以使用 SHOW DNODES 来查询这些信息。
提供 dnode 的相关信息。也可以使用 SHOW DNODES 来查询这些信息。 SYSINFO 为 0 的用户不能查看此表。
| # | **列名** | **数据类型** | **说明** |
| --- | :------------: | ------------ | ----------------------------------------------------------------------------------------------------- |
@ -40,7 +40,7 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
## INS_MNODES
提供 mnode 的相关信息。也可以使用 SHOW MNODES 来查询这些信息。
提供 mnode 的相关信息。也可以使用 SHOW MNODES 来查询这些信息。 SYSINFO 为 0 的用户不能查看此表。
| # | **列名** | **数据类型** | **说明** |
| --- | :---------: | ------------ | ------------------ |
@ -52,22 +52,33 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
## INS_QNODES
当前系统中 QNODE 的信息。也可以使用 SHOW QNODES 来查询这些信息。
当前系统中 QNODE 的信息。也可以使用 SHOW QNODES 来查询这些信息。SYSINFO 属性为 0 的用户不能查看此表。
| # | **列名** | **数据类型** | **说明** |
| --- | :---------: | ------------ | ------------ |
| 1 | id | SMALLINT | qnode id |
| 2 | endpoint | BINARY(134) | qnode 的地址 |
| 2 | endpoint | VARCHAR(134) | qnode 的地址 |
| 3 | create_time | TIMESTAMP | 创建时间 |
## INS_SNODES
当前系统中 SNODE 的信息。也可以使用 SHOW SNODES 来查询这些信息。SYSINFO 属性为 0 的用户不能查看此表。
| # | **列名** | **数据类型** | **说明** |
| --- | :---------: | ------------ | ------------ |
| 1 | id | SMALLINT | snode id |
| 2 | endpoint | VARCHAR(134) | snode 的地址 |
| 3 | create_time | TIMESTAMP | 创建时间 |
## INS_CLUSTER
存储集群相关信息。
存储集群相关信息。 SYSINFO 属性为 0 的用户不能查看此表。
| # | **列名** | **数据类型** | **说明** |
| --- | :---------: | ------------ | ---------- |
| 1 | id | BIGINT | cluster id |
| 2 | name | BINARY(134) | 集群名称 |
| 2 | name | VARCHAR(134) | 集群名称 |
| 3 | create_time | TIMESTAMP | 创建时间 |
## INS_DATABASES
@ -76,25 +87,25 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
| # | **列名** | **数据类型** | **说明** |
| --- | :------------------: | ---------------- | ------------------------------------------------ |
| 1 | name | BINARY(32) | 数据库名 |
| 1 | name | VARCHAR(64) | 数据库名 |
| 2 | create_time | TIMESTAMP | 创建时间 |
| 3 | ntables | INT | 数据库中表的数量,包含子表和普通表但不包含超级表 |
| 4 | vgroups | INT | 数据库中有多少个 vgroup。需要注意`vgroups` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
| 6 | replica | INT | 副本数。需要注意,`replica` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
| 7 | strict | BINARY(4) | 废弃参数 |
| 8 | duration | INT | 单文件存储数据的时间跨度。需要注意,`duration` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
| 9 | keep | INT | 数据保留时长。需要注意,`keep` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
| 7 | strict | VARCHAR(4) | 废弃参数 |
| 8 | duration | VARCHAR(10) | 单文件存储数据的时间跨度。需要注意,`duration` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
| 9 | keep | VARCHAR(32) | 数据保留时长。需要注意,`keep` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
| 10 | buffer | INT | 每个 vnode 写缓存的内存块大小,单位 MB。需要注意`buffer` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
| 11 | pagesize | INT | 每个 VNODE 中元数据存储引擎的页大小,单位为 KB。需要注意`pagesize` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
| 12 | pages | INT | 每个 vnode 元数据存储引擎的缓存页个数。需要注意,`pages` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
| 13 | minrows | INT | 文件块中记录的最小条数。需要注意,`minrows` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
| 14 | maxrows | INT | 文件块中记录的最大条数。需要注意,`maxrows` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
| 15 | comp | INT | 数据压缩方式。需要注意,`comp` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
| 16 | precision | BINARY(2) | 时间分辨率。需要注意,`precision` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
| 17 | status | BINARY(10) | 数据库状态 |
| 18 | retentions | BINARY (60) | 数据的聚合周期和保存时长。需要注意,`retentions` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
| 16 | precision | VARCHAR(2) | 时间分辨率。需要注意,`precision` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
| 17 | status | VARCHAR(10) | 数据库状态 |
| 18 | retentions | VARCHAR(60) | 数据的聚合周期和保存时长。需要注意,`retentions` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
| 19 | single_stable | BOOL | 表示此数据库中是否只可以创建一个超级表。需要注意,`single_stable` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
| 20 | cachemodel | BINARY(60) | 表示是否在内存中缓存子表的最近数据。需要注意,`cachemodel` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
| 20 | cachemodel | VARCHAR(60) | 表示是否在内存中缓存子表的最近数据。需要注意,`cachemodel` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
| 21 | cachesize | INT | 表示每个 vnode 中用于缓存子表最近数据的内存大小。需要注意,`cachesize` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
| 22 | wal_level | INT | WAL 级别。需要注意,`wal_level` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
| 23 | wal_fsync_period | INT | 数据落盘周期。需要注意,`wal_fsync_period` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
@ -111,15 +122,15 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
| # | **列名** | **数据类型** | **说明** |
| --- | :-----------: | ------------- | --------------------------------------------------------------------------------------------- |
| 1 | name | BINARY(64) | 函数名 |
| 2 | comment | BINARY(255) | 补充说明。需要注意,`comment` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
| 1 | name | VARCHAR(64) | 函数名 |
| 2 | comment | VARCHAR(255) | 补充说明。需要注意,`comment` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
| 3 | aggregate | INT | 是否为聚合函数。需要注意,`aggregate` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
| 4 | output_type | BINARY(31) | 输出类型 |
| 4 | output_type | VARCHAR(31) | 输出类型 |
| 5 | create_time | TIMESTAMP | 创建时间 |
| 6 | code_len | INT | 代码长度 |
| 7 | bufsize | INT | buffer 大小 |
| 8 | func_language | BINARY(31) | 自定义函数编程语言 |
| 9 | func_body | BINARY(16384) | 函数体定义 |
| 8 | func_language | VARCHAR(31) | 自定义函数编程语言 |
| 9 | func_body | VARCHAR(16384) | 函数体定义 |
| 10 | func_version | INT | 函数版本号。初始版本为0每次替换更新版本号加1。 |
@ -129,12 +140,12 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
| # | **列名** | **数据类型** | **说明** |
| --- | :--------------: | ------------ | ------------------------------------------------------- |
| 1 | db_name | BINARY(32) | 包含此索引的表所在的数据库名 |
| 2 | table_name | BINARY(192) | 包含此索引的表的名称 |
| 3 | index_name | BINARY(192) | 索引名 |
| 4 | column_name | BINARY(64) | 建索引的列的列名 |
| 5 | index_type | BINARY(10) | 目前有 SMA 和 tag |
| 6 | index_extensions | BINARY(256) | 索引的额外信息。对 SMA/tag 类型的索引,是函数名的列表。 |
| 1 | db_name | VARCHAR(32) | 包含此索引的表所在的数据库名 |
| 2 | table_name | VARCHAR(192) | 包含此索引的表的名称 |
| 3 | index_name | VARCHAR(192) | 索引名 |
| 4 | column_name | VARCHAR(64) | 建索引的列的列名 |
| 5 | index_type | VARCHAR(10) | 目前有 SMA 和 tag |
| 6 | index_extensions | VARCHAR(256) | 索引的额外信息。对 SMA/tag 类型的索引,是函数名的列表。 |
## INS_STABLES
@ -142,16 +153,16 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
| # | **列名** | **数据类型** | **说明** |
| --- | :-----------: | ------------ | ----------------------------------------------------------------------------------------------------- |
| 1 | stable_name | BINARY(192) | 超级表表名 |
| 2 | db_name | BINARY(64) | 超级表所在的数据库的名称 |
| 1 | stable_name | VARCHAR(192) | 超级表表名 |
| 2 | db_name | VARCHAR(64) | 超级表所在的数据库的名称 |
| 3 | create_time | TIMESTAMP | 创建时间 |
| 4 | columns | INT | 列数目 |
| 5 | tags | INT | 标签数目。需要注意,`tags` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
| 6 | last_update | TIMESTAMP | 最后更新时间 |
| 7 | table_comment | BINARY(1024) | 表注释 |
| 8 | watermark | BINARY(64) | 窗口的关闭时间。需要注意,`watermark` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
| 9 | max_delay | BINARY(64) | 推送计算结果的最大延迟。需要注意,`max_delay` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
| 10 | rollup | BINARY(128) | rollup 聚合函数。需要注意,`rollup` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
| 7 | table_comment | VARCHAR(1024) | 表注释 |
| 8 | watermark | VARCHAR(64) | 窗口的关闭时间。需要注意,`watermark` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
| 9 | max_delay | VARCHAR(64) | 推送计算结果的最大延迟。需要注意,`max_delay` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
| 10 | rollup | VARCHAR(128) | rollup 聚合函数。需要注意,`rollup` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
## INS_TABLES
@ -159,37 +170,37 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
| # | **列名** | **数据类型** | **说明** |
| --- | :-----------: | ------------ | ------------------------------------------------------------------------------------- |
| 1 | table_name | BINARY(192) | 表名 |
| 2 | db_name | BINARY(64) | 数据库名 |
| 1 | table_name | VARCHAR(192) | 表名 |
| 2 | db_name | VARCHAR(64) | 数据库名 |
| 3 | create_time | TIMESTAMP | 创建时间 |
| 4 | columns | INT | 列数目 |
| 5 | stable_name | BINARY(192) | 所属的超级表表名 |
| 5 | stable_name | VARCHAR(192) | 所属的超级表表名 |
| 6 | uid | BIGINT | 表 id |
| 7 | vgroup_id | INT | vgroup id |
| 8 | ttl | INT | 表的生命周期。需要注意,`ttl` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
| 9 | table_comment | BINARY(1024) | 表注释 |
| 10 | type | BINARY(21) | 表类型 |
| 9 | table_comment | VARCHAR(1024) | 表注释 |
| 10 | type | VARCHAR(21) | 表类型 |
## INS_TAGS
| # | **列名** | **数据类型** | **说明** |
| --- | :---------: | ------------- | ---------------------- |
| 1 | table_name | BINARY(192) | 表名 |
| 2 | db_name | BINARY(64) | 该表所在的数据库的名称 |
| 3 | stable_name | BINARY(192) | 所属的超级表表名 |
| 4 | tag_name | BINARY(64) | tag 的名称 |
| 5 | tag_type | BINARY(64) | tag 的类型 |
| 6 | tag_value | BINARY(16384) | tag 的值 |
| 1 | table_name | VARCHAR(192) | 表名 |
| 2 | db_name | VARCHAR(64) | 该表所在的数据库的名称 |
| 3 | stable_name | VARCHAR(192) | 所属的超级表表名 |
| 4 | tag_name | VARCHAR(64) | tag 的名称 |
| 5 | tag_type | VARCHAR(64) | tag 的类型 |
| 6 | tag_value | VARCHAR(16384) | tag 的值 |
## INS_COLUMNS
| # | **列名** | **数据类型** | **说明** |
| --- | :-----------: | ------------ | ---------------------- |
| 1 | table_name | BINARY(192) | 表名 |
| 2 | db_name | BINARY(64) | 该表所在的数据库的名称 |
| 3 | table_type | BINARY(21) | 表类型 |
| 4 | col_name | BINARY(64) | 列 的名称 |
| 5 | col_type | BINARY(32) | 列 的类型 |
| 1 | table_name | VARCHAR(192) | 表名 |
| 2 | db_name | VARCHAR(64) | 该表所在的数据库的名称 |
| 3 | table_type | VARCHAR(21) | 表类型 |
| 4 | col_name | VARCHAR(64) | 列 的名称 |
| 5 | col_type | VARCHAR(32) | 列 的类型 |
| 6 | col_length | INT | 列 的长度 |
| 7 | col_precision | INT | 列 的精度 |
| 8 | col_scale | INT | 列 的比例 |
@ -197,51 +208,51 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
## INS_USERS
提供系统中创建的用户的相关信息。
提供系统中创建的用户的相关信息。SYSINFO 属性为 0 的用户不能查看此表。
| # | **列名** | **数据类型** | **说明** |
| --- | :---------: | ------------ | -------- |
| 1 | user_name | BINARY(23) | 用户名 |
| 2 | privilege | BINARY(256) | 权限 |
| 1 | user_name | VARCHAR(23) | 用户名 |
| 2 | privilege | VARCHAR(256) | 权限 |
| 3 | create_time | TIMESTAMP | 创建时间 |
## INS_GRANTS
提供企业版授权的相关信息。
提供企业版授权的相关信息。SYSINFO 属性为 0 的用户不能查看此表。
| # | **列名** | **数据类型** | **说明** |
| --- | :---------: | ------------ | --------------------------------------------------------------------------------------------------------- |
| 1 | version | BINARY(9) | 企业版授权说明official(官方授权的)/trial(试用的) |
| 2 | cpu_cores | BINARY(9) | 授权使用的 CPU 核心数量 |
| 3 | dnodes | BINARY(10) | 授权使用的 dnode 节点数量。需要注意,`dnodes` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
| 4 | streams | BINARY(10) | 授权创建的流数量。需要注意,`streams` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
| 5 | users | BINARY(10) | 授权创建的用户数量。需要注意,`users` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
| 6 | accounts | BINARY(10) | 授权创建的帐户数量。需要注意,`accounts` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
| 7 | storage | BINARY(21) | 授权使用的存储空间大小。需要注意,`storage` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
| 8 | connections | BINARY(21) | 授权使用的客户端连接数量。需要注意,`connections` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
| 9 | databases | BINARY(11) | 授权使用的数据库数量。需要注意,`databases` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
| 10 | speed | BINARY(9) | 授权使用的数据点每秒写入数量 |
| 11 | querytime | BINARY(9) | 授权使用的查询总时长 |
| 12 | timeseries | BINARY(21) | 授权使用的测点数量 |
| 13 | expired | BINARY(5) | 是否到期true到期false未到期 |
| 14 | expire_time | BINARY(19) | 试用期到期时间 |
| 1 | version | VARCHAR(9) | 企业版授权说明official(官方授权的)/trial(试用的) |
| 2 | cpu_cores | VARCHAR(9) | 授权使用的 CPU 核心数量 |
| 3 | dnodes | VARCHAR(10) | 授权使用的 dnode 节点数量。需要注意,`dnodes` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
| 4 | streams | VARCHAR(10) | 授权创建的流数量。需要注意,`streams` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
| 5 | users | VARCHAR(10) | 授权创建的用户数量。需要注意,`users` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
| 6 | accounts | VARCHAR(10) | 授权创建的帐户数量。需要注意,`accounts` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
| 7 | storage | VARCHAR(21) | 授权使用的存储空间大小。需要注意,`storage` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
| 8 | connections | VARCHAR(21) | 授权使用的客户端连接数量。需要注意,`connections` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
| 9 | databases | VARCHAR(11) | 授权使用的数据库数量。需要注意,`databases` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
| 10 | speed | VARCHAR(9) | 授权使用的数据点每秒写入数量 |
| 11 | querytime | VARCHAR(9) | 授权使用的查询总时长 |
| 12 | timeseries | VARCHAR(21) | 授权使用的测点数量 |
| 13 | expired | VARCHAR(5) | 是否到期true到期false未到期 |
| 14 | expire_time | VARCHAR(19) | 试用期到期时间 |
## INS_VGROUPS
系统中所有 vgroups 的信息。
系统中所有 vgroups 的信息。SYSINFO 属性为 0 的用户不能查看此表。
| # | **列名** | **数据类型** | **说明** |
| --- | :-------: | ------------ | ------------------------------------------------------------------------------------------------ |
| 1 | vgroup_id | INT | vgroup id |
| 2 | db_name | BINARY(32) | 数据库名 |
| 2 | db_name | VARCHAR(32) | 数据库名 |
| 3 | tables | INT | 此 vgroup 内有多少表。需要注意,`tables` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
| 4 | status | BINARY(10) | 此 vgroup 的状态 |
| 4 | status | VARCHAR(10) | 此 vgroup 的状态 |
| 5 | v1_dnode | INT | 第一个成员所在的 dnode 的 id |
| 6 | v1_status | BINARY(10) | 第一个成员的状态 |
| 6 | v1_status | VARCHAR(10) | 第一个成员的状态 |
| 7 | v2_dnode | INT | 第二个成员所在的 dnode 的 id |
| 8 | v2_status | BINARY(10) | 第二个成员的状态 |
| 8 | v2_status | VARCHAR(10) | 第二个成员的状态 |
| 9 | v3_dnode | INT | 第三个成员所在的 dnode 的 id |
| 10 | v3_status | BINARY(10) | 第三个成员的状态 |
| 10 | v3_status | VARCHAR(10) | 第三个成员的状态 |
| 11 | nfiles | INT | 此 vgroup 中数据/元数据文件的数量 |
| 12 | file_size | INT | 此 vgroup 中数据/元数据文件的大小 |
| 13 | tsma | TINYINT | 此 vgroup 是否专用于 Time-range-wise SMA1: 是, 0: 否 |
@ -252,55 +263,57 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
| # | **列名** | **数据类型** | **说明** |
| --- | :------: | ------------ | --------------------------------------------------------------------------------------- |
| 1 | name | BINARY(32) | 配置项名称 |
| 2 | value | BINARY(64) | 该配置项的值。需要注意,`value` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
| 1 | name | VARCHAR(32) | 配置项名称 |
| 2 | value | VARCHAR(64) | 该配置项的值。需要注意,`value` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
## INS_DNODE_VARIABLES
系统中每个 dnode 的配置参数。
系统中每个 dnode 的配置参数。SYSINFO 属性为 0 的用户不能查看此表。
| # | **列名** | **数据类型** | **说明** |
| --- | :------: | ------------ | --------------------------------------------------------------------------------------- |
| 1 | dnode_id | INT | dnode 的 ID |
| 2 | name | BINARY(32) | 配置项名称 |
| 3 | value | BINARY(64) | 该配置项的值。需要注意,`value` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
| 2 | name | VARCHAR(32) | 配置项名称 |
| 3 | value | VARCHAR(64) | 该配置项的值。需要注意,`value` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
## INS_TOPICS
| # | **列名** | **数据类型** | **说明** |
| --- | :---------: | ------------ | ------------------------------ |
| 1 | topic_name | BINARY(192) | topic 名称 |
| 2 | db_name | BINARY(64) | topic 相关的 DB |
| 1 | topic_name | VARCHAR(192) | topic 名称 |
| 2 | db_name | VARCHAR(64) | topic 相关的 DB |
| 3 | create_time | TIMESTAMP | topic 的 创建时间 |
| 4 | sql | BINARY(1024) | 创建该 topic 时所用的 SQL 语句 |
| 4 | sql | VARCHAR(1024) | 创建该 topic 时所用的 SQL 语句 |
## INS_SUBSCRIPTIONS
| # | **列名** | **数据类型** | **说明** |
| --- | :------------: | ------------ | ------------------------ |
| 1 | topic_name | BINARY(204) | 被订阅的 topic |
| 2 | consumer_group | BINARY(193) | 订阅者的消费者组 |
| 1 | topic_name | VARCHAR(204) | 被订阅的 topic |
| 2 | consumer_group | VARCHAR(193) | 订阅者的消费者组 |
| 3 | vgroup_id | INT | 消费者被分配的 vgroup id |
| 4 | consumer_id | BIGINT | 消费者的唯一 id |
| 5 | offset | BINARY(64) | 消费者的消费进度 |
| 5 | offset | VARCHAR(64) | 消费者的消费进度 |
| 6 | rows | BIGINT | 消费者的消费的数据条数 |
## INS_STREAMS
| # | **列名** | **数据类型** | **说明** |
| --- | :----------: | ------------ | -------------------------------------------------------------------------------------------------------------------- |
| 1 | stream_name | BINARY(64) | 流计算名称 |
| 1 | stream_name | VARCHAR(64) | 流计算名称 |
| 2 | create_time | TIMESTAMP | 创建时间 |
| 3 | sql | BINARY(1024) | 创建流计算时提供的 SQL 语句 |
| 4 | status | BINARY(20) | 流当前状态 |
| 5 | source_db | BINARY(64) | 源数据库 |
| 6 | target_db | BINARY(64) | 目的数据库 |
| 7 | target_table | BINARY(192) | 流计算写入的目标表 |
| 3 | sql | VARCHAR(1024) | 创建流计算时提供的 SQL 语句 |
| 4 | status | VARCHAR(20) | 流当前状态 |
| 5 | source_db | VARCHAR(64) | 源数据库 |
| 6 | target_db | VARCHAR(64) | 目的数据库 |
| 7 | target_table | VARCHAR(192) | 流计算写入的目标表 |
| 8 | watermark | BIGINT | watermark详见 SQL 手册流式计算。需要注意,`watermark` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
| 9 | trigger | INT | 计算结果推送模式,详见 SQL 手册流式计算。需要注意,`trigger` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
## INS_USER_PRIVILEGES
SYSINFO 属性为 0 的用户不能查看此表。
| # | **列名** | **数据类型** | **说明** |
| --- | :----------: | ------------ | -------------------------------------------------------------------------------------------------------------------- |
| 1 | user_name | VARCHAR(24) | 用户名 |

View File

@ -73,10 +73,10 @@ SHOW CREATE TABLE [db_name.]tb_name
## SHOW DATABASES
```sql
SHOW DATABASES;
SHOW [USER | SYSTEM] DATABASES;
```
显示用户定义的所有数据库。
显示定义的所有数据库。SYSTEM 指定只显示系统数据库。USER 指定只显示用户创建的数据库。
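例如,分别只查看用户创建的数据库和系统数据库:

```sql
SHOW USER DATABASES;
SHOW SYSTEM DATABASES;
```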
## SHOW DNODES
@ -183,10 +183,10 @@ SHOW SUBSCRIPTIONS;
## SHOW TABLES
```sql
SHOW [db_name.]TABLES [LIKE 'pattern'];
SHOW [NORMAL | CHILD] [db_name.]TABLES [LIKE 'pattern'];
```
显示当前数据库下的所有普通表和子表的信息。可以使用 LIKE 对表名进行模糊匹配。
显示当前数据库下的所有普通表和子表的信息。可以使用 LIKE 对表名进行模糊匹配。NORMAL 指定只显示普通表信息, CHILD 指定只显示子表信息。
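例如,只查看当前数据库下名称以 d 开头的子表(示意):

```sql
SHOW CHILD TABLES LIKE 'd%';
```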
## SHOW TABLE DISTRIBUTED

View File

@ -395,6 +395,7 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
### 查询场景配置参数
查询场景下 `filetype` 必须设置为 `query`
`query_times` 指定运行查询的次数,数值类型
查询场景可以通过设置 `kill_slow_query_threshold``kill_slow_query_interval` 参数来控制杀掉慢查询语句的执行threshold 控制执行时间exec_usec超过指定秒数的查询将被 taosBenchmark 杀掉interval 控制检查之间的休眠时间,避免因持续检查慢查询而消耗 CPU两者的单位均为秒。
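下面是一个示意性的配置片段,仅用于展示上述参数的大致用法;字段的完整结构与取值请以 taosBenchmark 实际的查询配置示例为准。

```json
{
  "filetype": "query",
  "query_times": 2,
  "kill_slow_query_threshold": 10,
  "kill_slow_query_interval": 1
}
```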

View File

@ -106,7 +106,7 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
use letter and number only. Default is NOT.
-n, --no-escape No escape char '`'. Default is using it.
-Q, --dot-replace Replace dot character with underline character in
the table name.
the table name. (Version 2.5.3)
-T, --thread-num=THREAD_NUM Number of thread for dump in file. Default is
8.
-C, --cloud=CLOUD_DSN specify a DSN to access TDengine cloud service
@ -116,6 +116,10 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
-?, --help Give this help list
--usage Give a short usage message
-V, --version Print program version
-W, --rename=RENAME-LIST Rename database name with new name during
importing data. RENAME-LIST:
"db1=newDB1|db2=newDB2" means rename db1 to newDB1
and rename db2 to newDB2 (Version 2.5.4)
Mandatory or optional arguments to long options are also mandatory or optional
for any corresponding short options.

View File

@ -9,8 +9,6 @@ TDengine 通过 [taosKeeper](/reference/taosKeeper/) 将服务器的 CPU、内
## TDinsight - 使用监控数据库 + Grafana 对 TDengine 进行监控的解决方案
监控数据库将提供更多的监控项,您可以从 [TDinsight Grafana Dashboard](/reference/tdinsight/) 了解如何使用 TDinsight 方案对 TDengine 进行监控。
我们提供了一个自动化脚本 `TDinsight.sh` 对 TDinsight 进行部署。
下载 `TDinsight.sh`
@ -37,8 +35,6 @@ chmod +x TDinsight.sh
运行程序并重启 Grafana 服务,打开面板:`http://localhost:3000/d/tdinsight`。
更多使用场景和限制请参考[TDinsight](/reference/tdinsight/) 文档。
## log 库
TDinsight dashboard 数据来源于 log 库存放监控数据的默认db可以在 taoskeeper 配置文件中修改,具体参考 [taoskeeper 文档](/reference/taosKeeper)。taoskeeper 启动后会自动创建 log 库,并将监控数据写入到该数据库中。
@ -102,22 +98,22 @@ TDinsight dashboard 数据来源于 log 库存放监控数据的默认db
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|uptime|FLOAT||dnode uptime|
|uptime|FLOAT||dnode uptime,单位:天|
|cpu\_engine|FLOAT||taosd cpu 使用率,从 `/proc/<taosd_pid>/stat` 读取|
|cpu\_system|FLOAT||服务器 cpu 使用率,从 `/proc/stat` 读取|
|cpu\_cores|FLOAT||服务器 cpu 核数|
|mem\_engine|INT||taosd 内存使用率,从 `/proc/<taosd_pid>/status` 读取|
|mem\_system|INT||服务器可用内存|
|mem\_system|INT||服务器可用内存,单位 KB|
|mem\_total|INT||服务器内存总量,单位 KB|
|disk\_engine|INT|||
|disk\_engine|INT||单位 bytes|
|disk\_used|BIGINT||data dir 挂载的磁盘使用量,单位 bytes|
|disk\_total|BIGINT||data dir 挂载的磁盘总容量,单位 bytes|
|net\_in|FLOAT||网络吞吐率,从 `/proc/net/dev` 中读取的 received bytes。单位 kb/s|
|net\_out|FLOAT||网络吞吐率,从 `/proc/net/dev` 中读取的 transmit bytes。单位 kb/s|
|io\_read|FLOAT||io 吞吐率,从 `/proc/<taosd_pid>/io` 中读取的 rchar 与上次数值计算之后,计算得到速度。单位 kb/s|
|io\_write|FLOAT||io 吞吐率,从 `/proc/<taosd_pid>/io` 中读取的 wchar 与上次数值计算之后,计算得到速度。单位 kb/s|
|io\_read\_disk|FLOAT||磁盘 io 吞吐率,从 `/proc/<taosd_pid>/io` 中读取的 read_bytes。单位 kb/s|
|io\_write\_disk|FLOAT||磁盘 io 吞吐率,从 `/proc/<taosd_pid>/io` 中读取的 write_bytes。单位 kb/s|
|net\_in|FLOAT||网络吞吐率,从 `/proc/net/dev` 中读取的 received bytes。单位 byte/s|
|net\_out|FLOAT||网络吞吐率,从 `/proc/net/dev` 中读取的 transmit bytes。单位 byte/s|
|io\_read|FLOAT||io 吞吐率,从 `/proc/<taosd_pid>/io` 中读取的 rchar 与上次数值计算之后,计算得到速度。单位 byte/s|
|io\_write|FLOAT||io 吞吐率,从 `/proc/<taosd_pid>/io` 中读取的 wchar 与上次数值计算之后,计算得到速度。单位 byte/s|
|io\_read\_disk|FLOAT||磁盘 io 吞吐率,从 `/proc/<taosd_pid>/io` 中读取的 read_bytes。单位 byte/s|
|io\_write\_disk|FLOAT||磁盘 io 吞吐率,从 `/proc/<taosd_pid>/io` 中读取的 write_bytes。单位 byte/s|
|req\_select|INT||两个间隔内发生的查询请求数目|
|req\_select\_rate|FLOAT||两个间隔内的查询请求速度 = `req_select / monitorInterval`|
|req\_insert|INT||两个间隔内发生的写入请求,包含的单条数据数目|
@ -146,9 +142,9 @@ TDinsight dashboard 数据来源于 log 库存放监控数据的默认db
|ts|TIMESTAMP||timestamp|
|name|NCHAR||data 目录,一般为 `/var/lib/taos`|
|level|INT||0、1、2 多级存储级别|
|avail|BIGINT||data 目录可用空间|
|used|BIGINT||data 目录已使用空间|
|total|BIGINT||data 目录空间|
|avail|BIGINT||data 目录可用空间。单位 byte|
|used|BIGINT||data 目录已使用空间。单位 byte|
|total|BIGINT||data 目录空间。单位 byte|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
@ -161,9 +157,9 @@ TDinsight dashboard 数据来源于 log 库存放监控数据的默认db
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|name|NCHAR||log 目录名,一般为 `/var/log/taos/`|
|avail|BIGINT||log 目录可用空间|
|used|BIGINT||log 目录已使用空间|
|total|BIGINT||log 目录空间|
|avail|BIGINT||log 目录可用空间。单位 byte|
|used|BIGINT||log 目录已使用空间。单位 byte|
|total|BIGINT||log 目录空间。单位 byte|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
@ -176,9 +172,9 @@ TDinsight dashboard 数据来源于 log 库存放监控数据的默认db
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|name|NCHAR||temp 目录名,一般为 `/tmp/`|
|avail|BIGINT||temp 目录可用空间|
|used|BIGINT||temp 目录已使用空间|
|total|BIGINT||temp 目录空间|
|avail|BIGINT||temp 目录可用空间。单位 byte|
|used|BIGINT||temp 目录已使用空间。单位 byte|
|total|BIGINT||temp 目录空间。单位 byte|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|

View File

@ -14,40 +14,7 @@ Seeq 是制造业和工业互联网IIOT高级分析软件。Seeq 支持在
### Seeq 安装方法
从 [Seeq 官网](https://www.seeq.com/customer-download)下载相关软件,例如 Seeq Server 和 Seeq Data Lab 等。
### Seeq Server 安装和启动
```
tar xvzf seeq-server-xxx.tar.gz
cd seeq-server-installer
sudo ./install
sudo seeq service enable
sudo seeq start
```
### Seeq Data Lab Server 安装和启动
Seeq Data Lab 需要安装在和 Seeq Server 不同的服务器上,并通过配置和 Seeq Server 互联。详细安装配置指令参见[Seeq 官方文档](https://support.seeq.com/space/KB/1034059842)。
```
tar xvf seeq-data-lab-<version>-64bit-linux.tar.gz
sudo seeq-data-lab-installer/install -f /opt/seeq/seeq-data-lab -g /var/opt/seeq -u seeq
sudo seeq config set Network/DataLab/Hostname localhost
sudo seeq config set Network/DataLab/Port 34231 # the port of the Data Lab server (usually 34231)
sudo seeq config set Network/Hostname <value> # the host IP or URL of the main Seeq Server
# If the main Seeq server is configured to listen over HTTPS
sudo seeq config set Network/Webserver/SecurePort 443 # the secure port of the main Seeq Server (usually 443)
# If the main Seeq server is NOT configured to listen over HTTPS
sudo seeq config set Network/Webserver/Port <value>
#On the main Seeq server, open a Seeq Command Prompt and set the hostname of the Data Lab server:
sudo seeq config set Network/DataLab/Hostname <value> # the host IP (not URL) of the Data Lab server
sudo seeq config set Network/DataLab/Port 34231 # the port of the Data Lab server (usually 34231)
```
从 [Seeq 官网](https://www.seeq.com/customer-download)下载相关软件,例如 Seeq Server 和 Seeq Data Lab 等。Seeq Data Lab 需要安装在和 Seeq Server 不同的服务器上,并通过配置和 Seeq Server 互联。详细安装配置指令参见[Seeq 知识库](https://support.seeq.com/kb/latest/cloud/)。
## TDengine 本地实例安装方法

View File

@ -44,17 +44,17 @@ OS name: "windows 10", version: "10.0", arch: "amd64", family: "windows"
<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0 http://maven.apache.org/xsd/settings-1.0.0.xsd">
<!-- 配置本地maven仓库的路径 -->
<!-- 配置本地maven仓库的路径 -->
<localRepository>D:\apache-maven-localRepository</localRepository>
<mirrors>
<!-- 配置阿里云Maven镜像仓库 -->
<mirror>
<id>alimaven</id>
<name>aliyun maven</name>
<url>http://maven.aliyun.com/nexus/content/groups/public/</url>
<mirrorOf>central</mirrorOf>
</mirror>
<mirror>
<id>alimaven</id>
<name>aliyun maven</name>
<url>http://maven.aliyun.com/nexus/content/groups/public/</url>
<mirrorOf>central</mirrorOf>
</mirror>
</mirrors>
<profiles>
@ -126,7 +126,7 @@ https://www.taosdata.com/cn/all-downloads/
修改client的hosts文件C:\Windows\System32\drivers\etc\hosts将server的hostname和ip配置到client的hosts文件中
```
192.168.236.136 td01
192.168.236.136 td01
```
配置完成后在命令行内使用TDengine CLI连接server端

3
examples/go/BUILD.md Normal file
View File

@ -0,0 +1,3 @@
go mod init demo
go mod tidy
go build

View File

@ -108,7 +108,7 @@ int32_t tBufferReserve(SBuffer *pBuffer, int64_t nData, void **ppData);
int32_t tRowBuild(SArray *aColVal, const STSchema *pTSchema, SRow **ppRow);
int32_t tRowGet(SRow *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal);
void tRowDestroy(SRow *pRow);
void tRowSort(SArray *aRowP);
int32_t tRowSort(SArray *aRowP);
int32_t tRowMerge(SArray *aRowP, STSchema *pTSchema, int8_t flag);
int32_t tRowUpsertColData(SRow *pRow, STSchema *pTSchema, SColData *aColData, int32_t nColData, int32_t flag);

View File

@ -51,8 +51,10 @@ typedef enum {
} EGrantType;
int32_t grantCheck(EGrantType grant);
#ifdef TD_ENTERPRISE
#ifndef TD_GRANT_OPTIMIZE
int32_t grantAlterActiveCode(const char* old, const char* new, char* out, int8_t type);
#else
int32_t grantAlterActiveCode(int32_t did, const char* old, const char* new, char* out, int8_t type);
#endif
#ifndef GRANTS_CFG

View File

@ -768,6 +768,8 @@ typedef struct {
char* pAst2;
int64_t deleteMark1;
int64_t deleteMark2;
int32_t sqlLen;
char* sql;
} SMCreateStbReq;
int32_t tSerializeSMCreateStbReq(void* buf, int32_t bufLen, SMCreateStbReq* pReq);
@ -788,10 +790,13 @@ typedef struct {
int8_t source; // 1-taosX or 0-taosClient
int8_t reserved[6];
tb_uid_t suid;
int32_t sqlLen;
char* sql;
} SMDropStbReq;
int32_t tSerializeSMDropStbReq(void* buf, int32_t bufLen, SMDropStbReq* pReq);
int32_t tDeserializeSMDropStbReq(void* buf, int32_t bufLen, SMDropStbReq* pReq);
void tFreeSMDropStbReq(SMDropStbReq *pReq);
typedef struct {
char name[TSDB_TABLE_FNAME_LEN];
@ -801,6 +806,8 @@ typedef struct {
int32_t ttl;
int32_t commentLen;
char* comment;
int32_t sqlLen;
char* sql;
} SMAlterStbReq;
int32_t tSerializeSMAlterStbReq(void* buf, int32_t bufLen, SMAlterStbReq* pReq);
@ -871,10 +878,13 @@ int32_t tDeserializeSCreateAcctReq(void* buf, int32_t bufLen, SCreateAcctReq* pR
typedef struct {
char user[TSDB_USER_LEN];
int32_t sqlLen;
char* sql;
} SDropUserReq, SDropAcctReq;
int32_t tSerializeSDropUserReq(void* buf, int32_t bufLen, SDropUserReq* pReq);
int32_t tDeserializeSDropUserReq(void* buf, int32_t bufLen, SDropUserReq* pReq);
void tFreeSDropUserReq(SDropUserReq *pReq);
typedef struct SIpV4Range{
uint32_t ip;
@ -888,19 +898,21 @@ typedef struct {
SIpWhiteList* cloneIpWhiteList(SIpWhiteList* pIpWhiteList);
typedef struct {
int8_t createType;
int8_t superUser; // denote if it is a super user or not
int8_t sysInfo;
int8_t enable;
char user[TSDB_USER_LEN];
char pass[TSDB_USET_PASSWORD_LEN];
int8_t createType;
int8_t superUser; // denote if it is a super user or not
int8_t sysInfo;
int8_t enable;
char user[TSDB_USER_LEN];
char pass[TSDB_USET_PASSWORD_LEN];
int32_t numIpRanges;
SIpV4Range* pIpRanges;
int32_t sqlLen;
char* sql;
} SCreateUserReq;
int32_t tSerializeSCreateUserReq(void* buf, int32_t bufLen, SCreateUserReq* pReq);
int32_t tDeserializeSCreateUserReq(void* buf, int32_t bufLen, SCreateUserReq* pReq);
void tFreeSCreateUserReq(SCreateUserReq* pReq);
void tFreeSCreateUserReq(SCreateUserReq *pReq);
typedef struct {
int64_t ver;
@ -927,18 +939,20 @@ int32_t tSerializeRetrieveIpWhite(void* buf, int32_t bufLen, SRetrieveIpWhiteReq
int32_t tDeserializeRetrieveIpWhite(void* buf, int32_t bufLen, SRetrieveIpWhiteReq* pReq);
typedef struct {
int8_t alterType;
int8_t superUser;
int8_t sysInfo;
int8_t enable;
char user[TSDB_USER_LEN];
char pass[TSDB_USET_PASSWORD_LEN];
char objname[TSDB_DB_FNAME_LEN]; // db or topic
char tabName[TSDB_TABLE_NAME_LEN];
char* tagCond;
int32_t tagCondLen;
int8_t alterType;
int8_t superUser;
int8_t sysInfo;
int8_t enable;
char user[TSDB_USER_LEN];
char pass[TSDB_USET_PASSWORD_LEN];
char objname[TSDB_DB_FNAME_LEN]; // db or topic
char tabName[TSDB_TABLE_NAME_LEN];
char* tagCond;
int32_t tagCondLen;
int32_t numIpRanges;
SIpV4Range* pIpRanges;
int32_t sqlLen;
char* sql;
} SAlterUserReq;
int32_t tSerializeSAlterUserReq(void* buf, int32_t bufLen, SAlterUserReq* pReq);
@ -1118,6 +1132,8 @@ typedef struct {
int16_t hashPrefix;
int16_t hashSuffix;
int32_t tsdbPageSize;
int32_t sqlLen;
char* sql;
} SCreateDbReq;
int32_t tSerializeSCreateDbReq(void* buf, int32_t bufLen, SCreateDbReq* pReq);
@ -1144,18 +1160,24 @@ typedef struct {
int32_t minRows;
int32_t walRetentionPeriod;
int32_t walRetentionSize;
int32_t sqlLen;
char* sql;
} SAlterDbReq;
int32_t tSerializeSAlterDbReq(void* buf, int32_t bufLen, SAlterDbReq* pReq);
int32_t tDeserializeSAlterDbReq(void* buf, int32_t bufLen, SAlterDbReq* pReq);
void tFreeSAlterDbReq(SAlterDbReq* pReq);
typedef struct {
char db[TSDB_DB_FNAME_LEN];
int8_t ignoreNotExists;
int32_t sqlLen;
char* sql;
} SDropDbReq;
int32_t tSerializeSDropDbReq(void* buf, int32_t bufLen, SDropDbReq* pReq);
int32_t tDeserializeSDropDbReq(void* buf, int32_t bufLen, SDropDbReq* pReq);
void tFreeSDropDbReq(SDropDbReq* pReq);
typedef struct {
char db[TSDB_DB_FNAME_LEN];
@ -1350,10 +1372,13 @@ void tFreeSUserAuthBatchRsp(SUserAuthBatchRsp* pRsp);
typedef struct {
char db[TSDB_DB_FNAME_LEN];
STimeWindow timeRange;
int32_t sqlLen;
char* sql;
} SCompactDbReq;
int32_t tSerializeSCompactDbReq(void* buf, int32_t bufLen, SCompactDbReq* pReq);
int32_t tDeserializeSCompactDbReq(void* buf, int32_t bufLen, SCompactDbReq* pReq);
void tFreeSCompactDbReq(SCompactDbReq *pReq);
typedef struct {
char name[TSDB_FUNC_NAME_LEN];
@ -1933,10 +1958,13 @@ void tFreeSExplainRsp(SExplainRsp* pRsp);
typedef struct {
char fqdn[TSDB_FQDN_LEN]; // end point, hostname:port
int32_t port;
int32_t sqlLen;
char* sql;
} SCreateDnodeReq;
int32_t tSerializeSCreateDnodeReq(void* buf, int32_t bufLen, SCreateDnodeReq* pReq);
int32_t tDeserializeSCreateDnodeReq(void* buf, int32_t bufLen, SCreateDnodeReq* pReq);
void tFreeSCreateDnodeReq(SCreateDnodeReq* pReq);
typedef struct {
int32_t dnodeId;
@ -1944,10 +1972,13 @@ typedef struct {
int32_t port;
int8_t force;
int8_t unsafe;
int32_t sqlLen;
char* sql;
} SDropDnodeReq;
int32_t tSerializeSDropDnodeReq(void* buf, int32_t bufLen, SDropDnodeReq* pReq);
int32_t tDeserializeSDropDnodeReq(void* buf, int32_t bufLen, SDropDnodeReq* pReq);
void tFreeSDropDnodeReq(SDropDnodeReq* pReq);
enum {
RESTORE_TYPE__ALL = 1,
@ -1959,19 +1990,25 @@ enum {
typedef struct {
int32_t dnodeId;
int8_t restoreType;
int32_t sqlLen;
char* sql;
} SRestoreDnodeReq;
int32_t tSerializeSRestoreDnodeReq(void* buf, int32_t bufLen, SRestoreDnodeReq* pReq);
int32_t tDeserializeSRestoreDnodeReq(void* buf, int32_t bufLen, SRestoreDnodeReq* pReq);
void tFreeSRestoreDnodeReq(SRestoreDnodeReq *pReq);
typedef struct {
int32_t dnodeId;
char config[TSDB_DNODE_CONFIG_LEN];
char value[TSDB_DNODE_VALUE_LEN];
int32_t sqlLen;
char* sql;
} SMCfgDnodeReq;
int32_t tSerializeSMCfgDnodeReq(void* buf, int32_t bufLen, SMCfgDnodeReq* pReq);
int32_t tDeserializeSMCfgDnodeReq(void* buf, int32_t bufLen, SMCfgDnodeReq* pReq);
void tFreeSMCfgDnodeReq(SMCfgDnodeReq *pReq);
typedef struct {
char config[TSDB_DNODE_CONFIG_LEN];
@ -1983,12 +2020,15 @@ int32_t tDeserializeSDCfgDnodeReq(void* buf, int32_t bufLen, SDCfgDnodeReq* pReq
typedef struct {
int32_t dnodeId;
int32_t sqlLen;
char* sql;
} SMCreateMnodeReq, SMDropMnodeReq, SDDropMnodeReq, SMCreateQnodeReq, SMDropQnodeReq, SDCreateQnodeReq, SDDropQnodeReq,
SMCreateSnodeReq, SMDropSnodeReq, SDCreateSnodeReq, SDDropSnodeReq;
int32_t tSerializeSCreateDropMQSNodeReq(void* buf, int32_t bufLen, SMCreateQnodeReq* pReq);
int32_t tDeserializeSCreateDropMQSNodeReq(void* buf, int32_t bufLen, SMCreateQnodeReq* pReq);
void tFreeSMCreateQnodeReq(SMCreateQnodeReq *pReq);
void tFreeSDDropQnodeReq(SDDropQnodeReq* pReq);
typedef struct {
int8_t replica;
SReplica replicas[TSDB_MAX_REPLICA];
@ -2023,10 +2063,13 @@ int32_t tDeserializeSKillTransReq(void* buf, int32_t bufLen, SKillTransReq* pReq
typedef struct {
int32_t useless; // useless
int32_t sqlLen;
char* sql;
} SBalanceVgroupReq;
int32_t tSerializeSBalanceVgroupReq(void* buf, int32_t bufLen, SBalanceVgroupReq* pReq);
int32_t tDeserializeSBalanceVgroupReq(void* buf, int32_t bufLen, SBalanceVgroupReq* pReq);
void tFreeSBalanceVgroupReq(SBalanceVgroupReq *pReq);
typedef struct {
int32_t vgId1;
@ -2041,17 +2084,24 @@ typedef struct {
int32_t dnodeId1;
int32_t dnodeId2;
int32_t dnodeId3;
int32_t sqlLen;
char* sql;
} SRedistributeVgroupReq;
int32_t tSerializeSRedistributeVgroupReq(void* buf, int32_t bufLen, SRedistributeVgroupReq* pReq);
int32_t tDeserializeSRedistributeVgroupReq(void* buf, int32_t bufLen, SRedistributeVgroupReq* pReq);
void tFreeSRedistributeVgroupReq(SRedistributeVgroupReq *pReq);
typedef struct {
int32_t useless;
int32_t vgId;
int32_t sqlLen;
char* sql;
} SBalanceVgroupLeaderReq;
int32_t tSerializeSBalanceVgroupLeaderReq(void* buf, int32_t bufLen, SBalanceVgroupLeaderReq* pReq);
int32_t tDeserializeSBalanceVgroupLeaderReq(void* buf, int32_t bufLen, SBalanceVgroupLeaderReq* pReq);
void tFreeSBalanceVgroupLeaderReq(SBalanceVgroupLeaderReq *pReq);
typedef struct {
int32_t vgId;
@ -2445,10 +2495,13 @@ typedef struct {
typedef struct {
char name[TSDB_TOPIC_FNAME_LEN];
int8_t igNotExists;
int32_t sqlLen;
char* sql;
} SMDropTopicReq;
int32_t tSerializeSMDropTopicReq(void* buf, int32_t bufLen, SMDropTopicReq* pReq);
int32_t tDeserializeSMDropTopicReq(void* buf, int32_t bufLen, SMDropTopicReq* pReq);
void tFreeSMDropTopicReq(SMDropTopicReq *pReq);
typedef struct {
char topic[TSDB_TOPIC_FNAME_LEN];
@ -2544,6 +2597,8 @@ typedef struct SVCreateTbReq {
SSchemaWrapper schemaRow;
} ntb;
};
int32_t sqlLen;
char* sql;
} SVCreateTbReq;
int tEncodeSVCreateTbReq(SEncoder* pCoder, const SVCreateTbReq* pReq);
@ -2555,6 +2610,7 @@ static FORCE_INLINE void tdDestroySVCreateTbReq(SVCreateTbReq* req) {
return;
}
taosMemoryFreeClear(req->sql);
taosMemoryFreeClear(req->name);
taosMemoryFreeClear(req->comment);
if (req->type == TSDB_CHILD_TABLE) {
@ -3018,6 +3074,8 @@ typedef struct {
typedef struct {
char name[TSDB_STREAM_FNAME_LEN];
int8_t igNotExists;
int32_t sqlLen;
char* sql;
} SMDropStreamReq;
typedef struct {
@ -3031,12 +3089,20 @@ typedef struct {
int32_t taskId;
} SVDropStreamTaskReq;
typedef struct {
SMsgHead head;
int64_t streamId;
int32_t taskId;
int64_t dataVer;
} SVStreamTaskVerUpdateReq;
typedef struct {
int8_t reserved;
} SVDropStreamTaskRsp;
int32_t tSerializeSMDropStreamReq(void* buf, int32_t bufLen, const SMDropStreamReq* pReq);
int32_t tDeserializeSMDropStreamReq(void* buf, int32_t bufLen, SMDropStreamReq* pReq);
void tFreeSMDropStreamReq(SMDropStreamReq* pReq);
typedef struct {
char name[TSDB_STREAM_FNAME_LEN];
@ -3195,7 +3261,7 @@ typedef struct {
SMsgHead head;
int64_t streamId;
int32_t taskId;
} SVPauseStreamTaskReq;
} SVPauseStreamTaskReq, SVResetStreamTaskReq;
typedef struct {
int8_t reserved;

View File

@ -305,11 +305,11 @@ enum { // WARN: new msg should be appended to segment tail
TD_DEF_MSG_TYPE(TDMT_SYNC_FORCE_FOLLOWER, "sync-force-become-follower", NULL, NULL)
TD_NEW_MSG_SEG(TDMT_VND_STREAM_MSG)
// TD_DEF_MSG_TYPE(TDMT_VND_STREAM_TRIGGER, "vnode-stream-trigger", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_SCAN_HISTORY, "vnode-stream-scan-history", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_SCAN_HISTORY_FINISH, "vnode-stream-scan-history-finish", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_CHECK_POINT_SOURCE, "vnode-stream-checkpoint-source", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_TASK_UPDATE, "vnode-stream-update", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_TASK_RESET, "vnode-stream-reset", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_TASK_CHECK, "vnode-stream-task-check", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_MAX_MSG, "vnd-stream-max", NULL, NULL)

View File

@ -362,6 +362,8 @@
#define TK_WAL 343
#define TK_NK_SPACE 600
#define TK_NK_COMMENT 601
#define TK_NK_ILLEGAL 602

View File

@ -29,7 +29,7 @@
extern "C" {
#endif
#define AUDIT_DETAIL_MAX 16000
#define AUDIT_DETAIL_MAX 65472
typedef struct {
const char *server;
@ -39,7 +39,8 @@ typedef struct {
int32_t auditInit(const SAuditCfg *pCfg);
void auditSend(SJson *pJson);
void auditRecord(SRpcMsg *pReq, int64_t clusterId, char *operation, char *target1, char *target2, char *detail);
void auditRecord(SRpcMsg *pReq, int64_t clusterId, char *operation, char *target1, char *target2,
char *detail, int32_t len);
#ifdef __cplusplus
}

View File

@ -38,6 +38,9 @@ extern "C" {
#define META_READER_NOLOCK 0x1
#define STREAM_STATE_BUFF_HASH 1
#define STREAM_STATE_BUFF_SORT 2
typedef struct SMeta SMeta;
typedef TSKEY (*GetTsFun)(void*);
@ -115,6 +118,7 @@ typedef struct SRowBuffPos {
void* pKey;
bool beFlushed;
bool beUsed;
bool needFree;
} SRowBuffPos;
// tq
@ -334,6 +338,8 @@ typedef struct {
void* db; // rocksdb_t* db;
void* pCur;
int64_t number;
void* pStreamFileState;
int32_t buffIndex;
} SStreamStateCur;
typedef struct SStateStore {
@ -341,7 +347,8 @@ typedef struct SStateStore {
int32_t (*streamStateGetParName)(SStreamState* pState, int64_t groupId, void** pVal);
int32_t (*streamStateAddIfNotExist)(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);
int32_t (*streamStateReleaseBuf)(SStreamState* pState, const SWinKey* key, void* pVal);
int32_t (*streamStateReleaseBuf)(SStreamState* pState, void* pVal, bool used);
int32_t (*streamStateClearBuff)(SStreamState* pState, void* pVal);
void (*streamStateFreeVal)(void* val);
int32_t (*streamStatePut)(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen);
@ -372,7 +379,7 @@ typedef struct SStateStore {
int32_t (*streamStateSessionAddIfNotExist)(SStreamState* pState, SSessionKey* key, TSKEY gap, void** pVal,
int32_t* pVLen);
int32_t (*streamStateSessionPut)(SStreamState* pState, const SSessionKey* key, const void* value, int32_t vLen);
int32_t (*streamStateSessionPut)(SStreamState* pState, const SSessionKey* key, void* value, int32_t vLen);
int32_t (*streamStateSessionGet)(SStreamState* pState, SSessionKey* key, void** pVal, int32_t* pVLen);
int32_t (*streamStateSessionDel)(SStreamState* pState, const SSessionKey* key);
int32_t (*streamStateSessionClear)(SStreamState* pState);
@ -401,7 +408,7 @@ typedef struct SStateStore {
struct SStreamFileState* (*streamFileStateInit)(int64_t memSize, uint32_t keySize, uint32_t rowSize,
uint32_t selectRowSize, GetTsFun fp, void* pFile, TSKEY delMark,
const char* id, int64_t ckId);
const char* id, int64_t ckId, int8_t type);
void (*streamFileStateDestroy)(struct SStreamFileState* pFileState);
void (*streamFileStateClear)(struct SStreamFileState* pFileState);

View File

@ -507,6 +507,7 @@ typedef struct SBalanceVgroupStmt {
typedef struct SBalanceVgroupLeaderStmt {
ENodeType type;
int32_t vgId;
} SBalanceVgroupLeaderStmt;
typedef struct SMergeVgroupStmt {

View File

@ -49,26 +49,30 @@ void streamStateSetNumber(SStreamState* pState, int32_t number);
int32_t streamStateSaveInfo(SStreamState* pState, void* pKey, int32_t keyLen, void* pVal, int32_t vLen);
int32_t streamStateGetInfo(SStreamState* pState, void* pKey, int32_t keyLen, void** pVal, int32_t* pLen);
//session window
int32_t streamStateSessionAddIfNotExist(SStreamState* pState, SSessionKey* key, TSKEY gap, void** pVal, int32_t* pVLen);
int32_t streamStateSessionPut(SStreamState* pState, const SSessionKey* key, const void* value, int32_t vLen);
int32_t streamStateSessionPut(SStreamState* pState, const SSessionKey* key, void* value, int32_t vLen);
int32_t streamStateSessionGet(SStreamState* pState, SSessionKey* key, void** pVal, int32_t* pVLen);
int32_t streamStateSessionDel(SStreamState* pState, const SSessionKey* key);
int32_t streamStateSessionClear(SStreamState* pState);
int32_t streamStateSessionGetKVByCur(SStreamStateCur* pCur, SSessionKey* pKey, void** pVal, int32_t* pVLen);
int32_t streamStateStateAddIfNotExist(SStreamState* pState, SSessionKey* key, char* pKeyData, int32_t keyDataLen,
state_key_cmpr_fn fn, void** pVal, int32_t* pVLen);
int32_t streamStateSessionGetKeyByRange(SStreamState* pState, const SSessionKey* range, SSessionKey* curKey);
SStreamStateCur* streamStateSessionSeekKeyNext(SStreamState* pState, const SSessionKey* key);
SStreamStateCur* streamStateSessionSeekKeyCurrentPrev(SStreamState* pState, const SSessionKey* key);
SStreamStateCur* streamStateSessionSeekKeyCurrentNext(SStreamState* pState, const SSessionKey* key);
//state window
int32_t streamStateStateAddIfNotExist(SStreamState* pState, SSessionKey* key, char* pKeyData, int32_t keyDataLen,
state_key_cmpr_fn fn, void** pVal, int32_t* pVLen);
int32_t streamStateFillPut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen);
int32_t streamStateFillGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);
int32_t streamStateFillDel(SStreamState* pState, const SWinKey* key);
int32_t streamStateAddIfNotExist(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);
int32_t streamStateReleaseBuf(SStreamState* pState, const SWinKey* key, void* pVal);
int32_t streamStateReleaseBuf(SStreamState* pState, void* pVal, bool used);
int32_t streamStateClearBuff(SStreamState* pState, void* pVal);
void streamStateFreeVal(void* val);
SStreamStateCur* streamStateGetAndCheckCur(SStreamState* pState, SWinKey* key);
@ -76,14 +80,11 @@ SStreamStateCur* streamStateSeekKeyNext(SStreamState* pState, const SWinKey* key
SStreamStateCur* streamStateFillSeekKeyNext(SStreamState* pState, const SWinKey* key);
SStreamStateCur* streamStateFillSeekKeyPrev(SStreamState* pState, const SWinKey* key);
void streamStateFreeCur(SStreamStateCur* pCur);
void streamStateResetCur(SStreamStateCur* pCur);
int32_t streamStateGetGroupKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen);
int32_t streamStateGetKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen);
int32_t streamStateGetFirst(SStreamState* pState, SWinKey* key);
int32_t streamStateSeekFirst(SStreamState* pState, SStreamStateCur* pCur);
int32_t streamStateSeekLast(SStreamState* pState, SStreamStateCur* pCur);
int32_t streamStateCurNext(SStreamState* pState, SStreamStateCur* pCur);
int32_t streamStateCurPrev(SStreamState* pState, SStreamStateCur* pCur);
@ -91,6 +92,7 @@ int32_t streamStatePutParName(SStreamState* pState, int64_t groupId, const char*
int32_t streamStateGetParName(SStreamState* pState, int64_t groupId, void** pVal);
void streamStateReloadInfo(SStreamState* pState, TSKEY ts);
SStreamStateCur* createStreamStateCursor();
/***compare func **/

View File

@ -29,7 +29,23 @@ extern "C" {
#ifndef _STREAM_H_
#define _STREAM_H_
typedef struct SStreamTask SStreamTask;
#define ONE_MiB_F (1048576.0)
#define ONE_KiB_F (1024.0)
#define SIZE_IN_MiB(_v) ((_v) / ONE_MiB_F)
#define SIZE_IN_KiB(_v) ((_v) / ONE_KiB_F)
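// Hedged illustration of the helpers above: SIZE_IN_MiB(5242880) == 5.0 and
// SIZE_IN_KiB(2048) == 2.0; presumably used to fill the MiB-denominated fields
// such as STaskStatusEntry.inputQUsed further down in this header.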
#define TASK_DOWNSTREAM_READY 0x0
#define TASK_DOWNSTREAM_NOT_READY 0x1
#define TASK_DOWNSTREAM_NOT_LEADER 0x2
#define TASK_SELF_NEW_STAGE 0x3
#define NODE_ROLE_UNINIT 0x1
#define NODE_ROLE_LEADER 0x2
#define NODE_ROLE_FOLLOWER 0x3
typedef struct SStreamTask SStreamTask;
typedef struct SStreamQueue SStreamQueue;
#define SSTREAM_TASK_VER 2
enum {
@ -64,6 +80,7 @@ enum {
TASK_INPUT_STATUS__NORMAL = 1,
TASK_INPUT_STATUS__BLOCKED,
TASK_INPUT_STATUS__FAILED,
TASK_INPUT_STATUS__REFUSED,
};
enum {
@ -106,6 +123,7 @@ typedef struct {
} SStreamQueueItem;
typedef void FTbSink(SStreamTask* pTask, void* vnode, void* data);
typedef void FSmaSink(void* vnode, int64_t smaId, const SArray* data);
typedef int32_t FTaskExpand(void* ahandle, SStreamTask* pTask, int64_t ver);
typedef struct {
@ -154,8 +172,6 @@ typedef struct {
int64_t size;
} SStreamQueueRes;
void streamFreeQitem(SStreamQueueItem* data);
#if 0
bool streamQueueResEmpty(const SStreamQueueRes* pRes);
int64_t streamQueueResSize(const SStreamQueueRes* pRes);
@ -175,22 +191,9 @@ int32_t streamQueuePush(SStreamQueue1* pQueue, SStreamQueueItem* pItem);
SStreamQueueRes streamQueueGetRes(SStreamQueue1* pQueue);
#endif
typedef struct {
STaosQueue* pQueue;
STaosQall* qall;
void* qItem;
int8_t status;
} SStreamQueue;
int32_t streamInit();
void streamCleanUp();
SStreamQueue* streamQueueOpen(int64_t cap);
void streamQueueClose(SStreamQueue* pQueue, int32_t taskId);
void streamQueueProcessSuccess(SStreamQueue* queue);
void streamQueueProcessFail(SStreamQueue* queue);
void* streamQueueNextItem(SStreamQueue* pQueue);
SStreamDataSubmit* streamDataSubmitNew(SPackedData* pData, int32_t type);
void streamDataSubmitDestroy(SStreamDataSubmit* pDataSubmit);
@ -204,7 +207,7 @@ typedef struct {
int32_t taskId;
int32_t nodeId;
SEpSet epSet;
} STaskDispatcherFixedEp;
} STaskDispatcherFixed;
typedef struct {
char stbFullName[TSDB_TABLE_FNAME_LEN];
@ -222,8 +225,6 @@ typedef struct {
SSHashObj* pTblInfo;
} STaskSinkTb;
typedef void FSmaSink(void* vnode, int64_t smaId, const SArray* data);
typedef struct {
int64_t smaId;
// following are not applicable to encoder and decoder
@ -244,10 +245,10 @@ typedef struct SStreamChildEpInfo {
int64_t stage; // upstream task stage value, denoting whether the upstream node has restarted, changed replica, or transferred
} SStreamChildEpInfo;
typedef struct SStreamTaskKey {
typedef struct STaskId {
int64_t streamId;
int32_t taskId;
} SStreamTaskKey;
int64_t taskId;
} STaskId;
typedef struct SStreamTaskId {
int64_t streamId;
@ -256,19 +257,22 @@ typedef struct SStreamTaskId {
} SStreamTaskId;
typedef struct SCheckpointInfo {
int64_t startTs;
int64_t checkpointId;
int64_t checkpointVer; // latest checkpointId version
int64_t checkpointVer; // latest checkpointId version
int64_t nextProcessVer; // current offset in WAL, not serialize it
int64_t failedId; // record the latest failed checkpoint id
} SCheckpointInfo;
typedef struct SStreamStatus {
int8_t taskStatus;
int8_t downstreamReady; // downstream tasks are all ready now, if this flag is set
int8_t schedStatus;
int8_t keepTaskStatus;
bool appendTranstateBlock; // has append the transfer state data block already, todo: remove it
int8_t timerActive; // timer is active
int8_t pauseAllowed; // allowed task status to be set to be paused
int8_t taskStatus;
int8_t downstreamReady; // downstream tasks are all ready now, if this flag is set
int8_t schedStatus;
int8_t keepTaskStatus;
bool appendTranstateBlock; // has append the transfer state data block already, todo: remove it
int8_t pauseAllowed; // allowed task status to be set to be paused
int32_t timerActive; // timer is active
int32_t inScanHistorySentinel;
} SStreamStatus;
typedef struct SDataRange {
@ -287,21 +291,27 @@ typedef struct SSTaskBasicInfo {
int64_t triggerParam; // in msec
} SSTaskBasicInfo;
typedef struct SStreamDispatchReq SStreamDispatchReq;
typedef struct STokenBucket STokenBucket;
typedef struct SMetaHbInfo SMetaHbInfo;
typedef struct SDispatchMsgInfo {
void* pData; // current dispatch data
SStreamDispatchReq* pData; // current dispatch data
int8_t dispatchMsgType;
int16_t msgType; // dispatch msg type
int32_t retryCount; // retry send data count
int64_t blockingTs; // output blocking timestamp
int64_t startTs; // dispatch start time, record total elapsed time for dispatch
SArray* pRetryList; // current dispatch successfully completed node of downstream
void* pTimer; // used to dispatch data after a given time duration
} SDispatchMsgInfo;
typedef struct STaskOutputInfo {
int8_t type;
typedef struct STaskOutputQueue {
int8_t status;
SStreamQueue* queue;
} STaskOutputInfo;
} STaskOutputQueue;
typedef struct STaskInputInfo {
int8_t status;
int8_t status;
SStreamQueue* queue;
} STaskInputInfo;
@ -310,62 +320,76 @@ typedef struct STaskSchedInfo {
void* pTimer;
} STaskSchedInfo;
typedef struct SSinkTaskRecorder {
typedef struct SSinkRecorder {
int64_t numOfSubmit;
int64_t numOfBlocks;
int64_t numOfRows;
} SSinkTaskRecorder;
int64_t dataSize;
} SSinkRecorder;
typedef struct {
int64_t created;
int64_t init;
int64_t step1Start;
int64_t step2Start;
int64_t sinkStart;
} STaskTimestamp;
typedef struct STaskExecStatisInfo {
int64_t created;
int64_t init;
int64_t start;
int64_t step1Start;
int64_t step2Start;
int32_t updateCount;
int64_t latestUpdateTs;
int32_t processDataBlocks;
int64_t processDataSize;
int32_t dispatch;
int64_t dispatchDataSize;
int32_t checkpoint;
SSinkRecorder sink;
} STaskExecStatisInfo;
typedef struct STokenBucket {
int32_t capacity; // total capacity
int64_t fillTimestamp;// fill timestamp
int32_t numOfToken; // total available tokens
int32_t rate; // number of token per second
} STokenBucket;
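// Hedged reading of STokenBucket (an editorial sketch, not code from this commit):
// a refill step would typically look like
//   delta         = (now - fillTimestamp) * rate / 1000;   // tokens accrued since last fill
//   numOfToken    = MIN(numOfToken + delta, capacity);     // bursts capped at capacity
//   fillTimestamp = now;
// so `rate` bounds sustained dispatch throughput while `capacity` bounds bursts.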
typedef struct SHistoryTaskInfo {
STaskId id;
void* pTimer;
int32_t tickCount;
int32_t retryTimes;
int32_t waitInterval;
} SHistoryTaskInfo;
struct SStreamTask {
int64_t ver;
SStreamTaskId id;
SSTaskBasicInfo info;
STaskOutputInfo outputInfo;
STaskInputInfo inputInfo;
STaskSchedInfo schedInfo;
SDispatchMsgInfo msgInfo;
SStreamStatus status;
SCheckpointInfo chkInfo;
STaskExec exec;
SDataRange dataRange;
SStreamTaskId historyTaskId;
SStreamTaskId streamTaskId;
STaskTimestamp tsInfo;
SArray* pReadyMsgList; // SArray<SStreamChkptReadyInfo*>
TdThreadMutex lock; // secures setting the task status and putting data into inputQ
SArray* pUpstreamInfoList;
// output
typedef struct STaskOutputInfo {
union {
STaskDispatcherFixedEp fixedEpDispatcher;
STaskDispatcherFixed fixedDispatcher;
STaskDispatcherShuffle shuffleDispatcher;
STaskSinkTb tbSink;
STaskSinkSma smaSink;
STaskSinkFetch fetchSink;
};
SSinkTaskRecorder sinkRecorder;
STokenBucket tokenBucket;
int8_t type;
STokenBucket* pTokenBucket;
} STaskOutputInfo;
void* launchTaskTimer;
SMsgCb* pMsgCb; // msg handle
SStreamState* pState; // state backend
SArray* pRspMsgList;
typedef struct SUpstreamInfo {
SArray* pList;
int32_t numOfClosed;
} SUpstreamInfo;
struct SStreamTask {
int64_t ver;
SStreamTaskId id;
SSTaskBasicInfo info;
STaskOutputQueue outputq;
STaskInputInfo inputInfo;
STaskSchedInfo schedInfo;
STaskOutputInfo outputInfo;
SDispatchMsgInfo msgInfo;
SStreamStatus status;
SCheckpointInfo chkInfo;
STaskExec exec;
SDataRange dataRange;
SHistoryTaskInfo hTaskInfo;
STaskId streamTaskId;
STaskExecStatisInfo execInfo;
SArray* pReadyMsgList; // SArray<SStreamChkptReadyInfo*>
TdThreadMutex lock; // secures setting the task status and putting data into inputQ
SMsgCb* pMsgCb; // msg handle
SStreamState* pState; // state backend
SArray* pRspMsgList;
SUpstreamInfo upstreamInfo;
// the following attributes are not serialized
int32_t notReadyTasks;
int32_t numOfWaitingUpstream;
@ -381,11 +405,13 @@ struct SStreamTask {
char reserve[256];
};
typedef struct SMetaHbInfo {
tmr_h hbTmr;
int32_t stopFlag;
int32_t tickCounter;
} SMetaHbInfo;
typedef struct STaskStartInfo {
int64_t startTs;
int64_t readyTs;
int32_t startedAfterNodeUpdate;
SHashObj* pReadyTaskSet; // tasks that are all ready for running stream processing
int32_t elapsedTime;
} STaskStartInfo;
// meta
typedef struct SStreamMeta {
@ -393,22 +419,25 @@ typedef struct SStreamMeta {
TDB* db;
TTB* pTaskDb;
TTB* pCheckpointDb;
SHashObj* pTasks;
SArray* pTaskList; // SArray<task_id*>
SHashObj* pTasksMap;
SArray* pTaskList; // SArray<STaskId*>
void* ahandle;
TXN* txn;
FTaskExpand* expandFunc;
int32_t vgId;
int64_t stage;
int32_t role;
STaskStartInfo startInfo;
SRWLatch lock;
int32_t walScanCounter;
void* streamBackend;
int64_t streamBackendRid;
SHashObj* pTaskBackendUnique;
TdThreadMutex backendMutex;
SMetaHbInfo hbInfo;
int32_t closedTask;
int32_t totalTasks; // this value should be increased when a new task is added into the meta
SMetaHbInfo* pHbInfo;
SHashObj* pUpdateTaskSet;
int32_t numOfStreamTasks; // this value should be increased when a new task is added into the meta
int32_t numOfPausedTasks;
int32_t chkptNotReadyTasks;
int64_t rid;
@ -417,26 +446,25 @@ typedef struct SStreamMeta {
SArray* chkpInUse;
int32_t chkpCap;
SRWLatch chkpDirLock;
int32_t pauseTaskNum;
} SStreamMeta;
int32_t tEncodeStreamEpInfo(SEncoder* pEncoder, const SStreamChildEpInfo* pInfo);
int32_t tDecodeStreamEpInfo(SDecoder* pDecoder, SStreamChildEpInfo* pInfo);
SStreamTask* tNewStreamTask(int64_t streamId, int8_t taskLevel, int8_t fillHistory, int64_t triggerParam,
SArray* pTaskList);
SStreamTask* tNewStreamTask(int64_t streamId, int8_t taskLevel, bool fillHistory, int64_t triggerParam,
SArray* pTaskList, bool hasFillhistory);
int32_t tEncodeStreamTask(SEncoder* pEncoder, const SStreamTask* pTask);
int32_t tDecodeStreamTask(SDecoder* pDecoder, SStreamTask* pTask);
void tFreeStreamTask(SStreamTask* pTask);
int32_t streamTaskInit(SStreamTask* pTask, SStreamMeta* pMeta, SMsgCb* pMsgCb, int64_t ver);
int32_t tDecodeStreamTaskChkInfo(SDecoder* pDecoder, SCheckpointInfo* pChkpInfo);
int32_t tDecodeStreamTaskId(SDecoder* pDecoder, SStreamTaskId* pTaskId);
int32_t tDecodeStreamTaskId(SDecoder* pDecoder, STaskId* pTaskId);
int32_t streamTaskPutDataIntoInputQ(SStreamTask* pTask, SStreamQueueItem* pItem);
int32_t streamTaskPutDataIntoOutputQ(SStreamTask* pTask, SStreamDataBlock* pBlock);
int32_t streamTaskPutTranstateIntoInputQ(SStreamTask* pTask);
bool streamQueueIsFull(const STaosQueue* pQueue, bool inputQ);
bool streamQueueIsFull(const SStreamQueue* pQueue);
typedef struct {
SMsgHead head;
@ -444,11 +472,12 @@ typedef struct {
int32_t taskId;
} SStreamTaskRunReq;
typedef struct {
struct SStreamDispatchReq {
int32_t type;
int64_t stage; // nodeId from upstream task
int64_t streamId;
int32_t taskId;
int32_t msgId; // msg id to identify if the incoming msg from the same sender
int32_t srcVgId;
int32_t upstreamTaskId;
int32_t upstreamChildId;
@ -457,7 +486,7 @@ typedef struct {
int64_t totalLen;
SArray* dataLen; // SArray<int32_t>
SArray* data; // SArray<SRetrieveTableRsp*>
} SStreamDispatchReq;
};
typedef struct {
int64_t streamId;
@ -465,7 +494,9 @@ typedef struct {
int32_t upstreamTaskId;
int32_t downstreamNodeId;
int32_t downstreamTaskId;
int32_t msgId;
int8_t inputStatus;
int64_t stage;
} SStreamDispatchRsp;
typedef struct {
@ -522,7 +553,7 @@ typedef struct {
int32_t downstreamTaskId;
int32_t upstreamNodeId;
int32_t childId;
} SStreamScanHistoryFinishReq, SStreamTransferReq;
} SStreamScanHistoryFinishReq;
int32_t tEncodeStreamScanHistoryFinishReq(SEncoder* pEncoder, const SStreamScanHistoryFinishReq* pReq);
int32_t tDecodeStreamScanHistoryFinishReq(SDecoder* pDecoder, SStreamScanHistoryFinishReq* pReq);
@ -568,9 +599,19 @@ int32_t tEncodeStreamCheckpointReadyMsg(SEncoder* pEncoder, const SStreamCheckpo
int32_t tDecodeStreamCheckpointReadyMsg(SDecoder* pDecoder, SStreamCheckpointReadyMsg* pRsp);
typedef struct STaskStatusEntry {
int64_t streamId;
int32_t taskId;
STaskId id;
int32_t status;
int32_t stage;
int32_t nodeId;
int64_t verStart; // start version in WAL, only valid for source task
int64_t verEnd; // end version in WAL, only valid for source task
int64_t processedVer; // only valid for source task
int64_t activeCheckpointId; // current active checkpoint id
bool checkpointFailed; // denotes whether the checkpoint failed
double inputQUsed; // in MiB
double inputRate;
double sinkQuota; // existed quota size for sink task
double sinkDataSize; // sink to dest data size
} STaskStatusEntry;
typedef struct SStreamHbMsg {
@ -636,15 +677,14 @@ void tDeleteStreamDispatchReq(SStreamDispatchReq* pReq);
int32_t streamSetupScheduleTrigger(SStreamTask* pTask);
int32_t streamProcessRunReq(SStreamTask* pTask);
int32_t streamProcessDispatchMsg(SStreamTask* pTask, SStreamDispatchReq* pReq, SRpcMsg* pMsg, bool exec);
int32_t streamProcessDispatchMsg(SStreamTask* pTask, SStreamDispatchReq* pReq, SRpcMsg* pMsg);
int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, int32_t code);
int32_t streamProcessRetrieveReq(SStreamTask* pTask, SStreamRetrieveReq* pReq, SRpcMsg* pMsg);
SStreamChildEpInfo* streamTaskGetUpstreamTaskEpInfo(SStreamTask* pTask, int32_t taskId);
void streamTaskInputFail(SStreamTask* pTask);
int32_t streamTryExec(SStreamTask* pTask);
int32_t streamExecTask(SStreamTask* pTask);
int32_t streamSchedExec(SStreamTask* pTask);
bool streamTaskShouldStop(const SStreamStatus* pStatus);
bool streamTaskShouldPause(const SStreamStatus* pStatus);
@ -656,10 +696,14 @@ char* createStreamTaskIdStr(int64_t streamId, int32_t taskId);
// recover and fill history
void streamTaskCheckDownstream(SStreamTask* pTask);
int32_t streamTaskLaunchScanHistory(SStreamTask* pTask);
int32_t streamTaskStartScanHistory(SStreamTask* pTask);
int32_t streamTaskCheckStatus(SStreamTask* pTask, int32_t upstreamTaskId, int32_t vgId, int64_t stage);
int32_t streamTaskUpdateEpsetInfo(SStreamTask* pTask, SArray* pNodeList);
void streamTaskResetUpstreamStageInfo(SStreamTask* pTask);
bool streamTaskAllUpstreamClosed(SStreamTask* pTask);
bool streamTaskSetSchedStatusWait(SStreamTask* pTask);
int8_t streamTaskSetSchedStatusActive(SStreamTask* pTask);
int8_t streamTaskSetSchedStatusInActive(SStreamTask* pTask);
int32_t streamTaskStop(SStreamTask* pTask);
int32_t streamSendCheckRsp(const SStreamMeta* pMeta, const SStreamTaskCheckReq* pReq, SStreamTaskCheckRsp* pRsp,
@ -670,14 +714,15 @@ int32_t streamTaskScanHistoryDataComplete(SStreamTask* pTask);
int32_t streamStartScanHistoryAsync(SStreamTask* pTask, int8_t igUntreated);
bool streamHistoryTaskSetVerRangeStep2(SStreamTask* pTask, int64_t latestVer);
int32_t streamQueueGetNumOfItems(const SStreamQueue* pQueue);
int32_t streamQueueGetAvailableSpace(const SStreamQueue* pQueue, int32_t* availNum, double* availSize);
// common
int32_t streamRestoreParam(SStreamTask* pTask);
int32_t streamSetStatusNormal(SStreamTask* pTask);
int32_t streamSetStatusUnint(SStreamTask* pTask);
const char* streamGetTaskStatusStr(int32_t status);
void streamTaskPause(SStreamTask* pTask, SStreamMeta* pMeta);
void streamTaskResume(SStreamTask* pTask, SStreamMeta* pMeta);
void streamTaskHalt(SStreamTask* pTask);
void streamTaskResumeFromHalt(SStreamTask* pTask);
void streamTaskDisablePause(SStreamTask* pTask);
void streamTaskEnablePause(SStreamTask* pTask);
@ -690,6 +735,9 @@ int32_t streamTaskReloadState(SStreamTask* pTask);
void streamTaskCloseUpstreamInput(SStreamTask* pTask, int32_t taskId);
void streamTaskOpenAllUpstreamInput(SStreamTask* pTask);
void streamTaskStatusInit(STaskStatusEntry* pEntry, const SStreamTask* pTask);
void streamTaskStatusCopy(STaskStatusEntry* pDst, const STaskStatusEntry* pSrc);
// source level
int32_t streamSetParamForStreamScannerStep1(SStreamTask* pTask, SVersionRange* pVerRange, STimeWindow* pWindow);
int32_t streamSetParamForStreamScannerStep2(SStreamTask* pTask, SVersionRange* pVerRange, STimeWindow* pWindow);
@ -707,24 +755,27 @@ void streamMetaCleanup();
SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandFunc, int32_t vgId, int64_t stage);
void streamMetaClose(SStreamMeta* streamMeta);
int32_t streamMetaSaveTask(SStreamMeta* pMeta, SStreamTask* pTask); // save to stream meta store
int32_t streamMetaRemoveTask(SStreamMeta* pMeta, int64_t* pKey);
int32_t streamMetaRemoveTask(SStreamMeta* pMeta, STaskId* pKey);
int32_t streamMetaRegisterTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTask, bool* pAdded);
int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId);
int32_t streamMetaGetNumOfTasks(SStreamMeta* pMeta);
int32_t streamMetaGetNumOfStreamTasks(SStreamMeta* pMeta);
SStreamTask* streamMetaAcquireTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId);
void streamMetaReleaseTask(SStreamMeta* pMeta, SStreamTask* pTask);
int32_t streamMetaReopen(SStreamMeta* pMeta, int64_t chkpId);
int32_t streamMetaReopen(SStreamMeta* pMeta);
int32_t streamMetaCommit(SStreamMeta* pMeta);
int32_t streamMetaLoadAllTasks(SStreamMeta* pMeta);
void streamMetaNotifyClose(SStreamMeta* pMeta);
void streamMetaStartHb(SStreamMeta* pMeta);
void streamMetaInitForSnode(SStreamMeta* pMeta);
// checkpoint
int32_t streamProcessCheckpointSourceReq(SStreamTask* pTask, SStreamCheckpointSourceReq* pReq);
int32_t streamProcessCheckpointReadyMsg(SStreamTask* pTask);
void streamTaskClearCheckInfo(SStreamTask* pTask);
int32_t streamAlignTransferState(SStreamTask* pTask);
int32_t streamBuildAndSendDropTaskMsg(SMsgCb* pMsgCb, int32_t vgId, SStreamTaskId* pTaskId);
int32_t streamAddCheckpointSourceRspMsg(SStreamCheckpointSourceReq* pReq, SRpcHandleInfo* pRpcInfo, SStreamTask* pTask,
int8_t isSucceed);
int32_t buildCheckpointSourceRsp(SStreamCheckpointSourceReq* pReq, SRpcHandleInfo* pRpcInfo, SRpcMsg* pMsg,

View File

@ -28,20 +28,33 @@ extern "C" {
#endif
typedef struct SStreamFileState SStreamFileState;
typedef SList SStreamSnapshot;
typedef SList SStreamSnapshot;
typedef void* (*_state_buff_get_fn)(void* pRowBuff, const void* pKey, size_t keyLen);
typedef int32_t (*_state_buff_put_fn)(void* pRowBuff, const void* pKey, size_t keyLen, const void* data, size_t dataLen);
typedef int32_t (*_state_buff_remove_fn)(void* pRowBuff, const void* pKey, size_t keyLen);
typedef int32_t (*_state_buff_remove_by_pos_fn)(SStreamFileState* pState, SRowBuffPos* pPos);
typedef void (*_state_buff_cleanup_fn)(void* pRowBuff);
typedef void* (*_state_buff_create_statekey_fn)(SRowBuffPos* pPos, int64_t num);
typedef int32_t (*_state_file_remove_fn)(SStreamFileState* pFileState, const void* pKey);
typedef int32_t (*_state_file_get_fn)(SStreamFileState* pFileState, void* pKey, void* data, int32_t* pDataLen);
typedef int32_t (*_state_file_clear_fn)(SStreamState* pState);
SStreamFileState* streamFileStateInit(int64_t memSize, uint32_t keySize, uint32_t rowSize, uint32_t selectRowSize,
GetTsFun fp, void* pFile, TSKEY delMark, const char* taskId,
int64_t checkpointId);
int64_t checkpointId, int8_t type);
void streamFileStateDestroy(SStreamFileState* pFileState);
void streamFileStateClear(SStreamFileState* pFileState);
bool needClearDiskBuff(SStreamFileState* pFileState);
void streamFileStateReleaseBuff(SStreamFileState* pFileState, SRowBuffPos* pPos, bool used);
int32_t streamFileStateClearBuff(SStreamFileState* pFileState, SRowBuffPos* pPos);
int32_t getRowBuff(SStreamFileState* pFileState, void* pKey, int32_t keyLen, void** pVal, int32_t* pVLen);
int32_t deleteRowBuff(SStreamFileState* pFileState, const void* pKey, int32_t keyLen);
int32_t getRowBuffByPos(SStreamFileState* pFileState, SRowBuffPos* pPos, void** pVal);
void releaseRowBuffPos(SRowBuffPos* pBuff);
bool hasRowBuff(SStreamFileState* pFileState, void* pKey, int32_t keyLen);
void putFreeBuff(SStreamFileState* pFileState, SRowBuffPos* pPos);
SStreamSnapshot* getSnapshot(SStreamFileState* pFileState);
int32_t flushSnapshot(SStreamFileState* pFileState, SStreamSnapshot* pSnapshot, bool flushState);
@ -52,6 +65,34 @@ int32_t deleteExpiredCheckPoint(SStreamFileState* pFileState, TSKEY mark);
int32_t streamFileStateGeSelectRowSize(SStreamFileState* pFileState);
void streamFileStateReloadInfo(SStreamFileState* pFileState, TSKEY ts);
void* getRowStateBuff(SStreamFileState* pFileState);
void* getStateFileStore(SStreamFileState* pFileState);
bool isDeteled(SStreamFileState* pFileState, TSKEY ts);
bool isFlushedState(SStreamFileState* pFileState, TSKEY ts, TSKEY gap);
SRowBuffPos* getNewRowPosForWrite(SStreamFileState* pFileState);
int32_t getRowStateRowSize(SStreamFileState* pFileState);
// session window
int32_t getSessionWinResultBuff(SStreamFileState* pFileState, SSessionKey* pKey, TSKEY gap, void** pVal, int32_t* pVLen);
int32_t putSessionWinResultBuff(SStreamFileState* pFileState, SRowBuffPos* pPos);
int32_t getSessionFlushedBuff(SStreamFileState* pFileState, SSessionKey* pKey, void** pVal, int32_t* pVLen);
int32_t deleteSessionWinStateBuffFn(void* pBuff, const void *key, size_t keyLen);
int32_t deleteSessionWinStateBuffByPosFn(SStreamFileState* pFileState, SRowBuffPos* pPos);
void sessionWinStateClear(SStreamFileState* pFileState);
void sessionWinStateCleanup(void* pBuff);
SStreamStateCur* sessionWinStateSeekKeyCurrentPrev(SStreamFileState* pFileState, const SSessionKey* pWinKey);
SStreamStateCur* sessionWinStateSeekKeyCurrentNext(SStreamFileState* pFileState, const SSessionKey* pWinKey);
SStreamStateCur* sessionWinStateSeekKeyNext(SStreamFileState* pFileState, const SSessionKey* pWinKey);
int32_t sessionWinStateGetKVByCur(SStreamStateCur* pCur, SSessionKey* pKey, void** pVal, int32_t* pVLen);
int32_t sessionWinStateMoveToNext(SStreamStateCur* pCur);
int32_t sessionWinStateGetKeyByRange(SStreamFileState* pFileState, const SSessionKey* key, SSessionKey* curKey);
// state window
int32_t getStateWinResultBuff(SStreamFileState* pFileState, SSessionKey* key, char* pKeyData, int32_t keyDataLen,
state_key_cmpr_fn fn, void** pVal, int32_t* pVLen);
#ifdef __cplusplus
}
#endif

View File

@ -225,7 +225,10 @@ void syslog(int unused, const char *format, ...);
#endif
#else
// Windows
#define setThreadName(name)
#define setThreadName(name) \
do { \
pthread_setname_np(taosThreadSelf(), (name)); \
} while (0)
#endif
#if defined(_WIN32)

View File

@ -54,6 +54,17 @@ typedef int32_t (*__ext_compar_fn_t)(const void *p1, const void *p2, const void
*/
void taosqsort(void *src, int64_t numOfElem, int64_t size, const void *param, __ext_compar_fn_t comparFn);
/**
 * merge sort, using a standard compare function (no additional parameters)
*
* @param src
* @param numOfElem
* @param size
* @param comparFn
 * @return int32_t 0 for success, non-zero for failure.
*/
int32_t taosMergeSort(void *src, int64_t numOfElem, int64_t size, __compar_fn_t comparFn);
/**
* binary search, with range support
*

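A minimal usage sketch for the new taosMergeSort, assuming it sorts the buffer in place with a qsort-style comparator and returns 0 on success (the header providing the declaration is assumed to be included; the helper below is hypothetical):

static int32_t cmpInt32(const void* a, const void* b) {
  int32_t x = *(const int32_t*)a, y = *(const int32_t*)b;
  return (x < y) ? -1 : (x > y);   // qsort-style three-way comparison
}

static void mergeSortExample(void) {
  int32_t v[4] = {3, 1, 4, 1};
  if (taosMergeSort(v, 4, sizeof(int32_t), cmpInt32) != 0) {
    // non-zero presumably indicates the temporary merge buffer could not be allocated
  }
}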
View File

@ -214,12 +214,19 @@ void taosArrayDestroyEx(SArray* pArray, FDelete fp);
void taosArraySwap(SArray* a, SArray* b);
/**
* sort the array
 * sort the array using qsort
* @param pArray
* @param compar
*/
void taosArraySort(SArray* pArray, __compar_fn_t comparFn);
/**
 * sort the array using merge sort
* @param pArray
* @param compar
*/
int32_t taosArrayMSort(SArray* pArray, __compar_fn_t comparFn);
/**
* search the array
* @param pArray

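A hedged sketch of calling the new taosArrayMSort (same comparator convention as taosArraySort; the reworked tRowSort later in this commit relies on its non-void return to surface failures; the helper below is hypothetical):

static int32_t cmpTs(const void* a, const void* b) {
  int64_t x = *(const int64_t*)a, y = *(const int64_t*)b;
  return (x < y) ? -1 : (x > y);
}

// hypothetical helper: stable-sort an SArray of int64_t timestamps and surface any failure
static int32_t sortTimestamps(SArray* pTs) {
  int32_t code = taosArrayMSort(pTs, cmpTs);
  if (code != TSDB_CODE_SUCCESS) {
    uError("taosArrayMSort failed, code:%d", code);
  }
  return code;
}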
View File

@ -165,6 +165,13 @@ static FORCE_INLINE int32_t tarray2SortInsert(void *arr, const void *elePtr, int
#define TARRAY2_FOREACH_PTR_REVERSE(a, ep) \
for (int32_t __i = (a)->size - 1; __i >= 0 && ((ep) = &(a)->data[__i], 1); __i--)
#define TARRAY2_SORT(a, cmp) \
do { \
if ((a)->size > 1) { \
taosSort((a)->data, (a)->size, sizeof((a)->data[0]), (__compar_fn_t)cmp); \
} \
} while (0)
#ifdef __cplusplus
}
#endif

View File

@ -382,6 +382,7 @@ typedef enum ELogicConditionType {
#define TSDB_MAX_STT_TRIGGER 1
#define TSDB_DEFAULT_SST_TRIGGER 1
#endif
#define TSDB_STT_TRIGGER_ARRAY_SIZE 16 // maximum of TSDB_MAX_STT_TRIGGER of TD_ENTERPRISE and TD_COMMUNITY
#define TSDB_MIN_HASH_PREFIX (2 - TSDB_TABLE_NAME_LEN)
#define TSDB_MAX_HASH_PREFIX (TSDB_TABLE_NAME_LEN - 2)
#define TSDB_DEFAULT_HASH_PREFIX 0

View File

@ -55,6 +55,7 @@ extern int32_t tmrDebugFlag;
extern int32_t uDebugFlag;
extern int32_t rpcDebugFlag;
extern int32_t qDebugFlag;
extern int32_t stDebugFlag;
extern int32_t wDebugFlag;
extern int32_t sDebugFlag;
extern int32_t tsdbDebugFlag;

View File

@ -98,6 +98,9 @@
# enable/disable system monitor
# monitor 1
# enable/disable audit log
# audit 1
# The following parameter is used to limit the maximum number of lines in log files.
# max number of lines per log file
# numOfLogLines 10000000

View File

@ -8,7 +8,7 @@ Type=simple
ExecStart=/usr/bin/taosd
ExecStartPre=/usr/local/taos/bin/startPre.sh
TimeoutStopSec=1000000s
LimitNOFILE=infinity
LimitNOFILE=1048576
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0

View File

@ -89,7 +89,7 @@ else
${build_dir}/bin/taosBenchmark \
${build_dir}/bin/TDinsight.sh \
${build_dir}/bin/tdengine-datasource.zip \
${build_dir}/bin/tdengine-datasource.zip.md5sum"
${build_dir}/bin/tdengine-datasource.zip.md5"
fi
[ -f ${build_dir}/bin/taosx ] && taosx_bin="${build_dir}/bin/taosx"

View File

@ -157,6 +157,10 @@ STscObj* taos_connect_internal(const char* ip, const char* user, const char* pas
tscDebug("new app inst mgr %p, user:%s, ip:%s, port:%d", p, user, epSet.epSet.eps[0].fqdn, epSet.epSet.eps[0].port);
pInst = &p;
} else {
ASSERTS((*pInst) && (*pInst)->pAppHbMgr, "*pInst:%p, pAppHbMgr:%p", *pInst, (*pInst) ? (*pInst)->pAppHbMgr : NULL);
// reset to 0 in case of conn with duplicated user key but its user has ever been dropped.
atomic_store_8(&(*pInst)->pAppHbMgr->connHbFlag, 0);
}
taosThreadMutexUnlock(&appInfo.mutex);

View File

@ -377,6 +377,7 @@ _exit:
for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
pCreateReq = req.pReqs + iReq;
taosMemoryFreeClear(pCreateReq->comment);
taosMemoryFreeClear(pCreateReq->sql);
if (pCreateReq->type == TSDB_CHILD_TABLE) {
taosArrayDestroy(pCreateReq->ctb.tagName);
}

View File

@ -26,8 +26,7 @@
#define EMPTY_BLOCK_POLL_IDLE_DURATION 10
#define DEFAULT_AUTO_COMMIT_INTERVAL 5000
#define OFFSET_IS_RESET_OFFSET(_of) ((_of) < 0)
#define DEFAULT_HEARTBEAT_INTERVAL 3000
struct SMqMgmt {
int8_t inited;
@ -64,7 +63,6 @@ struct tmq_conf_t {
int8_t withTbName;
int8_t snapEnable;
int8_t replayEnable;
bool hbBgEnable;
uint16_t port;
int32_t autoCommitInterval;
char* ip;
@ -85,7 +83,6 @@ struct tmq_t {
int8_t resetOffsetCfg;
int8_t replayEnable;
uint64_t consumerId;
bool hbBgEnable;
tmq_commit_cb* commitCb;
void* commitCbUserParam;
@ -266,8 +263,7 @@ tmq_conf_t* tmq_conf_new() {
conf->withTbName = false;
conf->autoCommit = true;
conf->autoCommitInterval = DEFAULT_AUTO_COMMIT_INTERVAL;
conf->resetOffset = TMQ_OFFSET__RESET_EARLIEST;
conf->hbBgEnable = true;
conf->resetOffset = TMQ_OFFSET__RESET_LATEST;
return conf;
}
@ -824,7 +820,7 @@ void tmqSendHbReq(void* param, void* tmrId) {
OVER:
tDeatroySMqHbReq(&req);
taosTmrReset(tmqSendHbReq, 1000, param, tmqMgmt.timer, &tmq->hbLiveTimer);
taosTmrReset(tmqSendHbReq, DEFAULT_HEARTBEAT_INTERVAL, param, tmqMgmt.timer, &tmq->hbLiveTimer);
taosReleaseRef(tmqMgmt.rsetId, refId);
}
@ -1083,8 +1079,6 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) {
}
taosInitRWLatch(&pTmq->lock);
pTmq->hbBgEnable = conf->hbBgEnable;
// assign consumerId
pTmq->consumerId = tGenIdPI64();
@ -1108,19 +1102,16 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) {
goto _failed;
}
if (pTmq->hbBgEnable) {
int64_t* pRefId = taosMemoryMalloc(sizeof(int64_t));
*pRefId = pTmq->refId;
pTmq->hbLiveTimer = taosTmrStart(tmqSendHbReq, 1000, pRefId, tmqMgmt.timer);
}
int64_t* pRefId = taosMemoryMalloc(sizeof(int64_t));
*pRefId = pTmq->refId;
pTmq->hbLiveTimer = taosTmrStart(tmqSendHbReq, DEFAULT_HEARTBEAT_INTERVAL, pRefId, tmqMgmt.timer);
char buf[TSDB_OFFSET_LEN] = {0};
STqOffsetVal offset = {.type = pTmq->resetOffsetCfg};
tFormatOffset(buf, tListLen(buf), &offset);
tscInfo("consumer:0x%" PRIx64 " is setup, refId:%" PRId64
", groupId:%s, snapshot:%d, autoCommit:%d, commitInterval:%dms, offset:%s, backgroudHB:%d",
pTmq->consumerId, pTmq->refId, pTmq->groupId, pTmq->useSnapshot, pTmq->autoCommit, pTmq->autoCommitInterval,
buf, pTmq->hbBgEnable);
", groupId:%s, snapshot:%d, autoCommit:%d, commitInterval:%dms, offset:%s",
pTmq->consumerId, pTmq->refId, pTmq->groupId, pTmq->useSnapshot, pTmq->autoCommit, pTmq->autoCommitInterval, buf);
return pTmq;
@ -1382,7 +1373,7 @@ END:
taosReleaseRef(tmqMgmt.rsetId, refId);
FAIL:
tsem_post(&tmq->rspSem);
if(tmq) tsem_post(&tmq->rspSem);
taosMemoryFree(pParam);
if(pMsg) taosMemoryFreeClear(pMsg->pData);
if(pMsg) taosMemoryFreeClear(pMsg->pEpSet);

View File

@ -47,7 +47,8 @@ void printSubResults(void* pRes, int32_t* totalRows) {
int32_t precision = taos_result_precision(pRes);
taos_print_row(buf, row, fields, numOfFields);
*totalRows += 1;
printf("vgId: %d, offset: %lld, precision: %d, row content: %s\n", vgId, offset, precision, buf);
std::cout << "vgId:" << vgId << ", offset:" << offset << ", precision:" << precision << ", row content:" << buf
<< std::endl;
}
// taos_free_result(pRes);
@ -832,7 +833,7 @@ TEST(clientCase, projection_query_tables) {
for(int32_t i = 0; i < 1000000; ++i) {
char t[512] = {0};
sprintf(t, "insert into t1 values(%ld, %ld)", start + i, i);
sprintf(t, "insert into t1 values(now, %d)", i);
while(1) {
void* p = taos_query(pConn, t);
code = taos_errno(p);
@ -1167,16 +1168,19 @@ TEST(clientCase, tmq_commit) {
}
for(int i = 0; i < numOfAssign; i++){
printf("assign i:%d, vgId:%d, offset:%lld, start:%lld, end:%lld\n", i, pAssign[i].vgId, pAssign[i].currentOffset, pAssign[i].begin, pAssign[i].end);
tmq_topic_assignment* pa = &pAssign[i];
std::cout << "assign i:" << i << ", vgId:" << pa->vgId << ", offset:" << pa->currentOffset << ", start:%"
<< pa->begin << ", end:%" << pa->end << std::endl;
int64_t committed = tmq_committed(tmq, topicName, pAssign[i].vgId);
printf("committed vgId:%d, committed:%lld\n", pAssign[i].vgId, committed);
int64_t committed = tmq_committed(tmq, topicName, pa->vgId);
std::cout << "committed vgId:" << pa->vgId << " committed:" << committed << std::endl;
int64_t position = tmq_position(tmq, topicName, pAssign[i].vgId);
printf("position vgId:%d, position:%lld\n", pAssign[i].vgId, position);
tmq_offset_seek(tmq, topicName, pAssign[i].vgId, 1);
position = tmq_position(tmq, topicName, pAssign[i].vgId);
printf("after seek 1, position vgId:%d, position:%lld\n", pAssign[i].vgId, position);
int64_t position = tmq_position(tmq, topicName, pa->vgId);
std::cout << "position vgId:" << pa->vgId << ", position:" << position << std::endl;
tmq_offset_seek(tmq, topicName, pa->vgId, 1);
position = tmq_position(tmq, topicName, pa->vgId);
std::cout << "after seek 1, position vgId:" << pa->vgId << " position:" << position << std::endl;
}
while (1) {
@ -1191,12 +1195,14 @@ TEST(clientCase, tmq_commit) {
tmq_commit_sync(tmq, pRes);
for(int i = 0; i < numOfAssign; i++) {
int64_t committed = tmq_committed(tmq, topicName, pAssign[i].vgId);
printf("committed vgId:%d, committed:%lld\n", pAssign[i].vgId, committed);
std::cout << "committed vgId:" << pAssign[i].vgId << " , committed:" << committed << std::endl;
if(committed > 0){
int32_t code = tmq_commit_offset_sync(tmq, topicName, pAssign[i].vgId, 4);
printf("tmq_commit_offset_sync vgId:%d, offset:4, code:%d\n", pAssign[i].vgId, code);
int64_t committed = tmq_committed(tmq, topicName, pAssign[i].vgId);
printf("after tmq_commit_offset_sync, committed vgId:%d, committed:%lld\n", pAssign[i].vgId, committed);
std::cout << "after tmq_commit_offset_sync, committed vgId:" << pAssign[i].vgId << ", committed:" << committed
<< std::endl;
}
}
if (pRes != NULL) {
@ -1212,7 +1218,12 @@ TEST(clientCase, tmq_commit) {
taos_close(pConn);
fprintf(stderr, "%d msg consumed, include %d rows\n", msgCnt, totalRows);
}
namespace {
void doPrintInfo(tmq_topic_assignment* pa, int32_t index) {
std::cout << "assign i:" << index << ", vgId:" << pa->vgId << ", offset:%" << pa->currentOffset << ", start:%"
<< pa->begin << ", end:%" << pa->end << std::endl;
}
}
TEST(clientCase, td_25129) {
// taos_options(TSDB_OPTION_CONFIGDIR, "~/first/cfg");
@ -1264,7 +1275,7 @@ TEST(clientCase, td_25129) {
}
for(int i = 0; i < numOfAssign; i++){
printf("assign i:%d, vgId:%d, offset:%lld, start:%lld, end:%lld\n", i, pAssign[i].vgId, pAssign[i].currentOffset, pAssign[i].begin, pAssign[i].end);
doPrintInfo(&pAssign[i], i);
}
// tmq_offset_seek(tmq, "tp", pAssign[0].vgId, 4);
@ -1281,7 +1292,7 @@ TEST(clientCase, td_25129) {
}
for(int i = 0; i < numOfAssign; i++){
printf("assign i:%d, vgId:%d, offset:%lld, start:%lld, end:%lld\n", i, pAssign[i].vgId, pAssign[i].currentOffset, pAssign[i].begin, pAssign[i].end);
doPrintInfo(&pAssign[i], i);
}
tmq_free_assignment(pAssign);
@ -1298,7 +1309,7 @@ TEST(clientCase, td_25129) {
for(int i = 0; i < numOfAssign; i++){
int64_t committed = tmq_committed(tmq, topicName, pAssign[i].vgId);
printf("assign i:%d, vgId:%d, committed:%lld, offset:%lld, start:%lld, end:%lld\n", i, pAssign[i].vgId, committed, pAssign[i].currentOffset, pAssign[i].begin, pAssign[i].end);
doPrintInfo(&pAssign[i], i);
}
while (1) {
@ -1328,7 +1339,7 @@ TEST(clientCase, td_25129) {
}
for(int i = 0; i < numOfAssign; i++){
printf("assign i:%d, vgId:%d, offset:%lld, start:%lld, end:%lld\n", i, pAssign[i].vgId, pAssign[i].currentOffset, pAssign[i].begin, pAssign[i].end);
doPrintInfo(&pAssign[i], i);
}
} else {
for(int i = 0; i < numOfAssign; i++) {
@ -1364,7 +1375,7 @@ TEST(clientCase, td_25129) {
}
for(int i = 0; i < numOfAssign; i++){
printf("assign i:%d, vgId:%d, offset:%lld, start:%lld, end:%lld\n", i, pAssign[i].vgId, pAssign[i].currentOffset, pAssign[i].begin, pAssign[i].end);
doPrintInfo(&pAssign[i], i);
}
tmq_free_assignment(pAssign);

View File

@ -159,11 +159,15 @@ static const SSysDbTableSchema streamSchema[] = {
static const SSysDbTableSchema streamTaskSchema[] = {
{.name = "stream_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "task_id", .bytes = 32, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "node_type", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "task_id", .bytes = 16 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "node_type", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "node_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
{.name = "level", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "level", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "status", .bytes = 15 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "stage", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
{.name = "in_queue", .bytes = 20, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
// {.name = "out_queue", .bytes = 20, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "info", .bytes = 25, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
};
static const SSysDbTableSchema userTblsSchema[] = {

View File

@ -2360,27 +2360,26 @@ void trimDataBlock(SSDataBlock* pBlock, int32_t totalRows, const bool* pBoolList
int32_t maxRows = 0;
size_t numOfCols = taosArrayGetSize(pBlock->pDataBlock);
for (int32_t i = 0; i < numOfCols; ++i) {
SColumnInfoData* pDst = taosArrayGet(pBlock->pDataBlock, i);
// it is a reserved column for scalar function, and no data in this column yet.
if (pDst->pData == NULL) {
continue;
}
if (!pBoolList) {
for (int32_t i = 0; i < numOfCols; ++i) {
SColumnInfoData* pDst = taosArrayGet(pBlock->pDataBlock, i);
// it is a reserved column for scalar function, and no data in this column yet.
if (pDst->pData == NULL) {
continue;
}
int32_t numOfRows = 0;
if (IS_VAR_DATA_TYPE(pDst->info.type)) {
pDst->varmeta.length = 0;
int32_t numOfRows = 0;
if (IS_VAR_DATA_TYPE(pDst->info.type)) {
pDst->varmeta.length = 0;
}
}
}
if (NULL == pBoolList) {
return;
}
for (int32_t i = 0; i < numOfCols; ++i) {
SColumnInfoData* pDst = taosArrayGet(pBlock->pDataBlock, i);
// it is a reserved column for scalar function, and no data in this column yet.
if (pDst->pData == NULL) {
if (pDst->pData == NULL || (IS_VAR_DATA_TYPE(pDst->info.type) && pDst->varmeta.length == 0)) {
continue;
}

View File

@ -610,9 +610,13 @@ _exit:
return code;
}
void tRowSort(SArray *aRowP) {
if (TARRAY_SIZE(aRowP) <= 1) return;
taosArraySort(aRowP, tRowPCmprFn);
int32_t tRowSort(SArray *aRowP) {
if (TARRAY_SIZE(aRowP) <= 1) return 0;
int32_t code = taosArrayMSort(aRowP, tRowPCmprFn);
if (code != TSDB_CODE_SUCCESS) {
uError("taosArrayMSort failed caused by %d", code);
}
return code;
}
int32_t tRowMerge(SArray *aRowP, STSchema *pTSchema, int8_t flag) {
@ -3590,5 +3594,5 @@ void (*tColDataCalcSMA[])(SColData *pColData, int64_t *sum, int64_t *max, int64_
NULL, // TSDB_DATA_TYPE_DECIMAL
NULL, // TSDB_DATA_TYPE_BLOB
NULL, // TSDB_DATA_TYPE_MEDIUMBLOB
NULL // TSDB_DATA_TYPE_GEOMETRY
tColDataCalcSMAVarType // TSDB_DATA_TYPE_GEOMETRY
};

View File

@ -244,8 +244,8 @@ int32_t tsTtlBatchDropNum = 10000; // number of tables dropped per batch
// internal
int32_t tsTransPullupInterval = 2;
int32_t tsMqRebalanceInterval = 2;
int32_t tsStreamCheckpointTickInterval = 600;
int32_t tsStreamNodeCheckInterval = 10;
int32_t tsStreamCheckpointTickInterval = 300;
int32_t tsStreamNodeCheckInterval = 30;
int32_t tsTtlUnit = 86400;
int32_t tsTtlPushIntervalSec = 10;
int32_t tsTrimVDbIntervalSec = 60 * 60; // interval of trimming db in all vgroups
@ -269,7 +269,7 @@ int8_t tsS3Enabled = false;
int32_t tsS3BlockSize = 4096; // number of tsdb pages
int32_t tsS3BlockCacheSize = 16; // number of blocks
int32_t tsCheckpointInterval = 20;
int32_t tsCheckpointInterval = 300;
#ifndef _STORAGE
int32_t taosSetTfsCfg(SConfig *pCfg) {
@ -411,6 +411,7 @@ static int32_t taosAddServerLogCfg(SConfig *pCfg) {
if (cfgAddInt32(pCfg, "idxDebugFlag", idxDebugFlag, 0, 255, CFG_SCOPE_SERVER) != 0) return -1;
if (cfgAddInt32(pCfg, "tdbDebugFlag", tdbDebugFlag, 0, 255, CFG_SCOPE_SERVER) != 0) return -1;
if (cfgAddInt32(pCfg, "metaDebugFlag", metaDebugFlag, 0, 255, 0) != CFG_SCOPE_SERVER) return -1;
if (cfgAddInt32(pCfg, "stDebugFlag", stDebugFlag, 0, 255, CFG_SCOPE_SERVER) != 0) return -1;
return 0;
}
@ -649,7 +650,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
if (cfgAddBool(pCfg, "disableStream", tsDisableStream, CFG_SCOPE_SERVER) != 0) return -1;
if (cfgAddInt64(pCfg, "streamBufferSize", tsStreamBufferSize, 0, INT64_MAX, CFG_SCOPE_SERVER) != 0) return -1;
if (cfgAddInt64(pCfg, "checkpointInterval", tsCheckpointInterval, 0, INT64_MAX, CFG_SCOPE_SERVER) != 0) return -1;
if (cfgAddInt64(pCfg, "checkpointInterval", tsStreamCheckpointTickInterval, 60, 1200, CFG_SCOPE_SERVER) != 0) return -1;
if (cfgAddInt32(pCfg, "cacheLazyLoadThreshold", tsCacheLazyLoadThreshold, 0, 100000, CFG_SCOPE_SERVER) != 0)
return -1;
@ -863,6 +864,7 @@ static void taosSetServerLogCfg(SConfig *pCfg) {
idxDebugFlag = cfgGetItem(pCfg, "idxDebugFlag")->i32;
tdbDebugFlag = cfgGetItem(pCfg, "tdbDebugFlag")->i32;
metaDebugFlag = cfgGetItem(pCfg, "metaDebugFlag")->i32;
stDebugFlag = cfgGetItem(pCfg, "stDebugFlag")->i32;
}
static int32_t taosSetSlowLogScope(char *pScope) {
@ -1709,15 +1711,22 @@ void taosCfgDynamicOptions(const char *option, const char *value) {
return;
}
if (strcasecmp(option, "asynclog") == 0) {
int32_t newAsynclog = atoi(value);
uInfo("asynclog set from %d to %d", tsAsyncLog, newAsynclog);
tsAsyncLog = newAsynclog;
return;
}
const char *options[] = {
"dDebugFlag", "vDebugFlag", "mDebugFlag", "wDebugFlag", "sDebugFlag", "tsdbDebugFlag", "tqDebugFlag",
"fsDebugFlag", "udfDebugFlag", "smaDebugFlag", "idxDebugFlag", "tdbDebugFlag", "tmrDebugFlag", "uDebugFlag",
"smaDebugFlag", "rpcDebugFlag", "qDebugFlag", "metaDebugFlag", "jniDebugFlag",
"smaDebugFlag", "rpcDebugFlag", "qDebugFlag", "metaDebugFlag", "jniDebugFlag", "stDebugFlag",
};
int32_t *optionVars[] = {
&dDebugFlag, &vDebugFlag, &mDebugFlag, &wDebugFlag, &sDebugFlag, &tsdbDebugFlag, &tqDebugFlag,
&fsDebugFlag, &udfDebugFlag, &smaDebugFlag, &idxDebugFlag, &tdbDebugFlag, &tmrDebugFlag, &uDebugFlag,
&smaDebugFlag, &rpcDebugFlag, &qDebugFlag, &metaDebugFlag, &jniDebugFlag,
&smaDebugFlag, &rpcDebugFlag, &qDebugFlag, &metaDebugFlag, &jniDebugFlag, &stDebugFlag,
};
int32_t optionSize = tListLen(options);
@ -1770,6 +1779,7 @@ void taosSetAllDebugFlag(int32_t flag, bool rewrite) {
taosSetDebugFlag(&idxDebugFlag, "idxDebugFlag", flag, rewrite);
taosSetDebugFlag(&tdbDebugFlag, "tdbDebugFlag", flag, rewrite);
taosSetDebugFlag(&metaDebugFlag, "metaDebugFlag", flag, rewrite);
taosSetDebugFlag(&stDebugFlag, "stDebugFlag", flag, rewrite);
uInfo("all debug flag are set to %d", flag);
}

View File

@ -30,6 +30,32 @@
#include "tlog.h"
#define DECODESQL() \
do { \
if(!tDecodeIsEnd(&decoder)){ \
if(tDecodeI32(&decoder, &pReq->sqlLen) < 0) return -1; \
if(pReq->sqlLen > 0){ \
if (tDecodeBinaryAlloc(&decoder, (void **)&pReq->sql, NULL) < 0) return -1; \
} \
} \
} while (0)
#define ENCODESQL() \
do { \
if (pReq->sqlLen > 0 && pReq->sql != NULL){ \
if (tEncodeI32(&encoder, pReq->sqlLen) < 0) return -1; \
if (tEncodeBinary(&encoder, pReq->sql, pReq->sqlLen) < 0) return -1; \
} \
} while (0)
#define FREESQL() \
do { \
if(pReq->sql != NULL){ \
taosMemoryFree(pReq->sql); \
} \
pReq->sql = NULL; \
} while (0)
static int32_t tDecodeSVAlterTbReqCommon(SDecoder *pDecoder, SVAlterTbReq *pReq);
static int32_t tDecodeSBatchDeleteReqCommon(SDecoder *pDecoder, SBatchDeleteReq *pReq);
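A hedged sketch of the serializer pattern these three macros support, modeled on the tSerializeSDropUserReq / tDeserializeSDropUserReq / tFreeSDropUserReq changes below; SFooReq, its fields, and the function names are hypothetical:

// hypothetical request type used only for illustration
typedef struct {
  char    name[TSDB_TABLE_FNAME_LEN];
  int32_t sqlLen;
  char   *sql;
} SFooReq;

int32_t tSerializeSFooReq(void *buf, int32_t bufLen, SFooReq *pReq) {
  SEncoder encoder = {0};
  tEncoderInit(&encoder, buf, bufLen);
  if (tStartEncode(&encoder) < 0) return -1;
  if (tEncodeCStr(&encoder, pReq->name) < 0) return -1;
  ENCODESQL();  // appends sqlLen + sql only when a statement was attached
  tEndEncode(&encoder);
  int32_t tlen = encoder.pos;
  tEncoderClear(&encoder);
  return tlen;
}

int32_t tDeserializeSFooReq(void *buf, int32_t bufLen, SFooReq *pReq) {
  SDecoder decoder = {0};
  tDecoderInit(&decoder, buf, bufLen);
  if (tStartDecode(&decoder) < 0) return -1;
  if (tDecodeCStrTo(&decoder, pReq->name) < 0) return -1;
  DECODESQL();  // reads sql only if bytes remain, so older messages still decode
  tEndDecode(&decoder);
  tDecoderClear(&decoder);
  return 0;
}

void tFreeSFooReq(SFooReq *pReq) { FREESQL(); }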
@ -561,6 +587,8 @@ int32_t tSerializeSMCreateStbReq(void *buf, int32_t bufLen, SMCreateStbReq *pReq
if (tEncodeI64(&encoder, pReq->deleteMark1) < 0) return -1;
if (tEncodeI64(&encoder, pReq->deleteMark2) < 0) return -1;
ENCODESQL();
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
@ -656,6 +684,8 @@ int32_t tDeserializeSMCreateStbReq(void *buf, int32_t bufLen, SMCreateStbReq *pR
if (tDecodeI64(&decoder, &pReq->deleteMark1) < 0) return -1;
if (tDecodeI64(&decoder, &pReq->deleteMark2) < 0) return -1;
DECODESQL();
tEndDecode(&decoder);
tDecoderClear(&decoder);
return 0;
@ -668,6 +698,7 @@ void tFreeSMCreateStbReq(SMCreateStbReq *pReq) {
taosMemoryFreeClear(pReq->pComment);
taosMemoryFreeClear(pReq->pAst1);
taosMemoryFreeClear(pReq->pAst2);
FREESQL();
}
int32_t tSerializeSMDropStbReq(void *buf, int32_t bufLen, SMDropStbReq *pReq) {
@ -682,6 +713,7 @@ int32_t tSerializeSMDropStbReq(void *buf, int32_t bufLen, SMDropStbReq *pReq) {
if (tEncodeI8(&encoder, pReq->reserved[i]) < 0) return -1;
}
if (tEncodeI64(&encoder, pReq->suid) < 0) return -1;
ENCODESQL();
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
@ -702,12 +734,18 @@ int32_t tDeserializeSMDropStbReq(void *buf, int32_t bufLen, SMDropStbReq *pReq)
}
if (tDecodeI64(&decoder, &pReq->suid) < 0) return -1;
DECODESQL();
tEndDecode(&decoder);
tDecoderClear(&decoder);
return 0;
}
void tFreeSMDropStbReq(SMDropStbReq *pReq) {
FREESQL();
}
int32_t tSerializeSMAlterStbReq(void *buf, int32_t bufLen, SMAlterStbReq *pReq) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
@ -727,6 +765,7 @@ int32_t tSerializeSMAlterStbReq(void *buf, int32_t bufLen, SMAlterStbReq *pReq)
if (pReq->commentLen > 0) {
if (tEncodeCStr(&encoder, pReq->comment) < 0) return -1;
}
ENCODESQL();
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
@ -767,6 +806,8 @@ int32_t tDeserializeSMAlterStbReq(void *buf, int32_t bufLen, SMAlterStbReq *pReq
if (tDecodeCStrTo(&decoder, pReq->comment) < 0) return -1;
}
DECODESQL();
tEndDecode(&decoder);
tDecoderClear(&decoder);
return 0;
@ -776,6 +817,7 @@ void tFreeSMAltertbReq(SMAlterStbReq *pReq) {
taosArrayDestroy(pReq->pFields);
pReq->pFields = NULL;
taosMemoryFreeClear(pReq->comment);
FREESQL();
}
int32_t tSerializeSEpSet(void *buf, int32_t bufLen, const SEpSet *pEpset) {
@ -1084,7 +1126,7 @@ int32_t tDeserializeSNotifyReq(void *buf, int32_t bufLen, SNotifyReq *pReq) {
}
code = 0;
_exit:
tEndDecode(&decoder);
tDecoderClear(&decoder);
@ -1426,6 +1468,7 @@ int32_t tSerializeSDropUserReq(void *buf, int32_t bufLen, SDropUserReq *pReq) {
if (tStartEncode(&encoder) < 0) return -1;
if (tEncodeCStr(&encoder, pReq->user) < 0) return -1;
ENCODESQL();
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
@ -1439,12 +1482,17 @@ int32_t tDeserializeSDropUserReq(void *buf, int32_t bufLen, SDropUserReq *pReq)
if (tStartDecode(&decoder) < 0) return -1;
if (tDecodeCStrTo(&decoder, pReq->user) < 0) return -1;
DECODESQL();
tEndDecode(&decoder);
tDecoderClear(&decoder);
return 0;
}
void tFreeSDropUserReq(SDropUserReq *pReq) {
FREESQL();
}
SIpWhiteList *cloneIpWhiteList(SIpWhiteList *pIpWhiteList) {
if (pIpWhiteList == NULL) return NULL;
@ -1470,6 +1518,8 @@ int32_t tSerializeSCreateUserReq(void *buf, int32_t bufLen, SCreateUserReq *pReq
if (tEncodeU32(&encoder, pReq->pIpRanges[i].ip) < 0) return -1;
if (tEncodeU32(&encoder, pReq->pIpRanges[i].mask) < 0) return -1;
}
ENCODESQL();
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
@ -1495,15 +1545,13 @@ int32_t tDeserializeSCreateUserReq(void *buf, int32_t bufLen, SCreateUserReq *pR
if (tDecodeU32(&decoder, &(pReq->pIpRanges[i].ip)) < 0) return -1;
if (tDecodeU32(&decoder, &(pReq->pIpRanges[i].mask)) < 0) return -1;
}
DECODESQL();
tEndDecode(&decoder);
tDecoderClear(&decoder);
return 0;
}
void tFreeSCreateUserReq(SCreateUserReq *pReq) { taosMemoryFree(pReq->pIpRanges); }
int32_t tSerializeSUpdateIpWhite(void *buf, int32_t bufLen, SUpdateIpWhite *pReq) {
// impl later
SEncoder encoder = {0};
@ -1602,6 +1650,7 @@ int32_t tSerializeRetrieveIpWhite(void *buf, int32_t bufLen, SRetrieveIpWhiteReq
tEncoderClear(&encoder);
return tlen;
}
int32_t tDeserializeRetrieveIpWhite(void *buf, int32_t bufLen, SRetrieveIpWhiteReq *pReq) {
SDecoder decoder = {0};
tDecoderInit(&decoder, buf, bufLen);
@ -1614,6 +1663,11 @@ int32_t tDeserializeRetrieveIpWhite(void *buf, int32_t bufLen, SRetrieveIpWhiteR
return 0;
}
void tFreeSCreateUserReq(SCreateUserReq *pReq) {
FREESQL();
taosMemoryFreeClear(pReq->pIpRanges);
}
int32_t tSerializeSAlterUserReq(void *buf, int32_t bufLen, SAlterUserReq *pReq) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
@ -1637,6 +1691,7 @@ int32_t tSerializeSAlterUserReq(void *buf, int32_t bufLen, SAlterUserReq *pReq)
if (tEncodeU32(&encoder, pReq->pIpRanges[i].ip) < 0) return -1;
if (tEncodeU32(&encoder, pReq->pIpRanges[i].mask) < 0) return -1;
}
ENCODESQL();
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
@ -1673,6 +1728,7 @@ int32_t tDeserializeSAlterUserReq(void *buf, int32_t bufLen, SAlterUserReq *pReq
if (tDecodeU32(&decoder, &(pReq->pIpRanges[i].ip)) < 0) return -1;
if (tDecodeU32(&decoder, &(pReq->pIpRanges[i].mask)) < 0) return -1;
}
DECODESQL();
tEndDecode(&decoder);
tDecoderClear(&decoder);
@ -1682,6 +1738,7 @@ int32_t tDeserializeSAlterUserReq(void *buf, int32_t bufLen, SAlterUserReq *pReq
void tFreeSAlterUserReq(SAlterUserReq *pReq) {
taosMemoryFreeClear(pReq->tagCond);
taosMemoryFree(pReq->pIpRanges);
FREESQL();
}
int32_t tSerializeSGetUserAuthReq(void *buf, int32_t bufLen, SGetUserAuthReq *pReq) {
@ -2041,6 +2098,7 @@ int32_t tSerializeSCreateDropMQSNodeReq(void *buf, int32_t bufLen, SMCreateQnode
if (tStartEncode(&encoder) < 0) return -1;
if (tEncodeI32(&encoder, pReq->dnodeId) < 0) return -1;
ENCODESQL();
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
@ -2054,12 +2112,21 @@ int32_t tDeserializeSCreateDropMQSNodeReq(void *buf, int32_t bufLen, SMCreateQno
if (tStartDecode(&decoder) < 0) return -1;
if (tDecodeI32(&decoder, &pReq->dnodeId) < 0) return -1;
DECODESQL();
tEndDecode(&decoder);
tDecoderClear(&decoder);
return 0;
}
void tFreeSMCreateQnodeReq(SMCreateQnodeReq *pReq){
FREESQL();
}
void tFreeSDDropQnodeReq(SDDropQnodeReq* pReq) {
FREESQL();
}
int32_t tSerializeSDropDnodeReq(void *buf, int32_t bufLen, SDropDnodeReq *pReq) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
@ -2070,6 +2137,7 @@ int32_t tSerializeSDropDnodeReq(void *buf, int32_t bufLen, SDropDnodeReq *pReq)
if (tEncodeI32(&encoder, pReq->port) < 0) return -1;
if (tEncodeI8(&encoder, pReq->force) < 0) return -1;
if (tEncodeI8(&encoder, pReq->unsafe) < 0) return -1;
ENCODESQL();
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
@ -2092,12 +2160,17 @@ int32_t tDeserializeSDropDnodeReq(void *buf, int32_t bufLen, SDropDnodeReq *pReq
pReq->unsafe = false;
}
DECODESQL();
tEndDecode(&decoder);
tDecoderClear(&decoder);
return 0;
}
void tFreeSDropDnodeReq(SDropDnodeReq *pReq) {
FREESQL();
}
int32_t tSerializeSRestoreDnodeReq(void *buf, int32_t bufLen, SRestoreDnodeReq *pReq) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
@ -2105,6 +2178,7 @@ int32_t tSerializeSRestoreDnodeReq(void *buf, int32_t bufLen, SRestoreDnodeReq *
if (tStartEncode(&encoder) < 0) return -1;
if (tEncodeI32(&encoder, pReq->dnodeId) < 0) return -1;
if (tEncodeI8(&encoder, pReq->restoreType) < 0) return -1;
ENCODESQL();
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
@ -2119,12 +2193,17 @@ int32_t tDeserializeSRestoreDnodeReq(void *buf, int32_t bufLen, SRestoreDnodeReq
if (tStartDecode(&decoder) < 0) return -1;
if (tDecodeI32(&decoder, &pReq->dnodeId) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->restoreType) < 0) return -1;
DECODESQL();
tEndDecode(&decoder);
tDecoderClear(&decoder);
return 0;
}
void tFreeSRestoreDnodeReq(SRestoreDnodeReq *pReq) {
FREESQL();
}
int32_t tSerializeSMCfgDnodeReq(void *buf, int32_t bufLen, SMCfgDnodeReq *pReq) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
@ -2133,6 +2212,7 @@ int32_t tSerializeSMCfgDnodeReq(void *buf, int32_t bufLen, SMCfgDnodeReq *pReq)
if (tEncodeI32(&encoder, pReq->dnodeId) < 0) return -1;
if (tEncodeCStr(&encoder, pReq->config) < 0) return -1;
if (tEncodeCStr(&encoder, pReq->value) < 0) return -1;
ENCODESQL();
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
@ -2148,12 +2228,17 @@ int32_t tDeserializeSMCfgDnodeReq(void *buf, int32_t bufLen, SMCfgDnodeReq *pReq
if (tDecodeI32(&decoder, &pReq->dnodeId) < 0) return -1;
if (tDecodeCStrTo(&decoder, pReq->config) < 0) return -1;
if (tDecodeCStrTo(&decoder, pReq->value) < 0) return -1;
DECODESQL();
tEndDecode(&decoder);
tDecoderClear(&decoder);
return 0;
}
void tFreeSMCfgDnodeReq(SMCfgDnodeReq *pReq) {
FREESQL();
}
int32_t tSerializeSDCfgDnodeReq(void *buf, int32_t bufLen, SDCfgDnodeReq *pReq) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
@ -2188,6 +2273,7 @@ int32_t tSerializeSCreateDnodeReq(void *buf, int32_t bufLen, SCreateDnodeReq *pR
if (tStartEncode(&encoder) < 0) return -1;
if (tEncodeCStr(&encoder, pReq->fqdn) < 0) return -1;
if (tEncodeI32(&encoder, pReq->port) < 0) return -1;
ENCODESQL();
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
@ -2202,12 +2288,17 @@ int32_t tDeserializeSCreateDnodeReq(void *buf, int32_t bufLen, SCreateDnodeReq *
if (tStartDecode(&decoder) < 0) return -1;
if (tDecodeCStrTo(&decoder, pReq->fqdn) < 0) return -1;
if (tDecodeI32(&decoder, &pReq->port) < 0) return -1;
DECODESQL();
tEndDecode(&decoder);
tDecoderClear(&decoder);
return 0;
}
void tFreeSCreateDnodeReq(SCreateDnodeReq *pReq) {
FREESQL();
}
int32_t tSerializeSCreateFuncReq(void *buf, int32_t bufLen, SCreateFuncReq *pReq) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
@ -2695,6 +2786,8 @@ int32_t tSerializeSCreateDbReq(void *buf, int32_t bufLen, SCreateDbReq *pReq) {
}
if (tEncodeI32(&encoder, pReq->tsdbPageSize) < 0) return -1;
if (tEncodeI32(&encoder, pReq->keepTimeOffset) < 0) return -1;
ENCODESQL();
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
@ -2762,6 +2855,8 @@ int32_t tDeserializeSCreateDbReq(void *buf, int32_t bufLen, SCreateDbReq *pReq)
if (tDecodeI32(&decoder, &pReq->keepTimeOffset) < 0) return -1;
}
DECODESQL();
tEndDecode(&decoder);
tDecoderClear(&decoder);
@ -2771,6 +2866,7 @@ int32_t tDeserializeSCreateDbReq(void *buf, int32_t bufLen, SCreateDbReq *pReq)
void tFreeSCreateDbReq(SCreateDbReq *pReq) {
taosArrayDestroy(pReq->pRetensions);
pReq->pRetensions = NULL;
FREESQL();
}
int32_t tSerializeSAlterDbReq(void *buf, int32_t bufLen, SAlterDbReq *pReq) {
@ -2800,6 +2896,7 @@ int32_t tSerializeSAlterDbReq(void *buf, int32_t bufLen, SAlterDbReq *pReq) {
if (tEncodeI32(&encoder, pReq->walRetentionPeriod) < 0) return -1;
if (tEncodeI32(&encoder, pReq->walRetentionSize) < 0) return -1;
if (tEncodeI32(&encoder, pReq->keepTimeOffset) < 0) return -1;
ENCODESQL();
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
@ -2847,12 +2944,18 @@ int32_t tDeserializeSAlterDbReq(void *buf, int32_t bufLen, SAlterDbReq *pReq) {
if (!tDecodeIsEnd(&decoder)) {
if (tDecodeI32(&decoder, &pReq->keepTimeOffset) < 0) return -1;
}
DECODESQL();
tEndDecode(&decoder);
tDecoderClear(&decoder);
return 0;
}
void tFreeSAlterDbReq(SAlterDbReq *pReq) {
FREESQL();
}
int32_t tSerializeSDropDbReq(void *buf, int32_t bufLen, SDropDbReq *pReq) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
@ -2860,6 +2963,7 @@ int32_t tSerializeSDropDbReq(void *buf, int32_t bufLen, SDropDbReq *pReq) {
if (tStartEncode(&encoder) < 0) return -1;
if (tEncodeCStr(&encoder, pReq->db) < 0) return -1;
if (tEncodeI8(&encoder, pReq->ignoreNotExists) < 0) return -1;
ENCODESQL();
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
@ -2874,12 +2978,17 @@ int32_t tDeserializeSDropDbReq(void *buf, int32_t bufLen, SDropDbReq *pReq) {
if (tStartDecode(&decoder) < 0) return -1;
if (tDecodeCStrTo(&decoder, pReq->db) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->ignoreNotExists) < 0) return -1;
DECODESQL();
tEndDecode(&decoder);
tDecoderClear(&decoder);
return 0;
}
void tFreeSDropDbReq(SDropDbReq *pReq) {
FREESQL();
}
int32_t tSerializeSDropDbRsp(void *buf, int32_t bufLen, SDropDbRsp *pRsp) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
@ -3134,6 +3243,7 @@ int32_t tSerializeSCompactDbReq(void *buf, int32_t bufLen, SCompactDbReq *pReq)
if (tEncodeCStr(&encoder, pReq->db) < 0) return -1;
if (tEncodeI64(&encoder, pReq->timeRange.skey) < 0) return -1;
if (tEncodeI64(&encoder, pReq->timeRange.ekey) < 0) return -1;
ENCODESQL();
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
@ -3149,12 +3259,17 @@ int32_t tDeserializeSCompactDbReq(void *buf, int32_t bufLen, SCompactDbReq *pReq
if (tDecodeCStrTo(&decoder, pReq->db) < 0) return -1;
if (tDecodeI64(&decoder, &pReq->timeRange.skey) < 0) return -1;
if (tDecodeI64(&decoder, &pReq->timeRange.ekey) < 0) return -1;
DECODESQL();
tEndDecode(&decoder);
tDecoderClear(&decoder);
return 0;
}
void tFreeSCompactDbReq(SCompactDbReq *pReq) {
FREESQL();
}
int32_t tSerializeSUseDbRspImp(SEncoder *pEncoder, const SUseDbRsp *pRsp) {
if (tEncodeCStr(pEncoder, pRsp->db) < 0) return -1;
if (tEncodeI64(pEncoder, pRsp->uid) < 0) return -1;
@ -4305,6 +4420,7 @@ int32_t tSerializeSMDropTopicReq(void *buf, int32_t bufLen, SMDropTopicReq *pReq
if (tStartEncode(&encoder) < 0) return -1;
if (tEncodeCStr(&encoder, pReq->name) < 0) return -1;
if (tEncodeI8(&encoder, pReq->igNotExists) < 0) return -1;
ENCODESQL();
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
@ -4319,12 +4435,17 @@ int32_t tDeserializeSMDropTopicReq(void *buf, int32_t bufLen, SMDropTopicReq *pR
if (tStartDecode(&decoder) < 0) return -1;
if (tDecodeCStrTo(&decoder, pReq->name) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->igNotExists) < 0) return -1;
DECODESQL();
tEndDecode(&decoder);
tDecoderClear(&decoder);
return 0;
}
void tFreeSMDropTopicReq(SMDropTopicReq *pReq) {
FREESQL();
}
int32_t tSerializeSMDropCgroupReq(void *buf, int32_t bufLen, SMDropCgroupReq *pReq) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
@ -5190,6 +5311,7 @@ int32_t tSerializeSBalanceVgroupReq(void *buf, int32_t bufLen, SBalanceVgroupReq
if (tStartEncode(&encoder) < 0) return -1;
if (tEncodeI32(&encoder, pReq->useless) < 0) return -1;
ENCODESQL();
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
@ -5203,18 +5325,25 @@ int32_t tDeserializeSBalanceVgroupReq(void *buf, int32_t bufLen, SBalanceVgroupR
if (tStartDecode(&decoder) < 0) return -1;
if (tDecodeI32(&decoder, &pReq->useless) < 0) return -1;
DECODESQL();
tEndDecode(&decoder);
tDecoderClear(&decoder);
return 0;
}
void tFreeSBalanceVgroupReq(SBalanceVgroupReq *pReq) {
FREESQL();
}
int32_t tSerializeSBalanceVgroupLeaderReq(void *buf, int32_t bufLen, SBalanceVgroupLeaderReq *pReq) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
if (tStartEncode(&encoder) < 0) return -1;
if (tEncodeI32(&encoder, pReq->useless) < 0) return -1;
if (tEncodeI32(&encoder, pReq->vgId) < 0) return -1;
ENCODESQL();
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
@ -5228,12 +5357,21 @@ int32_t tDeserializeSBalanceVgroupLeaderReq(void *buf, int32_t bufLen, SBalanceV
if (tStartDecode(&decoder) < 0) return -1;
if (tDecodeI32(&decoder, &pReq->useless) < 0) return -1;
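// vgId is decoded only when bytes remain, presumably so payloads from older encoders that never wrote this trailing field still parse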
if(!tDecodeIsEnd(&decoder)){
if (tDecodeI32(&decoder, &pReq->vgId) < 0) return -1;
}
DECODESQL();
tEndDecode(&decoder);
tDecoderClear(&decoder);
return 0;
}
void tFreeSBalanceVgroupLeaderReq(SBalanceVgroupLeaderReq *pReq) {
FREESQL();
}
int32_t tSerializeSMergeVgroupReq(void *buf, int32_t bufLen, SMergeVgroupReq *pReq) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
@ -5270,6 +5408,7 @@ int32_t tSerializeSRedistributeVgroupReq(void *buf, int32_t bufLen, SRedistribut
if (tEncodeI32(&encoder, pReq->dnodeId1) < 0) return -1;
if (tEncodeI32(&encoder, pReq->dnodeId2) < 0) return -1;
if (tEncodeI32(&encoder, pReq->dnodeId3) < 0) return -1;
ENCODESQL();
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
@ -5286,12 +5425,17 @@ int32_t tDeserializeSRedistributeVgroupReq(void *buf, int32_t bufLen, SRedistrib
if (tDecodeI32(&decoder, &pReq->dnodeId1) < 0) return -1;
if (tDecodeI32(&decoder, &pReq->dnodeId2) < 0) return -1;
if (tDecodeI32(&decoder, &pReq->dnodeId3) < 0) return -1;
DECODESQL();
tEndDecode(&decoder);
tDecoderClear(&decoder);
return 0;
}
void tFreeSRedistributeVgroupReq(SRedistributeVgroupReq *pReq) {
FREESQL();
}
int32_t tSerializeSSplitVgroupReq(void *buf, int32_t bufLen, SSplitVgroupReq *pReq) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
@ -5914,6 +6058,7 @@ int32_t tDeserializeSOperatorParam(SDecoder *pDecoder, SOperatorParam *pOpParam)
if (uidNum > 0) {
pScan->pUidList = taosArrayInit(uidNum, sizeof(int64_t));
if (NULL == pScan->pUidList) return -1;
for (int32_t m = 0; m < uidNum; ++m) {
if (tDecodeI64(pDecoder, &uid) < 0) return -1;
taosArrayPush(pScan->pUidList, &uid);
@ -5930,6 +6075,7 @@ int32_t tDeserializeSOperatorParam(SDecoder *pDecoder, SOperatorParam *pOpParam)
int32_t childrenNum = 0;
if (tDecodeI32(pDecoder, &childrenNum) < 0) return -1;
if (childrenNum > 0) {
pOpParam->pChildren = taosArrayInit(childrenNum, POINTER_BYTES);
if (NULL == pOpParam->pChildren) return -1;
@ -6812,6 +6958,8 @@ int32_t tSerializeSMDropStreamReq(void *buf, int32_t bufLen, const SMDropStreamR
if (tEncodeCStr(&encoder, pReq->name) < 0) return -1;
if (tEncodeI8(&encoder, pReq->igNotExists) < 0) return -1;
ENCODESQL();
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
@ -6827,12 +6975,18 @@ int32_t tDeserializeSMDropStreamReq(void *buf, int32_t bufLen, SMDropStreamReq *
if (tDecodeCStrTo(&decoder, pReq->name) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->igNotExists) < 0) return -1;
DECODESQL();
tEndDecode(&decoder);
tDecoderClear(&decoder);
return 0;
}
void tFreeSMDropStreamReq(SMDropStreamReq *pReq) {
FREESQL();
}
int32_t tSerializeSMRecoverStreamReq(void *buf, int32_t bufLen, const SMRecoverStreamReq *pReq) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
@ -6974,6 +7128,11 @@ int tEncodeSVCreateTbReq(SEncoder *pCoder, const SVCreateTbReq *pReq) {
} else {
ASSERT(0);
}
//ENCODESQL
if(pReq->sqlLen > 0 && pReq->sql != NULL) {
if (tEncodeI32(pCoder, pReq->sqlLen) < 0) return -1;
if (tEncodeBinary(pCoder, pReq->sql, pReq->sqlLen) < 0) return -1;
}
tEndEncode(pCoder);
return 0;
@ -7017,6 +7176,14 @@ int tDecodeSVCreateTbReq(SDecoder *pCoder, SVCreateTbReq *pReq) {
ASSERT(0);
}
//DECODESQL
if(!tDecodeIsEnd(pCoder)){
if(tDecodeI32(pCoder, &pReq->sqlLen) < 0) return -1;
if(pReq->sqlLen > 0){
if (tDecodeBinaryAlloc(pCoder, (void**)&pReq->sql, NULL) < 0) return -1;
}
}
tEndDecode(pCoder);
return 0;
}
@ -7038,6 +7205,11 @@ void tDestroySVCreateTbReq(SVCreateTbReq *pReq, int32_t flags) {
if (pReq->ntb.schemaRow.pSchema) taosMemoryFree(pReq->ntb.schemaRow.pSchema);
}
}
if(pReq->sql != NULL){
taosMemoryFree(pReq->sql);
}
pReq->sql = NULL;
}
int tEncodeSVCreateTbBatchReq(SEncoder *pCoder, const SVCreateTbBatchReq *pReq) {

View File

@ -756,7 +756,8 @@ int32_t taosTimeCountIntervalForFill(int64_t skey, int64_t ekey, int64_t interva
}
int64_t taosTimeTruncate(int64_t ts, const SInterval* pInterval) {
if (pInterval->sliding == 0 && pInterval->interval == 0) {
if (pInterval->sliding == 0) {
ASSERT(pInterval->interval == 0);
return ts;
}

View File

@ -21,7 +21,7 @@ static int32_t dmStartMgmt(SDnodeMgmt *pMgmt) {
if (dmStartStatusThread(pMgmt) != 0) {
return -1;
}
#if defined(TD_ENTERPRISE) && !defined(_TD_DARWIN_64)
#if defined(TD_ENTERPRISE)
if (dmStartNotifyThread(pMgmt) != 0) {
return -1;
}
@ -39,7 +39,9 @@ static void dmStopMgmt(SDnodeMgmt *pMgmt) {
pMgmt->pData->stopped = true;
dmStopMonitorThread(pMgmt);
dmStopStatusThread(pMgmt);
#if defined(TD_ENTERPRISE)
dmStopNotifyThread(pMgmt);
#endif
dmStopCrashReportThread(pMgmt);
}

View File

@ -80,15 +80,18 @@ int32_t mmProcessDropReq(const SMgmtInputOpt *pInput, SRpcMsg *pMsg) {
if (pInput->pData->dnodeId != 0 && dropReq.dnodeId != pInput->pData->dnodeId) {
terrno = TSDB_CODE_INVALID_OPTION;
dGError("failed to drop mnode since %s", terrstr());
tFreeSMCreateQnodeReq(&dropReq);
return -1;
}
SMnodeOpt option = {.deploy = false};
if (mmWriteFile(pInput->path, &option) != 0) {
dGError("failed to write mnode file since %s", terrstr());
tFreeSMCreateQnodeReq(&dropReq);
return -1;
}
tFreeSMCreateQnodeReq(&dropReq);
return 0;
}
@ -213,6 +216,7 @@ SArray *mmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_STOP_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_CHECK_POINT_SOURCE_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_TASK_UPDATE_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_TASK_RESET_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_STREAM_HEARTBEAT, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_CONFIG_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;

View File

@ -39,15 +39,18 @@ int32_t qmProcessCreateReq(const SMgmtInputOpt *pInput, SRpcMsg *pMsg) {
if (pInput->pData->dnodeId != 0 && createReq.dnodeId != pInput->pData->dnodeId) {
terrno = TSDB_CODE_INVALID_OPTION;
dError("failed to create qnode since %s", terrstr());
tFreeSMCreateQnodeReq(&createReq);
return -1;
}
bool deployed = true;
if (dmWriteFile(pInput->path, pInput->name, deployed) != 0) {
dError("failed to write qnode file since %s", terrstr());
tFreeSMCreateQnodeReq(&createReq);
return -1;
}
tFreeSMCreateQnodeReq(&createReq);
return 0;
}
@ -61,15 +64,18 @@ int32_t qmProcessDropReq(const SMgmtInputOpt *pInput, SRpcMsg *pMsg) {
if (pInput->pData->dnodeId != 0 && dropReq.dnodeId != pInput->pData->dnodeId) {
terrno = TSDB_CODE_INVALID_OPTION;
dError("failed to drop qnode since %s", terrstr());
tFreeSMCreateQnodeReq(&dropReq);
return -1;
}
bool deployed = false;
if (dmWriteFile(pInput->path, pInput->name, deployed) != 0) {
dError("failed to write qnode file since %s", terrstr());
tFreeSMCreateQnodeReq(&dropReq);
return -1;
}
tFreeSMCreateQnodeReq(&dropReq);
return 0;
}

View File

@ -28,15 +28,18 @@ int32_t smProcessCreateReq(const SMgmtInputOpt *pInput, SRpcMsg *pMsg) {
if (pInput->pData->dnodeId != 0 && createReq.dnodeId != pInput->pData->dnodeId) {
terrno = TSDB_CODE_INVALID_OPTION;
dError("failed to create snode since %s", terrstr());
tFreeSMCreateQnodeReq(&createReq);
return -1;
}
bool deployed = true;
if (dmWriteFile(pInput->path, pInput->name, deployed) != 0) {
dError("failed to write snode file since %s", terrstr());
tFreeSMCreateQnodeReq(&createReq);
return -1;
}
tFreeSMCreateQnodeReq(&createReq);
return 0;
}
@ -50,15 +53,18 @@ int32_t smProcessDropReq(const SMgmtInputOpt *pInput, SRpcMsg *pMsg) {
if (pInput->pData->dnodeId != 0 && dropReq.dnodeId != pInput->pData->dnodeId) {
terrno = TSDB_CODE_INVALID_OPTION;
dError("failed to drop snode since %s", terrstr());
tFreeSMCreateQnodeReq(&dropReq);
return -1;
}
bool deployed = false;
if (dmWriteFile(pInput->path, pInput->name, deployed) != 0) {
dError("failed to write snode file since %s", terrstr());
tFreeSMCreateQnodeReq(&dropReq);
return -1;
}
tFreeSMCreateQnodeReq(&dropReq);
return 0;
}

View File

@ -817,13 +817,13 @@ SArray *vmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_SCAN_HISTORY_FINISH_RSP, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_TASK_CHECK, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_TASK_CHECK_RSP, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
// if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_TRIGGER, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_PAUSE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_RESUME, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_STOP, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_CHECK_POINT_SOURCE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_CHECKPOINT_READY, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_TASK_UPDATE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_TASK_RESET, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_REPLICA, vmPutMsgToMgmtQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_CONFIG, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;

View File

@ -646,6 +646,7 @@ typedef struct {
char name[TSDB_STREAM_FNAME_LEN];
// ctl
SRWLatch lock;
// create info
int64_t createTime;
int64_t updateTime;

View File

@ -40,6 +40,7 @@ SHashObj *mndDupTopicHash(SHashObj *pOld);
int32_t mndValidateUserAuthInfo(SMnode *pMnode, SUserAuthVersion *pUsers, int32_t numOfUses, void **ppRsp,
int32_t *pRspLen);
int32_t mndUserRemoveDb(SMnode *pMnode, STrans *pTrans, char *db);
int32_t mndUserRemoveStb(SMnode *pMnode, STrans *pTrans, char *stb);
int32_t mndUserRemoveTopic(SMnode *pMnode, STrans *pTrans, char *topic);
int32_t mndUserDupObj(SUserObj *pUser, SUserObj *pNew);

View File

@ -759,45 +759,10 @@ static int32_t mndProcessCreateDbReq(SRpcMsg *pReq) {
code = mndCreateDb(pMnode, pReq, &createReq, pUser);
if (code == 0) code = TSDB_CODE_ACTION_IN_PROGRESS;
char detail[3000] = {0};
char tmp[100] = {0};
mndBuildAuditDetailInt32(detail, tmp, "buffer:%d", createReq.buffer);
mndBuildAuditDetailInt32(detail, tmp, "cacheLast:%d", createReq.cacheLast);
mndBuildAuditDetailInt32(detail, tmp, "cacheLastSize:%d", createReq.cacheLastSize);
mndBuildAuditDetailInt32(detail, tmp, "compression:%d", createReq.compression);
mndBuildAuditDetailInt32(detail, tmp, "daysPerFile:%d", createReq.daysPerFile);
mndBuildAuditDetailInt32(detail, tmp, "daysToKeep0:%d", createReq.daysToKeep0);
mndBuildAuditDetailInt32(detail, tmp, "daysToKeep1:%d", createReq.daysToKeep1);
mndBuildAuditDetailInt32(detail, tmp, "daysToKeep2:%d", createReq.daysToKeep2);
mndBuildAuditDetailInt32(detail, tmp, "keepTimeOffset:%d", createReq.keepTimeOffset);
mndBuildAuditDetailInt32(detail, tmp, "hashPrefix:%d", createReq.hashPrefix);
mndBuildAuditDetailInt32(detail, tmp, "hashSuffix:%d", createReq.hashSuffix);
mndBuildAuditDetailInt32(detail, tmp, "ignoreExist:%d", createReq.ignoreExist);
mndBuildAuditDetailInt32(detail, tmp, "maxRows:%d", createReq.maxRows);
mndBuildAuditDetailInt32(detail, tmp, "minRows:%d", createReq.minRows);
mndBuildAuditDetailInt32(detail, tmp, "numOfRetensions:%d", createReq.numOfRetensions);
mndBuildAuditDetailInt32(detail, tmp, "numOfStables:%d", createReq.numOfStables);
mndBuildAuditDetailInt32(detail, tmp, "numOfVgroups:%d", createReq.numOfVgroups);
mndBuildAuditDetailInt32(detail, tmp, "pages:%d", createReq.pages);
mndBuildAuditDetailInt32(detail, tmp, "pageSize:%d", createReq.pageSize);
mndBuildAuditDetailInt32(detail, tmp, "precision:%d", createReq.precision);
mndBuildAuditDetailInt32(detail, tmp, "replications:%d", createReq.replications);
mndBuildAuditDetailInt32(detail, tmp, "schemaless:%d", createReq.schemaless);
mndBuildAuditDetailInt32(detail, tmp, "sstTrigger:%d", createReq.sstTrigger);
mndBuildAuditDetailInt32(detail, tmp, "strict:%d", createReq.strict);
mndBuildAuditDetailInt32(detail, tmp, "tsdbPageSize:%d", createReq.tsdbPageSize);
mndBuildAuditDetailInt32(detail, tmp, "walFsyncPeriod:%d", createReq.walFsyncPeriod);
mndBuildAuditDetailInt32(detail, tmp, "walLevel:%d", createReq.walLevel);
mndBuildAuditDetailInt32(detail, tmp, "walRetentionPeriod:%d", createReq.walRetentionPeriod);
mndBuildAuditDetailInt32(detail, tmp, "walRetentionSize:%" PRId64, createReq.walRetentionSize);
mndBuildAuditDetailInt32(detail, tmp, "walRollPeriod:%d", createReq.walRollPeriod);
mndBuildAuditDetailInt32(detail, tmp, "walSegmentSize:%" PRId64, createReq.walSegmentSize);
SName name = {0};
tNameFromString(&name, createReq.db, T_NAME_ACCT | T_NAME_DB);
auditRecord(pReq, pMnode->clusterId, "createDB", name.dbname, "", detail);
auditRecord(pReq, pMnode->clusterId, "createDB", name.dbname, "", createReq.sql, createReq.sqlLen);
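// auditRecord() now receives the user's original SQL text plus its length in place of a hand-assembled detail string; the login audit in mndProcessConnectReq still builds a detail buffer and passes strlen(detail)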
_OVER:
if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) {
@ -1049,30 +1014,10 @@ static int32_t mndProcessAlterDbReq(SRpcMsg *pReq) {
if (code == 0) code = TSDB_CODE_ACTION_IN_PROGRESS;
}
char detail[3000] = {0};
char tmp[100] = {0};
mndBuildAuditDetailInt32(detail, tmp, "buffer:%d", alterReq.buffer);
mndBuildAuditDetailInt32(detail, tmp, "cacheLast:%d", alterReq.cacheLast);
mndBuildAuditDetailInt32(detail, tmp, "cacheLastSize:%d", alterReq.cacheLastSize);
mndBuildAuditDetailInt32(detail, tmp, "daysPerFile:%d", alterReq.daysPerFile);
mndBuildAuditDetailInt32(detail, tmp, "daysToKeep0:%d", alterReq.daysToKeep0);
mndBuildAuditDetailInt32(detail, tmp, "daysToKeep1:%d", alterReq.daysToKeep1);
mndBuildAuditDetailInt32(detail, tmp, "daysToKeep2:%d", alterReq.daysToKeep2);
mndBuildAuditDetailInt32(detail, tmp, "keepTimeOffset:%d", alterReq.keepTimeOffset);
mndBuildAuditDetailInt32(detail, tmp, "minRows:%d", alterReq.minRows);
mndBuildAuditDetailInt32(detail, tmp, "pages:%d", alterReq.pages);
mndBuildAuditDetailInt32(detail, tmp, "pageSize:%d", alterReq.pageSize);
mndBuildAuditDetailInt32(detail, tmp, "replications:%d", alterReq.replications);
mndBuildAuditDetailInt32(detail, tmp, "sstTrigger:%d", alterReq.sstTrigger);
mndBuildAuditDetailInt32(detail, tmp, "strict:%d", alterReq.strict);
mndBuildAuditDetailInt32(detail, tmp, "walFsyncPeriod:%d", alterReq.walFsyncPeriod);
mndBuildAuditDetailInt32(detail, tmp, "walRetentionSize:%d", alterReq.walRetentionSize);
SName name = {0};
tNameFromString(&name, alterReq.db, T_NAME_ACCT | T_NAME_DB);
auditRecord(pReq, pMnode->clusterId, "alterDB", name.dbname, "", detail);
auditRecord(pReq, pMnode->clusterId, "alterDB", name.dbname, "", alterReq.sql, alterReq.sqlLen);
_OVER:
if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) {
@ -1082,6 +1027,7 @@ _OVER:
mndReleaseDb(pMnode, pDb);
taosArrayDestroy(dbObj.cfg.pRetensions);
tFreeSAlterDbReq(&alterReq);
terrno = code;
return code;
@ -1364,13 +1310,10 @@ static int32_t mndProcessDropDbReq(SRpcMsg *pReq) {
code = TSDB_CODE_ACTION_IN_PROGRESS;
}
char detail[1000] = {0};
sprintf(detail, "ignoreNotExists:%d", dropReq.ignoreNotExists);
SName name = {0};
tNameFromString(&name, dropReq.db, T_NAME_ACCT | T_NAME_DB);
auditRecord(pReq, pMnode->clusterId, "dropDB", name.dbname, "", detail);
auditRecord(pReq, pMnode->clusterId, "dropDB", name.dbname, "", dropReq.sql, dropReq.sqlLen);
_OVER:
if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_ACTION_IN_PROGRESS) {
@ -1378,6 +1321,7 @@ _OVER:
}
mndReleaseDb(pMnode, pDb);
tFreeSDropDbReq(&dropReq);
return code;
}

View File

@ -720,8 +720,8 @@ static int32_t mndProcessNotifyReq(SRpcMsg *pReq) {
mndReleaseVgroup(pMnode, pVgroup);
}
}
mndUpdClusterInfo(pReq);
_OVER:
mndUpdClusterInfo(pReq);
tFreeSNotifyReq(&notifyReq);
return code;
}
@ -781,10 +781,11 @@ static int32_t mndConfigDnode(SMnode *pMnode, SRpcMsg *pReq, SMCfgDnodeReq *pCfg
SDnodeObj tmpDnode = *pDnode;
if (action == DND_ACTIVE_CODE) {
#ifndef TD_CHECK_ACTIVE
strncpy(tmpDnode.active, pCfgReq->value, TSDB_ACTIVE_KEY_LEN);
#else
#ifndef TD_GRANT_OPTIMIZE
if (grantAlterActiveCode(pDnode->active, pCfgReq->value, tmpDnode.active, 0) != 0) {
#else
if (grantAlterActiveCode(pDnode->id, pDnode->active, pCfgReq->value, tmpDnode.active, 0) != 0) {
#endif
if (TSDB_CODE_DUP_KEY != terrno) {
mError("dnode:%d, config dnode:%d, app:%p config:%s value:%s failed since %s", pDnode->id, pCfgReq->dnodeId,
pReq->info.ahandle, pCfgReq->config, pCfgReq->value, terrstr());
@ -799,12 +800,12 @@ static int32_t mndConfigDnode(SMnode *pMnode, SRpcMsg *pReq, SMCfgDnodeReq *pCfg
if (cfgAll) continue;
goto _OVER;
}
#endif
} else if (action == DND_CONN_ACTIVE_CODE) {
#ifndef TD_CHECK_ACTIVE
strncpy(tmpDnode.connActive, pCfgReq->value, TSDB_CONN_ACTIVE_KEY_LEN);
#else
#ifndef TD_GRANT_OPTIMIZE
if (grantAlterActiveCode(pDnode->connActive, pCfgReq->value, tmpDnode.connActive, 1) != 0) {
#else
if (grantAlterActiveCode(pDnode->id, pDnode->connActive, pCfgReq->value, tmpDnode.connActive, 1) != 0) {
#endif
if (TSDB_CODE_DUP_KEY != terrno) {
mError("dnode:%d, config dnode:%d, app:%p config:%s value:%s failed since %s", pDnode->id, pCfgReq->dnodeId,
pReq->info.ahandle, pCfgReq->config, pCfgReq->value, terrstr());
@ -819,7 +820,6 @@ static int32_t mndConfigDnode(SMnode *pMnode, SRpcMsg *pReq, SMCfgDnodeReq *pCfg
if (cfgAll) continue;
goto _OVER;
}
#endif
} else {
terrno = TSDB_CODE_INVALID_CFG;
goto _OVER;
@ -1025,7 +1025,7 @@ static int32_t mndProcessCreateDnodeReq(SRpcMsg *pReq) {
char obj[200] = {0};
sprintf(obj, "%s:%d", createReq.fqdn, createReq.port);
auditRecord(pReq, pMnode->clusterId, "createDnode", obj, "", "");
auditRecord(pReq, pMnode->clusterId, "createDnode", obj, "", createReq.sql, createReq.sqlLen);
_OVER:
if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) {
@ -1033,6 +1033,7 @@ _OVER:
}
mndReleaseDnode(pMnode, pDnode);
tFreeSCreateDnodeReq(&createReq);
return code;
}
@ -1173,13 +1174,7 @@ static int32_t mndProcessDropDnodeReq(SRpcMsg *pReq) {
char obj1[30] = {0};
sprintf(obj1, "%d", dropReq.dnodeId);
// char obj2[150] = {0};
// sprintf(obj2, "%s:%d", dropReq.fqdn, dropReq.port);
char detail[100] = {0};
sprintf(detail, "force:%d, unsafe:%d", dropReq.force, dropReq.unsafe);
auditRecord(pReq, pMnode->clusterId, "dropDnode", obj1, "", detail);
auditRecord(pReq, pMnode->clusterId, "dropDnode", obj1, "", dropReq.sql, dropReq.sqlLen);
_OVER:
if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) {
@ -1190,6 +1185,7 @@ _OVER:
mndReleaseMnode(pMnode, pMObj);
mndReleaseQnode(pMnode, pQObj);
mndReleaseSnode(pMnode, pSObj);
tFreeSDropDnodeReq(&dropReq);
return code;
}
@ -1198,7 +1194,7 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) {
const char *options[] = {
"debugFlag", "dDebugFlag", "vDebugFlag", "mDebugFlag", "wDebugFlag", "sDebugFlag", "tsdbDebugFlag",
"tqDebugFlag", "fsDebugFlag", "udfDebugFlag", "smaDebugFlag", "idxDebugFlag", "tdbDebugFlag", "tmrDebugFlag",
"uDebugFlag", "smaDebugFlag", "rpcDebugFlag", "qDebugFlag", "metaDebugFlag",
"uDebugFlag", "smaDebugFlag", "rpcDebugFlag", "qDebugFlag", "metaDebugFlag", "stDebugFlag",
};
int32_t optionSize = tListLen(options);
@ -1210,6 +1206,7 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) {
mInfo("dnode:%d, start to config, option:%s, value:%s", cfgReq.dnodeId, cfgReq.config, cfgReq.value);
if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_CONFIG_DNODE) != 0) {
tFreeSMCfgDnodeReq(&cfgReq);
return -1;
}
@ -1220,6 +1217,7 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) {
if (' ' != cfgReq.config[7] && 0 != cfgReq.config[7]) {
mError("dnode:%d, failed to config monitor since invalid conf:%s", cfgReq.dnodeId, cfgReq.config);
terrno = TSDB_CODE_INVALID_CFG;
tFreeSMCfgDnodeReq(&cfgReq);
return -1;
}
@ -1231,6 +1229,7 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) {
if (flag < 0 || flag > 2) {
mError("dnode:%d, failed to config monitor since value:%d", cfgReq.dnodeId, flag);
terrno = TSDB_CODE_INVALID_CFG;
tFreeSMCfgDnodeReq(&cfgReq);
return -1;
}
@ -1246,6 +1245,7 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) {
mError("dnode:%d, failed to config ttlPushInterval since value:%d. Valid range: [0, 100000]", cfgReq.dnodeId,
flag);
terrno = TSDB_CODE_INVALID_CFG;
tFreeSMCfgDnodeReq(&cfgReq);
return -1;
}
@ -1261,11 +1261,27 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) {
mError("dnode:%d, failed to config ttlBatchDropNum since value:%d. Valid range: [0, %d]", cfgReq.dnodeId, flag,
INT32_MAX);
terrno = TSDB_CODE_INVALID_CFG;
tFreeSMCfgDnodeReq(&cfgReq);
return -1;
}
strcpy(dcfgReq.config, "ttlbatchdropnum");
snprintf(dcfgReq.value, TSDB_DNODE_VALUE_LEN, "%d", flag);
} else if (strncasecmp(cfgReq.config, "asynclog", 8) == 0) {
int32_t optLen = strlen("asynclog");
int32_t flag = -1;
int32_t code = mndMCfgGetValInt32(&cfgReq, optLen, &flag);
if (code < 0) {
  tFreeSMCfgDnodeReq(&cfgReq);
  return code;
}
if (flag < 0 || flag > 1) {
mError("dnode:%d, failed to config asynclog since value:%d. Valid range: [0, 1]", cfgReq.dnodeId, flag);
terrno = TSDB_CODE_INVALID_CFG;
tFreeSMCfgDnodeReq(&cfgReq);
return -1;
}
strcpy(dcfgReq.config, "asynclog");
snprintf(dcfgReq.value, TSDB_DNODE_VALUE_LEN, "%d", flag);
#ifdef TD_ENTERPRISE
} else if (strncasecmp(cfgReq.config, "supportvnodes", 13) == 0) {
int32_t optLen = strlen("supportvnodes");
@ -1276,6 +1292,7 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) {
if (flag < 0 || flag > 4096) {
mError("dnode:%d, failed to config supportVnodes since value:%d. Valid range: [0, 4096]", cfgReq.dnodeId, flag);
terrno = TSDB_CODE_INVALID_CFG;
tFreeSMCfgDnodeReq(&cfgReq);
return -1;
}
if (flag == 0) {
@ -1291,6 +1308,7 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) {
if (' ' != cfgReq.config[index] && 0 != cfgReq.config[index]) {
mError("dnode:%d, failed to config activeCode since invalid conf:%s", cfgReq.dnodeId, cfgReq.config);
terrno = TSDB_CODE_INVALID_CFG;
tFreeSMCfgDnodeReq(&cfgReq);
return -1;
}
int32_t vlen = strlen(cfgReq.value);
@ -1300,6 +1318,7 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) {
mError("dnode:%d, failed to config activeCode since invalid vlen:%d. conf:%s, val:%s", cfgReq.dnodeId, vlen,
cfgReq.config, cfgReq.value);
terrno = TSDB_CODE_INVALID_OPTION;
tFreeSMCfgDnodeReq(&cfgReq);
return -1;
}
@ -1307,10 +1326,11 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) {
snprintf(dcfgReq.value, TSDB_DNODE_VALUE_LEN, "%s", cfgReq.value);
if (mndConfigDnode(pMnode, pReq, &cfgReq, opt) != 0) {
mError("dnode:%d, failed to config activeCode since %s. conf:%s, val:%s", cfgReq.dnodeId, terrstr(),
cfgReq.config, cfgReq.value);
mError("dnode:%d, failed to config activeCode since %s", cfgReq.dnodeId, terrstr());
tFreeSMCfgDnodeReq(&cfgReq);
return -1;
}
tFreeSMCfgDnodeReq(&cfgReq);
return 0;
#endif
} else {
@ -1323,6 +1343,7 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) {
if (' ' != cfgReq.config[optLen] && 0 != cfgReq.config[optLen]) {
mError("dnode:%d, failed to config since invalid conf:%s", cfgReq.dnodeId, cfgReq.config);
terrno = TSDB_CODE_INVALID_CFG;
tFreeSMCfgDnodeReq(&cfgReq);
return -1;
}
@ -1334,6 +1355,7 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) {
if (flag < 0 || flag > 255) {
mError("dnode:%d, failed to config %s since value:%d", cfgReq.dnodeId, optName, flag);
terrno = TSDB_CODE_INVALID_CFG;
tFreeSMCfgDnodeReq(&cfgReq);
return -1;
}
@ -1345,6 +1367,7 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) {
if (!findOpt) {
terrno = TSDB_CODE_INVALID_CFG;
mError("dnode:%d, failed to config since %s", cfgReq.dnodeId, terrstr());
tFreeSMCfgDnodeReq(&cfgReq);
return -1;
}
}
@ -1352,10 +1375,9 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) {
char obj[50] = {0};
sprintf(obj, "%d", cfgReq.dnodeId);
char detail[500] = {0};
sprintf(detail, "config:%s, value:%s", cfgReq.config, cfgReq.value);
auditRecord(pReq, pMnode->clusterId, "alterDnode", obj, "", cfgReq.sql, cfgReq.sqlLen);
auditRecord(pReq, pMnode->clusterId, "alterDnode", obj, "", detail);
tFreeSMCfgDnodeReq(&cfgReq);
int32_t code = -1;
SSdb *pSdb = pMnode->pSdb;

View File

@ -131,7 +131,13 @@ void grantAdd(EGrantType grant, uint64_t value) {}
void grantRestore(EGrantType grant, uint64_t value) {}
int32_t dmProcessGrantReq(void *pInfo, SRpcMsg *pMsg) { return TSDB_CODE_SUCCESS; }
int32_t dmProcessGrantNotify(void *pInfo, SRpcMsg *pMsg) { return TSDB_CODE_SUCCESS; }
#ifndef TD_GRANT_OPTIMIZE
int32_t grantAlterActiveCode(const char *old, const char *new, char *out, int8_t type) { return TSDB_CODE_SUCCESS; }
#else
int32_t grantAlterActiveCode(int32_t did, const char *old, const char *new, char *out, int8_t type) {
return TSDB_CODE_SUCCESS;
}
#endif
#endif

View File

@ -439,7 +439,7 @@ static int32_t mndProcessCreateIdxReq(SRpcMsg *pReq) {
pDb = mndAcquireDbByStb(pMnode, createReq.stbName);
if (pDb == NULL) {
terrno = TSDB_CODE_MND_INVALID_DB;
terrno = TSDB_CODE_MND_DB_NOT_EXIST;
goto _OVER;
}

View File

@ -656,7 +656,7 @@ static int32_t mndProcessCreateMnodeReq(SRpcMsg *pReq) {
char obj[40] = {0};
sprintf(obj, "%d", createReq.dnodeId);
auditRecord(pReq, pMnode->clusterId, "createMnode", obj, "", "");
auditRecord(pReq, pMnode->clusterId, "createMnode", obj, "", createReq.sql, createReq.sqlLen);
_OVER:
if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) {
@ -665,6 +665,7 @@ _OVER:
mndReleaseMnode(pMnode, pObj);
mndReleaseDnode(pMnode, pDnode);
tFreeSMCreateQnodeReq(&createReq);
return code;
}
@ -797,7 +798,7 @@ static int32_t mndProcessDropMnodeReq(SRpcMsg *pReq) {
char obj[40] = {0};
sprintf(obj, "%d", dropReq.dnodeId);
auditRecord(pReq, pMnode->clusterId, "dropMnode", obj, "", "");
auditRecord(pReq, pMnode->clusterId, "dropMnode", obj, "", dropReq.sql, dropReq.sqlLen);
_OVER:
if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) {
@ -805,6 +806,7 @@ _OVER:
}
mndReleaseMnode(pMnode, pObj);
tFreeSMCreateQnodeReq(&dropReq);
return code;
}

View File

@ -259,7 +259,7 @@ static int32_t mndProcessConnectReq(SRpcMsg *pReq) {
if (pDb == NULL) {
if (0 != strcmp(connReq.db, TSDB_INFORMATION_SCHEMA_DB) &&
(0 != strcmp(connReq.db, TSDB_PERFORMANCE_SCHEMA_DB))) {
terrno = TSDB_CODE_MND_INVALID_DB;
terrno = TSDB_CODE_MND_DB_NOT_EXIST;
mGError("user:%s, failed to login from %s while use db:%s since %s", pReq->info.conn.user, ip, connReq.db,
terrstr());
goto _OVER;
@ -314,10 +314,10 @@ _CONNECT:
sprintf(obj, "%s:%d", ip, pConn->port);
char detail[1000] = {0};
sprintf(detail, "connType:%d, db:%s, pid:%d, startTime:%" PRId64 ", sVer:%s, app:%s",
sprintf(detail, "connType:%d, db:%s, pid:%d, startTime:%" PRId64 ", sVer:%s, app:%s",
connReq.connType, connReq.db, connReq.pid, connReq.startTime, connReq.sVer, connReq.app);
auditRecord(pReq, pMnode->clusterId, "login", connReq.user, obj, detail);
auditRecord(pReq, pMnode->clusterId, "login", connReq.user, obj, detail, strlen(detail));
_OVER:

View File

@ -310,7 +310,7 @@ static int32_t mndProcessCreateQnodeReq(SRpcMsg *pReq) {
char obj[33] = {0};
sprintf(obj, "%d", createReq.dnodeId);
auditRecord(pReq, pMnode->clusterId, "createQnode", obj, "", "");
auditRecord(pReq, pMnode->clusterId, "createQnode", obj, "", createReq.sql, createReq.sqlLen);
_OVER:
if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) {
mError("qnode:%d, failed to create since %s", createReq.dnodeId, terrstr());
@ -318,6 +318,7 @@ _OVER:
mndReleaseQnode(pMnode, pObj);
mndReleaseDnode(pMnode, pDnode);
tFreeSMCreateQnodeReq(&createReq);
return code;
}
@ -423,7 +424,7 @@ static int32_t mndProcessDropQnodeReq(SRpcMsg *pReq) {
char obj[33] = {0};
sprintf(obj, "%d", dropReq.dnodeId);
auditRecord(pReq, pMnode->clusterId, "dropQnode", obj, "", "");
auditRecord(pReq, pMnode->clusterId, "dropQnode", obj, "", dropReq.sql, dropReq.sqlLen);
_OVER:
if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) {
@ -431,6 +432,7 @@ _OVER:
}
mndReleaseQnode(pMnode, pObj);
tFreeSMCreateQnodeReq(&dropReq);
return code;
}

View File

@ -27,8 +27,8 @@
#define SINK_NODE_LEVEL (0)
extern bool tsDeployOnSnode;
static int32_t mndAddSinkTaskToStream(SStreamObj* pStream, SArray* pTaskList, SMnode* pMnode, int32_t vgId,
SVgObj* pVgroup, SEpSet* pEpset, int32_t fillHistory);
static int32_t doAddSinkTask(SStreamObj* pStream, SArray* pTaskList, SMnode* pMnode, int32_t vgId, SVgObj* pVgroup,
SEpSet* pEpset, bool isFillhistory);
int32_t mndConvertRsmaTask(char** pDst, int32_t* pDstLen, const char* ast, int64_t uid, int8_t triggerType,
int64_t watermark, int64_t deleteMark) {
@ -87,15 +87,17 @@ END:
}
int32_t mndSetSinkTaskInfo(SStreamObj* pStream, SStreamTask* pTask) {
STaskOutputInfo* pInfo = &pTask->outputInfo;
if (pStream->smaId != 0) {
pTask->outputInfo.type = TASK_OUTPUT__SMA;
pTask->smaSink.smaId = pStream->smaId;
pInfo->type = TASK_OUTPUT__SMA;
pInfo->smaSink.smaId = pStream->smaId;
} else {
pTask->outputInfo.type = TASK_OUTPUT__TABLE;
pTask->tbSink.stbUid = pStream->targetStbUid;
memcpy(pTask->tbSink.stbFullName, pStream->targetSTbName, TSDB_TABLE_FNAME_LEN);
pTask->tbSink.pSchemaWrapper = tCloneSSchemaWrapper(&pStream->outputSchema);
if (pTask->tbSink.pSchemaWrapper == NULL) {
pInfo->type = TASK_OUTPUT__TABLE;
pInfo->tbSink.stbUid = pStream->targetStbUid;
memcpy(pInfo->tbSink.stbFullName, pStream->targetSTbName, TSDB_TABLE_FNAME_LEN);
pInfo->tbSink.pSchemaWrapper = tCloneSSchemaWrapper(&pStream->outputSchema);
if (pInfo->tbSink.pSchemaWrapper == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
}
@ -113,7 +115,7 @@ int32_t mndAddDispatcherForInternalTask(SMnode* pMnode, SStreamObj* pStream, SAr
isShuffle = true;
pTask->outputInfo.type = TASK_OUTPUT__SHUFFLE_DISPATCH;
pTask->msgInfo.msgType = TDMT_STREAM_TASK_DISPATCH;
if (mndExtractDbInfo(pMnode, pDb, &pTask->shuffleDispatcher.dbInfo, NULL) < 0) {
if (mndExtractDbInfo(pMnode, pDb, &pTask->outputInfo.shuffleDispatcher.dbInfo, NULL) < 0) {
return -1;
}
}
@ -124,8 +126,8 @@ int32_t mndAddDispatcherForInternalTask(SMnode* pMnode, SStreamObj* pStream, SAr
int32_t numOfSinkNodes = taosArrayGetSize(pSinkNodeList);
if (isShuffle) {
memcpy(pTask->shuffleDispatcher.stbFullName, pStream->targetSTbName, TSDB_TABLE_FNAME_LEN);
SArray* pVgs = pTask->shuffleDispatcher.dbInfo.pVgroupInfos;
memcpy(pTask->outputInfo.shuffleDispatcher.stbFullName, pStream->targetSTbName, TSDB_TABLE_FNAME_LEN);
SArray* pVgs = pTask->outputInfo.shuffleDispatcher.dbInfo.pVgroupInfos;
int32_t numOfVgroups = taosArrayGetSize(pVgs);
for (int32_t i = 0; i < numOfVgroups; i++) {
@ -207,8 +209,7 @@ SVgObj* mndSchedFetchOneVg(SMnode* pMnode, int64_t dbUid) {
}
// create sink node for each vgroup.
int32_t mndAddShuffleSinkTasksToStream(SMnode* pMnode, SArray* pTaskList, SStreamObj* pStream, SEpSet* pEpset,
int32_t fillHistory) {
int32_t doAddShuffleSinkTask(SMnode* pMnode, SArray* pTaskList, SStreamObj* pStream, SEpSet* pEpset, bool fillHistory) {
SSdb* pSdb = pMnode->pSdb;
void* pIter = NULL;
@ -224,17 +225,17 @@ int32_t mndAddShuffleSinkTasksToStream(SMnode* pMnode, SArray* pTaskList, SStrea
continue;
}
mndAddSinkTaskToStream(pStream, pTaskList, pMnode, pVgroup->vgId, pVgroup, pEpset, fillHistory);
doAddSinkTask(pStream, pTaskList, pMnode, pVgroup->vgId, pVgroup, pEpset, fillHistory);
sdbRelease(pSdb, pVgroup);
}
return 0;
}
int32_t mndAddSinkTaskToStream(SStreamObj* pStream, SArray* pTaskList, SMnode* pMnode, int32_t vgId, SVgObj* pVgroup,
SEpSet* pEpset, int32_t fillHistory) {
int64_t uid = (fillHistory == 0)? pStream->uid:pStream->hTaskUid;
SStreamTask* pTask = tNewStreamTask(uid, TASK_LEVEL__SINK, fillHistory, 0, pTaskList);
int32_t doAddSinkTask(SStreamObj* pStream, SArray* pTaskList, SMnode* pMnode, int32_t vgId, SVgObj* pVgroup,
SEpSet* pEpset, bool isFillhistory) {
int64_t uid = (isFillhistory)? pStream->hTaskUid:pStream->uid;
SStreamTask* pTask = tNewStreamTask(uid, TASK_LEVEL__SINK, isFillhistory, 0, pTaskList, pStream->conf.fillHistory);
if (pTask == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
@ -248,17 +249,16 @@ int32_t mndAddSinkTaskToStream(SStreamObj* pStream, SArray* pTaskList, SMnode* p
return 0;
}
static int32_t addSourceStreamTask(SMnode* pMnode, SVgObj* pVgroup, SArray* pTaskList, SArray* pSinkTaskList,
SStreamObj* pStream, SSubplan* plan, uint64_t uid, SEpSet* pEpset,
int8_t fillHistory, bool hasExtraSink, int64_t firstWindowSkey) {
SStreamTask* pTask = tNewStreamTask(uid, TASK_LEVEL__SOURCE, fillHistory, pStream->conf.triggerParam, pTaskList);
static int32_t addSourceTask(SMnode* pMnode, SVgObj* pVgroup, SArray* pTaskList, SArray* pSinkTaskList,
SStreamObj* pStream, SSubplan* plan, uint64_t uid, SEpSet* pEpset, bool fillHistory,
bool hasExtraSink, int64_t firstWindowSkey, bool hasFillHistory) {
SStreamTask* pTask =
tNewStreamTask(uid, TASK_LEVEL__SOURCE, fillHistory, pStream->conf.triggerParam, pTaskList, hasFillHistory);
if (pTask == NULL) {
return terrno;
}
epsetAssign(&pTask->info.mnodeEpset, pEpset);
// todo set the correct ts, which should be last key of queried table.
STimeWindow* pWindow = &pTask->dataRange.window;
pWindow->skey = INT64_MIN;
@ -296,8 +296,8 @@ static void setHTasksId(SArray* pTaskList, const SArray* pHTaskList) {
SStreamTask** pStreamTask = taosArrayGet(pTaskList, i);
SStreamTask** pHTask = taosArrayGet(pHTaskList, i);
(*pStreamTask)->historyTaskId.taskId = (*pHTask)->id.taskId;
(*pStreamTask)->historyTaskId.streamId = (*pHTask)->id.streamId;
(*pStreamTask)->hTaskInfo.id.taskId = (*pHTask)->id.taskId;
(*pStreamTask)->hTaskInfo.id.streamId = (*pHTask)->id.streamId;
(*pHTask)->streamTaskId.taskId = (*pStreamTask)->id.taskId;
(*pHTask)->streamTaskId.streamId = (*pStreamTask)->id.streamId;
@ -345,8 +345,8 @@ static int32_t addSourceTasksForOneLevelStream(SMnode* pMnode, const SQueryPlan*
// new stream task
SArray** pSinkTaskList = taosArrayGet(pStream->tasks, SINK_NODE_LEVEL);
int32_t code = addSourceStreamTask(pMnode, pVgroup, pTaskList, *pSinkTaskList, pStream, plan, pStream->uid, pEpset,
0, hasExtraSink, nextWindowSkey);
int32_t code = addSourceTask(pMnode, pVgroup, pTaskList, *pSinkTaskList, pStream, plan, pStream->uid, pEpset,
false, hasExtraSink, nextWindowSkey, pStream->conf.fillHistory);
if (code != TSDB_CODE_SUCCESS) {
sdbRelease(pSdb, pVgroup);
return -1;
@ -354,8 +354,8 @@ static int32_t addSourceTasksForOneLevelStream(SMnode* pMnode, const SQueryPlan*
if (pStream->conf.fillHistory) {
SArray** pHSinkTaskList = taosArrayGet(pStream->pHTasksList, SINK_NODE_LEVEL);
code = addSourceStreamTask(pMnode, pVgroup, pHTaskList, *pHSinkTaskList, pStream, plan, pStream->hTaskUid,
pEpset, 1, hasExtraSink, nextWindowSkey);
code = addSourceTask(pMnode, pVgroup, pHTaskList, *pHSinkTaskList, pStream, plan, pStream->hTaskUid,
pEpset, true, hasExtraSink, nextWindowSkey, true);
}
sdbRelease(pSdb, pVgroup);
@ -371,10 +371,10 @@ static int32_t addSourceTasksForOneLevelStream(SMnode* pMnode, const SQueryPlan*
return TSDB_CODE_SUCCESS;
}
static int32_t doAddSourceTask(SArray* pTaskList, int8_t fillHistory, int64_t uid, SStreamTask* pDownstreamTask,
static int32_t doAddSourceTask(SArray* pTaskList, bool isFillhistory, int64_t uid, SStreamTask* pDownstreamTask,
SMnode* pMnode, SSubplan* pPlan, SVgObj* pVgroup, SEpSet* pEpset,
int64_t nextWindowSkey) {
SStreamTask* pTask = tNewStreamTask(uid, TASK_LEVEL__SOURCE, fillHistory, 0, pTaskList);
int64_t nextWindowSkey, bool hasFillHistory) {
SStreamTask* pTask = tNewStreamTask(uid, TASK_LEVEL__SOURCE, isFillhistory, 0, pTaskList, hasFillHistory);
if (pTask == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
@ -400,8 +400,8 @@ static int32_t doAddSourceTask(SArray* pTaskList, int8_t fillHistory, int64_t ui
}
static int32_t doAddAggTask(uint64_t uid, SArray* pTaskList, SArray* pSinkNodeList, SMnode* pMnode, SStreamObj* pStream,
SEpSet* pEpset, int32_t fillHistory, SStreamTask** pAggTask) {
*pAggTask = tNewStreamTask(uid, TASK_LEVEL__AGG, fillHistory, pStream->conf.triggerParam, pTaskList);
SEpSet* pEpset, bool fillHistory, SStreamTask** pAggTask, bool hasFillhistory) {
*pAggTask = tNewStreamTask(uid, TASK_LEVEL__AGG, fillHistory, pStream->conf.triggerParam, pTaskList, hasFillhistory);
if (*pAggTask == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
@ -432,7 +432,8 @@ static int32_t addAggTask(SStreamObj* pStream, SMnode* pMnode, SQueryPlan* pPlan
*pAggTask = NULL;
SArray* pSinkNodeList = taosArrayGetP(pStream->tasks, SINK_NODE_LEVEL);
int32_t code = doAddAggTask(pStream->uid, pAggTaskList, pSinkNodeList, pMnode, pStream, pEpset, 0, pAggTask);
int32_t code = doAddAggTask(pStream->uid, pAggTaskList, pSinkNodeList, pMnode, pStream, pEpset, false, pAggTask,
pStream->conf.fillHistory);
if (code != TSDB_CODE_SUCCESS) {
return -1;
}
@ -461,7 +462,7 @@ static int32_t addAggTask(SStreamObj* pStream, SMnode* pMnode, SQueryPlan* pPlan
*pHAggTask = NULL;
code = doAddAggTask(pStream->hTaskUid, pHAggTaskList, pHSinkNodeList, pMnode, pStream, pEpset, pStream->conf.fillHistory,
pHAggTask);
pHAggTask, pStream->conf.fillHistory);
if (code != TSDB_CODE_SUCCESS) {
if (pSnode != NULL) {
sdbRelease(pSdb, pSnode);
@ -520,8 +521,8 @@ static int32_t addSourceTasksForMultiLevelStream(SMnode* pMnode, SQueryPlan* pPl
continue;
}
int32_t code =
doAddSourceTask(pSourceTaskList, 0, pStream->uid, pDownstreamTask, pMnode, plan, pVgroup, pEpset, nextWindowSkey);
int32_t code = doAddSourceTask(pSourceTaskList, false, pStream->uid, pDownstreamTask, pMnode, plan, pVgroup, pEpset,
nextWindowSkey, pStream->conf.fillHistory);
if (code != TSDB_CODE_SUCCESS) {
sdbRelease(pSdb, pVgroup);
terrno = code;
@ -529,8 +530,8 @@ static int32_t addSourceTasksForMultiLevelStream(SMnode* pMnode, SQueryPlan* pPl
}
if (pStream->conf.fillHistory) {
code = doAddSourceTask(pHSourceTaskList, 1, pStream->hTaskUid, pHDownstreamTask, pMnode, plan, pVgroup,
pEpset, nextWindowSkey);
code = doAddSourceTask(pHSourceTaskList, true, pStream->hTaskUid, pHDownstreamTask, pMnode, plan, pVgroup, pEpset,
nextWindowSkey, pStream->conf.fillHistory);
if (code != TSDB_CODE_SUCCESS) {
sdbRelease(pSdb, pVgroup);
return code;
@ -548,16 +549,16 @@ static int32_t addSourceTasksForMultiLevelStream(SMnode* pMnode, SQueryPlan* pPl
}
static int32_t addSinkTasks(SArray* pTasksList, SMnode* pMnode, SStreamObj* pStream, SArray** pCreatedTaskList,
SEpSet* pEpset, int32_t fillHistory) {
SEpSet* pEpset, bool fillHistory) {
SArray* pSinkTaskList = addNewTaskList(pTasksList);
if (pStream->fixedSinkVgId == 0) {
if (mndAddShuffleSinkTasksToStream(pMnode, pSinkTaskList, pStream, pEpset, fillHistory) < 0) {
if (doAddShuffleSinkTask(pMnode, pSinkTaskList, pStream, pEpset, fillHistory) < 0) {
// TODO free
return -1;
}
} else {
if (mndAddSinkTaskToStream(pStream, pSinkTaskList, pMnode, pStream->fixedSinkVgId, &pStream->fixedSinkVg,
pEpset, fillHistory) < 0) {
if (doAddSinkTask(pStream, pSinkTaskList, pMnode, pStream->fixedSinkVgId, &pStream->fixedSinkVg, pEpset,
fillHistory) < 0) {
// TODO free
return -1;
}

View File

@ -316,6 +316,7 @@ _OVER:
mndReleaseSnode(pMnode, pObj);
mndReleaseDnode(pMnode, pDnode);
tFreeSMCreateQnodeReq(&createReq);
return code;
}
@ -425,6 +426,7 @@ _OVER:
}
mndReleaseSnode(pMnode, pObj);
tFreeSMCreateQnodeReq(&dropReq);
return code;
}

View File

@ -859,18 +859,23 @@ int32_t mndBuildStbFromReq(SMnode *pMnode, SStbObj *pDst, SMCreateStbReq *pCreat
return 0;
}
static int32_t mndGenIdxNameForFirstTag(char *fullname, char *dbname, char *tagname) {
char randStr[24] = {0};
char randStr[TSDB_COL_NAME_LEN] = {0};
int32_t left = TSDB_COL_NAME_LEN - strlen(tagname) - 1;
if (left <= 1) {
sprintf(fullname, "%s.%s", dbname, tagname);
} else {
int8_t start = left < 8 ? 0 : 8;
int8_t end = left >= 24 ? 24 : left - 1;
// gen rand str len [base:end]
// note: ignore rand performance issues
int64_t len = taosRand() % (end - start + 1) + start;
taosRandStr2(randStr, len);
sprintf(fullname, "%s.%s_%s", dbname, tagname, randStr);
}
int8_t start = 8;
int8_t end = sizeof(randStr) - 1;
// gen rand str len in [start:end]
// note: ignore rand performance issues
int64_t len = taosRand() % (end - start + 1) + start;
taosRandStr2(randStr, len);
sprintf(fullname, "%s.%s_%s", dbname, tagname, randStr);
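// e.g. (hypothetical values) dbname "1.idxdb" and tagname "t1" produce something like "1.idxdb.t1_Xq83RkPz", with a random suffix of 8 to sizeof(randStr)-1 characters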
return 0;
}
static int32_t mndCreateStb(SMnode *pMnode, SRpcMsg *pReq, SMCreateStbReq *pCreate, SDbObj *pDb) {
SStbObj stbObj = {0};
int32_t code = -1;
@ -1075,80 +1080,6 @@ static int32_t mndBuildStbFromAlter(SStbObj *pStb, SStbObj *pDst, SMCreateStbReq
return TSDB_CODE_SUCCESS;
}
static char *mndAuditFieldTypeStr(int32_t type) {
switch (type) {
case TSDB_DATA_TYPE_NULL:
return "null";
case TSDB_DATA_TYPE_BOOL:
return "bool";
case TSDB_DATA_TYPE_TINYINT:
return "tinyint";
case TSDB_DATA_TYPE_SMALLINT:
return "smallint";
case TSDB_DATA_TYPE_INT:
return "int";
case TSDB_DATA_TYPE_BIGINT:
return "bigint";
case TSDB_DATA_TYPE_FLOAT:
return "float";
case TSDB_DATA_TYPE_DOUBLE:
return "double";
case TSDB_DATA_TYPE_VARCHAR:
return "varchar";
case TSDB_DATA_TYPE_TIMESTAMP:
return "timestamp";
case TSDB_DATA_TYPE_NCHAR:
return "nchar";
case TSDB_DATA_TYPE_UTINYINT:
return "utinyint";
case TSDB_DATA_TYPE_USMALLINT:
return "usmallint";
case TSDB_DATA_TYPE_UINT:
return "uint";
case TSDB_DATA_TYPE_UBIGINT:
return "ubigint";
case TSDB_DATA_TYPE_JSON:
return "json";
case TSDB_DATA_TYPE_VARBINARY:
return "varbinary";
case TSDB_DATA_TYPE_DECIMAL:
return "decimal";
case TSDB_DATA_TYPE_BLOB:
return "blob";
case TSDB_DATA_TYPE_MEDIUMBLOB:
return "mediumblob";
case TSDB_DATA_TYPE_GEOMETRY:
return "geometry";
default:
return "error";
}
}
static void mndAuditFieldStr(char *detail, SArray *arr, int32_t len, int32_t max) {
int32_t detialLen = strlen(detail);
int32_t fieldLen = 0;
for (int32_t i = 0; i < len; ++i) {
SField *pField = taosArrayGet(arr, i);
char field[TSDB_COL_NAME_LEN + 20] = {0};
fieldLen = strlen(", ");
if (detialLen > 0 && detialLen < max - fieldLen - 1) {
strcat(detail, ", ");
detialLen += fieldLen;
} else {
break;
}
sprintf(field, "%s:%s", pField->name, mndAuditFieldTypeStr(pField->type));
fieldLen = strlen(field);
if (detialLen < max - fieldLen - 1) {
strcat(detail, field);
detialLen += fieldLen;
} else {
break;
}
}
}
static int32_t mndProcessCreateStbReq(SRpcMsg *pReq) {
SMnode *pMnode = pReq->info.node;
int32_t code = -1;
@ -1257,26 +1188,10 @@ static int32_t mndProcessCreateStbReq(SRpcMsg *pReq) {
}
if (code == 0) code = TSDB_CODE_ACTION_IN_PROGRESS;
char detail[AUDIT_DETAIL_MAX] = {0};
sprintf(detail,
"colVer:%d, delay1:%" PRId64 ", delay2:%" PRId64 ", deleteMark1:%" PRId64
", "
"deleteMark2:%" PRId64
", igExists:%d, numOfColumns:%d, numOfFuncs:%d, numOfTags:%d, "
"source:%d, suid:%" PRId64
", tagVer:%d, ttl:%d, "
"watermark1:%" PRId64 ", watermark2:%" PRId64,
createReq.colVer, createReq.delay1, createReq.delay2, createReq.deleteMark1, createReq.deleteMark2,
createReq.igExists, createReq.numOfColumns, createReq.numOfFuncs, createReq.numOfTags, createReq.source,
createReq.suid, createReq.tagVer, createReq.ttl, createReq.watermark1, createReq.watermark2);
mndAuditFieldStr(detail, createReq.pColumns, createReq.numOfColumns, AUDIT_DETAIL_MAX);
mndAuditFieldStr(detail, createReq.pTags, createReq.numOfTags, AUDIT_DETAIL_MAX);
SName name = {0};
tNameFromString(&name, createReq.name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
auditRecord(pReq, pMnode->clusterId, "createStb", name.dbname, name.tname, detail);
auditRecord(pReq, pMnode->clusterId, "createStb", name.dbname, name.tname, createReq.sql, createReq.sqlLen);
_OVER:
if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) {
@ -2333,7 +2248,7 @@ static int32_t mndProcessAlterStbReq(SRpcMsg *pReq) {
pDb = mndAcquireDbByStb(pMnode, alterReq.name);
if (pDb == NULL) {
terrno = TSDB_CODE_MND_INVALID_DB;
terrno = TSDB_CODE_MND_DB_NOT_EXIST;
goto _OVER;
}
@ -2350,13 +2265,10 @@ static int32_t mndProcessAlterStbReq(SRpcMsg *pReq) {
code = mndAlterStb(pMnode, pReq, &alterReq, pDb, pStb);
if (code == 0) code = TSDB_CODE_ACTION_IN_PROGRESS;
char detail[2000] = {0};
sprintf(detail, "alterType:%d, numOfFields:%d, ttl:%d", alterReq.alterType, alterReq.numOfFields, alterReq.ttl);
SName name = {0};
tNameFromString(&name, alterReq.name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
auditRecord(pReq, pMnode->clusterId, "alterStb", name.dbname, name.tname, detail);
auditRecord(pReq, pMnode->clusterId, "alterStb", name.dbname, name.tname, alterReq.sql, alterReq.sqlLen);
_OVER:
if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) {
@ -2448,6 +2360,7 @@ static int32_t mndDropStb(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SStbObj *p
if (mndSetDropStbRedoActions(pMnode, pTrans, pDb, pStb) != 0) goto _OVER;
if (mndDropIdxsByStb(pMnode, pTrans, pDb, pStb) != 0) goto _OVER;
if (mndDropSmasByStb(pMnode, pTrans, pDb, pStb) != 0) goto _OVER;
if (mndUserRemoveStb(pMnode, pTrans, pStb->name) != 0) goto _OVER;
if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER;
code = 0;
@ -2619,13 +2532,10 @@ static int32_t mndProcessDropStbReq(SRpcMsg *pReq) {
code = mndDropStb(pMnode, pReq, pDb, pStb);
if (code == 0) code = TSDB_CODE_ACTION_IN_PROGRESS;
char detail[2000] = {0};
sprintf(detail, "igNotExists:%d, source:%d", dropReq.igNotExists, dropReq.source);
SName name = {0};
tNameFromString(&name, dropReq.name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
auditRecord(pReq, pMnode->clusterId, "dropStb", name.dbname, name.tname, detail);
auditRecord(pReq, pMnode->clusterId, "dropStb", name.dbname, name.tname, dropReq.sql, dropReq.sqlLen);
_OVER:
if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) {
@ -2634,6 +2544,7 @@ _OVER:
mndReleaseDb(pMnode, pDb);
mndReleaseStb(pMnode, pStb);
tFreeSMDropStbReq(&dropReq);
return code;
}
@ -3627,7 +3538,7 @@ static int32_t mndProcessCreateIndexReq(SRpcMsg *pReq) {
pDb = mndAcquireDbByStb(pMnode, tagIdxReq.dbFName);
if (pDb == NULL) {
terrno = TSDB_CODE_MND_INVALID_DB;
terrno = TSDB_CODE_MND_DB_NOT_EXIST;
goto _OVER;
}

View File

@ -37,17 +37,19 @@
typedef struct SNodeEntry {
int32_t nodeId;
bool stageUpdated; // the stage has been updated due to a leader/follower change or node reboot.
SEpSet epset; // compare the epset to identify the vgroup transferring between different dnodes.
int64_t hbTimestamp; // second
} SNodeEntry;
typedef struct SStreamVnodeRevertIndex {
typedef struct SStreamExecNodeInfo {
SArray *pNodeEntryList;
int64_t ts; // snapshot ts
int64_t ts; // snapshot ts
int64_t activeCheckpoint; // active check point id
SHashObj *pTaskMap;
SArray *pTaskList;
TdThreadMutex lock;
} SStreamVnodeRevertIndex;
} SStreamExecNodeInfo;
typedef struct SVgroupChangeInfo {
SHashObj *pDBMap;
@ -55,7 +57,7 @@ typedef struct SVgroupChangeInfo {
} SVgroupChangeInfo;
static int32_t mndNodeCheckSentinel = 0;
static SStreamVnodeRevertIndex execNodeList;
static SStreamExecNodeInfo execNodeList;
static int32_t mndStreamActionInsert(SSdb *pSdb, SStreamObj *pStream);
static int32_t mndStreamActionDelete(SSdb *pSdb, SStreamObj *pStream);
@ -65,9 +67,6 @@ static int32_t mndProcessDropStreamReq(SRpcMsg *pReq);
static int32_t mndProcessStreamCheckpointTmr(SRpcMsg *pReq);
static int32_t mndProcessStreamDoCheckpoint(SRpcMsg *pReq);
static int32_t mndProcessStreamHb(SRpcMsg *pReq);
static int32_t mndProcessRecoverStreamReq(SRpcMsg *pReq);
static int32_t mndProcessStreamMetaReq(SRpcMsg *pReq);
static int32_t mndGetStreamMeta(SRpcMsg *pReq, SShowObj *pShow, STableMetaRsp *pMeta);
static int32_t mndRetrieveStream(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows);
static void mndCancelGetNextStream(SMnode *pMnode, void *pIter);
static int32_t mndRetrieveStreamTask(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows);
@ -78,13 +77,19 @@ static int32_t mndBuildStreamCheckpointSourceReq2(void **pBuf, int32_t *pLen, in
int64_t streamId, int32_t taskId);
static int32_t mndProcessNodeCheck(SRpcMsg *pReq);
static int32_t mndProcessNodeCheckReq(SRpcMsg *pMsg);
static void keepStreamTasksInBuf(SStreamObj *pStream, SStreamVnodeRevertIndex *pExecNode);
static SArray *doExtractNodeListFromStream(SMnode *pMnode);
static SArray *extractNodeListFromStream(SMnode *pMnode);
static SArray *mndTakeVgroupSnapshot(SMnode *pMnode);
static SVgroupChangeInfo mndFindChangedNodeInfo(SMnode *pMnode, const SArray *pPrevNodeList, const SArray *pNodeList);
static int32_t mndPersistTransLog(SStreamObj *pStream, STrans *pTrans);
static STrans *doCreateTrans(SMnode *pMnode, SStreamObj *pStream, const char *name);
static int32_t mndPersistTransLog(SStreamObj *pStream, STrans *pTrans);
static void initTransAction(STransAction *pAction, void *pCont, int32_t contLen, int32_t msgType, const SEpSet *pEpset);
static int32_t createStreamUpdateTrans(SMnode *pMnode, SStreamObj *pStream, SVgroupChangeInfo *pInfo);
static void removeStreamTasksInBuf(SStreamObj* pStream, SStreamExecNodeInfo * pExecNode);
static void keepStreamTasksInBuf(SStreamObj *pStream, SStreamExecNodeInfo *pExecNode);
static int32_t removeExpirednodeEntryAndTask(SArray *pNodeSnapshot);
int32_t mndInitStream(SMnode *pMnode) {
SSdbTable table = {
@ -107,6 +112,7 @@ int32_t mndInitStream(SMnode *pMnode) {
mndSetMsgHandle(pMnode, TDMT_STREAM_TASK_RESUME_RSP, mndTransProcessRsp);
mndSetMsgHandle(pMnode, TDMT_STREAM_TASK_STOP_RSP, mndTransProcessRsp);
mndSetMsgHandle(pMnode, TDMT_VND_STREAM_TASK_UPDATE_RSP, mndTransProcessRsp);
mndSetMsgHandle(pMnode, TDMT_VND_STREAM_TASK_RESET_RSP, mndTransProcessRsp);
mndSetMsgHandle(pMnode, TDMT_VND_STREAM_CHECK_POINT_SOURCE_RSP, mndTransProcessRsp);
mndSetMsgHandle(pMnode, TDMT_MND_STREAM_CHECKPOINT_TIMER, mndProcessStreamCheckpointTmr);
@ -125,7 +131,7 @@ int32_t mndInitStream(SMnode *pMnode) {
taosThreadMutexInit(&execNodeList.lock, NULL);
execNodeList.pTaskMap = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_VARCHAR), true, HASH_NO_LOCK);
execNodeList.pTaskList = taosArrayInit(4, sizeof(STaskStatusEntry));
execNodeList.pTaskList = taosArrayInit(4, sizeof(STaskId));
return sdbSetTable(pMnode->pSdb, table);
}
@ -579,21 +585,6 @@ int32_t mndPersistDropStreamLog(SMnode *pMnode, STrans *pTrans, SStreamObj *pStr
return 0;
}
static int32_t mndSetStreamRecover(SMnode *pMnode, STrans *pTrans, const SStreamObj *pStream) {
SStreamObj streamObj = {0};
memcpy(streamObj.name, pStream->name, TSDB_STREAM_FNAME_LEN);
streamObj.status = STREAM_STATUS__RECOVER;
SSdbRaw *pCommitRaw = mndStreamActionEncode(&streamObj);
if (pCommitRaw == NULL) return -1;
if (mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) {
mError("stream trans:%d, failed to append commit log since %s", pTrans->id, terrstr());
return -1;
}
(void)sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY);
return 0;
}
static int32_t mndCreateStbForStream(SMnode *pMnode, STrans *pTrans, const SStreamObj *pStream, const char *user) {
SStbObj *pStb = NULL;
SDbObj *pDb = NULL;
@ -802,17 +793,6 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) {
}
}
// pDb = mndAcquireDb(pMnode, streamObj.sourceDb);
// if (pDb->cfg.replications != 1) {
// mError("stream source db must have only 1 replica, but %s has %d", pDb->name, pDb->cfg.replications);
// terrno = TSDB_CODE_MND_MULTI_REPLICA_SOURCE_DB;
// mndReleaseDb(pMnode, pDb);
// pDb = NULL;
// goto _OVER;
// }
// mndReleaseDb(pMnode, pDb);
STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_DB_INSIDE, pReq, "create-stream");
if (pTrans == NULL) {
mError("stream:%s, failed to create since %s", createStreamReq.name, terrstr());
@ -874,22 +854,15 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) {
code = TSDB_CODE_ACTION_IN_PROGRESS;
char detail[2000] = {0};
sprintf(detail,
"checkpointFreq:%" PRId64 ", createStb:%d, deleteMark:%" PRId64
", fillHistory:%d, igExists:%d, igExpired:%d, igUpdate:%d, lastTs:%" PRId64 ", maxDelay:%" PRId64
", numOfTags:%d, sourceDB:%s, targetStbFullName:%s, triggerType:%d, watermark:%" PRId64,
createStreamReq.checkpointFreq, createStreamReq.createStb, createStreamReq.deleteMark,
createStreamReq.fillHistory, createStreamReq.igExists, createStreamReq.igExpired, createStreamReq.igUpdate,
createStreamReq.lastTs, createStreamReq.maxDelay, createStreamReq.numOfTags, createStreamReq.sourceDB,
createStreamReq.targetStbFullName, createStreamReq.triggerType, createStreamReq.watermark);
SName name = {0};
tNameFromString(&name, createStreamReq.name, T_NAME_ACCT | T_NAME_DB);
//reuse this function for stream
auditRecord(pReq, pMnode->clusterId, "createStream", name.dbname, "", detail);
//TODO
if (createStreamReq.sql != NULL) {
auditRecord(pReq, pMnode->clusterId, "createStream", name.dbname, "",
createStreamReq.sql, strlen(createStreamReq.sql));
}
_OVER:
if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) {
mError("stream:%s, failed to create since %s", createStreamReq.name, terrstr());
@ -1063,8 +1036,7 @@ static int32_t mndBuildStreamCheckpointSourceReq2(void **pBuf, int32_t *pLen, in
// return -1;
// }
static int32_t mndAddStreamCheckpointToTrans(STrans *pTrans, SStreamObj *pStream, SMnode *pMnode,
int64_t checkpointId) {
static int32_t mndAddStreamCheckpointToTrans(STrans *pTrans, SStreamObj *pStream, SMnode *pMnode, int64_t chkptId) {
taosWLockLatch(&pStream->lock);
int32_t totLevel = taosArrayGetSize(pStream->tasks);
@ -1088,7 +1060,7 @@ static int32_t mndAddStreamCheckpointToTrans(STrans *pTrans, SStreamObj *pStream
void *buf;
int32_t tlen;
if (mndBuildStreamCheckpointSourceReq2(&buf, &tlen, pTask->info.nodeId, checkpointId, pTask->id.streamId,
if (mndBuildStreamCheckpointSourceReq2(&buf, &tlen, pTask->info.nodeId, chkptId, pTask->id.streamId,
pTask->id.taskId) < 0) {
mndReleaseVgroup(pMnode, pVgObj);
taosWUnLockLatch(&pStream->lock);
@ -1109,9 +1081,9 @@ static int32_t mndAddStreamCheckpointToTrans(STrans *pTrans, SStreamObj *pStream
}
}
pStream->checkpointId = checkpointId;
pStream->checkpointId = chkptId;
pStream->checkpointFreq = taosGetTimestampMs();
atomic_store_64(&pStream->currentTick, 0);
pStream->currentTick = 0;
// 3. commit log: stream checkpoint info
pStream->version = pStream->version + 1;
@ -1166,16 +1138,23 @@ static int32_t mndProcessStreamDoCheckpoint(SRpcMsg *pReq) {
execNodeList.pNodeEntryList = taosArrayDestroy(execNodeList.pNodeEntryList);
}
execNodeList.pNodeEntryList = doExtractNodeListFromStream(pMnode);
execNodeList.pNodeEntryList = extractNodeListFromStream(pMnode);
}
if (taosArrayGetSize(execNodeList.pNodeEntryList) == 0) {
mDebug("end to do stream task node change checking, no vgroup exists, do nothing");
mDebug("stream task node change checking done, no vgroups exist, do nothing");
execNodeList.ts = ts;
atomic_store_32(&mndNodeCheckSentinel, 0);
return 0;
}
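// if any node reported a stage update (leader/follower change or restart), skip this pass; the check presumably runs again on the next timer-driven round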
for(int32_t i = 0; i < taosArrayGetSize(execNodeList.pNodeEntryList); ++i) {
SNodeEntry* pNodeEntry = taosArrayGet(execNodeList.pNodeEntryList, i);
if (pNodeEntry->stageUpdated) {
mDebug("stream task not ready due to node update detected, checkpoint not issued");
return 0;
}
}
SArray *pNodeSnapshot = mndTakeVgroupSnapshot(pMnode);
SVgroupChangeInfo changeInfo = mndFindChangedNodeInfo(pMnode, execNodeList.pNodeEntryList, pNodeSnapshot);
@ -1185,7 +1164,7 @@ static int32_t mndProcessStreamDoCheckpoint(SRpcMsg *pReq) {
taosArrayDestroy(pNodeSnapshot);
if (nodeUpdated) {
mDebug("stream task not ready due to node update, not generate checkpoint");
mDebug("stream task not ready due to node update, checkpoint not issued");
return 0;
}
}
@ -1195,10 +1174,15 @@ static int32_t mndProcessStreamDoCheckpoint(SRpcMsg *pReq) {
taosThreadMutexLock(&execNodeList.lock);
for (int32_t i = 0; i < taosArrayGetSize(execNodeList.pTaskList); ++i) {
STaskStatusEntry *p = taosArrayGet(execNodeList.pTaskList, i);
if (p->status != TASK_STATUS__NORMAL) {
mDebug("s-task:0x%" PRIx64 "-0x%x (nodeId:%d) status:%s not ready, create checkpoint msg not issued",
p->streamId, p->taskId, 0, streamGetTaskStatusStr(p->status));
STaskId *p = taosArrayGet(execNodeList.pTaskList, i);
STaskStatusEntry* pEntry = taosHashGet(execNodeList.pTaskMap, p, sizeof(*p));
if (pEntry == NULL) {
continue;
}
if (pEntry->status != TASK_STATUS__NORMAL) {
mDebug("s-task:0x%" PRIx64 "-0x%x (nodeId:%d) status:%s not ready, checkpoint msg not issued",
pEntry->id.streamId, (int32_t)pEntry->id.taskId, 0, streamGetTaskStatusStr(pEntry->status));
ready = false;
break;
}
@ -1268,15 +1252,18 @@ static int32_t mndProcessDropStreamReq(SRpcMsg *pReq) {
if (dropReq.igNotExists) {
mInfo("stream:%s, not exist, ignore not exist is set", dropReq.name);
sdbRelease(pMnode->pSdb, pStream);
tFreeSMDropStreamReq(&dropReq);
return 0;
} else {
terrno = TSDB_CODE_MND_STREAM_NOT_EXIST;
tFreeSMDropStreamReq(&dropReq);
return -1;
}
}
if (mndCheckDbPrivilegeByName(pMnode, pReq->info.conn.user, MND_OPER_WRITE_DB, pStream->targetDb) != 0) {
sdbRelease(pMnode->pSdb, pStream);
tFreeSMDropStreamReq(&dropReq);
return -1;
}
@ -1284,6 +1271,7 @@ static int32_t mndProcessDropStreamReq(SRpcMsg *pReq) {
if (pTrans == NULL) {
mError("stream:%s, failed to drop since %s", dropReq.name, terrstr());
sdbRelease(pMnode->pSdb, pStream);
tFreeSMDropStreamReq(&dropReq);
return -1;
}
@ -1293,15 +1281,16 @@ static int32_t mndProcessDropStreamReq(SRpcMsg *pReq) {
if (mndTransCheckConflict(pMnode, pTrans) != 0) {
sdbRelease(pMnode->pSdb, pStream);
mndTransDrop(pTrans);
tFreeSMDropStreamReq(&dropReq);
return -1;
}
// mndTransSetSerial(pTrans);
// drop all tasks
if (mndDropStreamTasks(pMnode, pTrans, pStream) < 0) {
mError("stream:%s, failed to drop task since %s", dropReq.name, terrstr());
sdbRelease(pMnode->pSdb, pStream);
mndTransDrop(pTrans);
tFreeSMDropStreamReq(&dropReq);
return -1;
}
@ -1309,6 +1298,7 @@ static int32_t mndProcessDropStreamReq(SRpcMsg *pReq) {
if (mndPersistDropStreamLog(pMnode, pTrans, pStream) < 0) {
sdbRelease(pMnode->pSdb, pStream);
mndTransDrop(pTrans);
tFreeSMDropStreamReq(&dropReq);
return -1;
}
@ -1316,20 +1306,21 @@ static int32_t mndProcessDropStreamReq(SRpcMsg *pReq) {
mError("trans:%d, failed to prepare drop stream trans since %s", pTrans->id, terrstr());
sdbRelease(pMnode->pSdb, pStream);
mndTransDrop(pTrans);
tFreeSMDropStreamReq(&dropReq);
return -1;
}
char detail[100] = {0};
sprintf(detail, "igNotExists:%d", dropReq.igNotExists);
removeStreamTasksInBuf(pStream, &execNodeList);
SName name = {0};
tNameFromString(&name, dropReq.name, T_NAME_ACCT | T_NAME_DB);
//reuse this function for stream
auditRecord(pReq, pMnode->clusterId, "dropStream", name.dbname, "", detail);
auditRecord(pReq, pMnode->clusterId, "dropStream", name.dbname, "", dropReq.sql, dropReq.sqlLen);
sdbRelease(pMnode->pSdb, pStream);
mndTransDrop(pTrans);
tFreeSMDropStreamReq(&dropReq);
return TSDB_CODE_ACTION_IN_PROGRESS;
}
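The drop-stream path above now frees the decoded request (tFreeSMDropStreamReq) on every early-return path, which plugs the leak. A common alternative shape is a single cleanup label; the sketch below is a standalone illustration of that pattern with hypothetical types and names, not TDengine code:

/*
 * Standalone sketch (hypothetical names): centralize cleanup behind one exit
 * label instead of freeing the decoded request before every return.
 */
#include <stdlib.h>

typedef struct { char *sql; } DropReq;

static void dropReqFree(DropReq *req) {
  free(req->sql);
  req->sql = NULL;
}

static int processDrop(DropReq *req, int failStep) {
  int code = -1;

  if (failStep == 1) goto _over;  /* e.g. privilege check failed */
  if (failStep == 2) goto _over;  /* e.g. trans creation failed  */

  code = 0;                       /* success path */

_over:
  dropReqFree(req);               /* runs exactly once on every path */
  return code;
}

int main(void) {
  DropReq req = { .sql = malloc(8) };
  return processDrop(&req, 2) == -1 ? 0 : 1;
}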
@ -1564,35 +1555,58 @@ static int32_t mndRetrieveStreamTask(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock
}
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)&level, false);
colDataSetVal(pColInfo, numOfRows, (const char *)level, false);
// status
char status[20 + VARSTR_HEADER_SIZE] = {0};
int8_t taskStatus = atomic_load_8(&pTask->status.taskStatus);
if (taskStatus == TASK_STATUS__NORMAL) {
memcpy(varDataVal(status), "normal", 6);
varDataSetLen(status, 6);
} else if (taskStatus == TASK_STATUS__DROPPING) {
memcpy(varDataVal(status), "dropping", 8);
varDataSetLen(status, 8);
} else if (taskStatus == TASK_STATUS__UNINIT) {
memcpy(varDataVal(status), "uninit", 6);
varDataSetLen(status, 4);
} else if (taskStatus == TASK_STATUS__STOP) {
memcpy(varDataVal(status), "stop", 4);
varDataSetLen(status, 4);
} else if (taskStatus == TASK_STATUS__SCAN_HISTORY) {
memcpy(varDataVal(status), "history", 7);
varDataSetLen(status, 7);
} else if (taskStatus == TASK_STATUS__HALT) {
memcpy(varDataVal(status), "halt", 4);
varDataSetLen(status, 4);
} else if (taskStatus == TASK_STATUS__PAUSE) {
memcpy(varDataVal(status), "pause", 5);
varDataSetLen(status, 5);
char status[20 + VARSTR_HEADER_SIZE] = {0};
STaskId id = {.streamId = pTask->id.streamId, .taskId = pTask->id.taskId};
STaskStatusEntry* pe = taosHashGet(execNodeList.pTaskMap, &id, sizeof(id));
if (pe == NULL) {
continue;
}
const char* pStatus = streamGetTaskStatusStr(pe->status);
STR_TO_VARSTR(status, pStatus);
// status
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)&status, false);
colDataSetVal(pColInfo, numOfRows, (const char *)status, false);
// stage
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)&pe->stage, false);
// input queue
char vbuf[30] = {0};
char buf[25] = {0};
const char* queueInfoStr = "%4.2fMiB (%5.2f%%)";
sprintf(buf, queueInfoStr, pe->inputQUsed, pe->inputRate);
STR_TO_VARSTR(vbuf, buf);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char*)vbuf, false);
// output queue
// sprintf(buf, queueInfoStr, pe->outputQUsed, pe->outputRate);
// STR_TO_VARSTR(vbuf, buf);
// pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
// colDataSetVal(pColInfo, numOfRows, (const char*)vbuf, false);
if (pTask->info.taskLevel == TASK_LEVEL__SINK) {
const char* sinkStr = "%.2fMiB";
sprintf(buf, sinkStr, pe->sinkDataSize);
} else if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) {
// offset info
const char *offsetStr = "%" PRId64 " [%" PRId64 ", %" PRId64 "]";
sprintf(buf, offsetStr, pe->processedVer, pe->verStart, pe->verEnd);
}
STR_TO_VARSTR(vbuf, buf);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char*)vbuf, false);
numOfRows++;
}
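A note on the queue-usage string used above: a literal percent sign in a printf-style format must be escaped as %%. A tiny standalone check in plain C, with no TDengine dependencies:

#include <stdio.h>

int main(void) {
  char buf[64];
  /* queue size in MiB plus usage percentage, e.g. "1.50MiB (42.00%)" */
  snprintf(buf, sizeof(buf), "%4.2fMiB (%5.2f%%)", 1.5, 42.0);
  puts(buf);
  return 0;
}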
@ -1635,7 +1649,9 @@ static int32_t mndPauseStreamTask(STrans *pTrans, SStreamTask *pTask) {
return 0;
}
int32_t mndPauseAllStreamTaskImpl(STrans *pTrans, SArray *tasks) {
int32_t mndPauseAllStreamTasks(STrans *pTrans, SStreamObj *pStream) {
SArray* tasks = pStream->tasks;
int32_t size = taosArrayGetSize(tasks);
for (int32_t i = 0; i < size; i++) {
SArray *pTasks = taosArrayGetP(tasks, i);
@ -1655,16 +1671,6 @@ int32_t mndPauseAllStreamTaskImpl(STrans *pTrans, SArray *tasks) {
return 0;
}
int32_t mndPauseAllStreamTasks(STrans *pTrans, SStreamObj *pStream) {
int32_t code = mndPauseAllStreamTaskImpl(pTrans, pStream->tasks);
if (code != 0) {
return code;
}
// pStream->pHTasksList is null
// code = mndPauseAllStreamTaskImpl(pTrans, pStream->pHTasksList);
return code;
}
static int32_t mndPersistStreamLog(STrans *pTrans, const SStreamObj *pStream, int8_t status) {
SStreamObj streamObj = {0};
memcpy(streamObj.name, pStream->name, TSDB_STREAM_FNAME_LEN);
@ -1718,6 +1724,7 @@ static int32_t mndProcessPauseStreamReq(SRpcMsg *pReq) {
sdbRelease(pMnode->pSdb, pStream);
return -1;
}
mInfo("trans:%d, used to pause stream:%s", pTrans->id, pauseReq.name);
mndTransSetDbName(pTrans, pStream->sourceDb, pStream->targetDb);
@ -1729,7 +1736,7 @@ static int32_t mndProcessPauseStreamReq(SRpcMsg *pReq) {
// pause all tasks
if (mndPauseAllStreamTasks(pTrans, pStream) < 0) {
mError("stream:%s, failed to drop task since %s", pauseReq.name, terrstr());
mError("stream:%s, failed to pause task since %s", pauseReq.name, terrstr());
sdbRelease(pMnode->pSdb, pStream);
mndTransDrop(pTrans);
return -1;
@ -1890,6 +1897,7 @@ static int32_t doBuildStreamTaskUpdateMsg(void **pBuf, int32_t *pLen, SVgroupCha
tEncodeSize(tEncodeStreamTaskUpdateMsg, &req, blen, code);
if (code < 0) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
taosArrayDestroy(req.pNodeList);
return -1;
}
@ -1898,6 +1906,7 @@ static int32_t doBuildStreamTaskUpdateMsg(void **pBuf, int32_t *pLen, SVgroupCha
void *buf = taosMemoryMalloc(tlen);
if (buf == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
taosArrayDestroy(req.pNodeList);
return -1;
}
@ -1915,6 +1924,7 @@ static int32_t doBuildStreamTaskUpdateMsg(void **pBuf, int32_t *pLen, SVgroupCha
*pBuf = buf;
*pLen = tlen;
taosArrayDestroy(req.pNodeList);
return TSDB_CODE_SUCCESS;
}
@ -1953,20 +1963,9 @@ void initTransAction(STransAction *pAction, void *pCont, int32_t contLen, int32_
// todo extract method: traverse stream tasks
// build trans to update the epset
static int32_t createStreamUpdateTrans(SMnode *pMnode, SStreamObj *pStream, SVgroupChangeInfo *pInfo) {
STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB, NULL, "stream-task-update");
STrans* pTrans = doCreateTrans(pMnode, pStream, "stream-task-update");
if (pTrans == NULL) {
mError("failed to build stream task DAG update, reason: %s", tstrerror(TSDB_CODE_OUT_OF_MEMORY));
return -1;
}
mDebug("start to build stream:0x%" PRIx64 " task DAG update", pStream->uid);
mndTransSetDbName(pTrans, pStream->sourceDb, pStream->targetDb);
if (mndTransCheckConflict(pMnode, pTrans) != 0) {
mError("failed to build stream:0x%" PRIx64 " task DAG update, code:%s", pStream->uid,
tstrerror(TSDB_CODE_MND_TRANS_CONFLICT));
mndTransDrop(pTrans);
return -1;
return terrno;
}
taosWLockLatch(&pStream->lock);
@ -2046,7 +2045,7 @@ static SVgroupChangeInfo mndFindChangedNodeInfo(SMnode *pMnode, const SArray *pP
SNodeEntry *pCurrent = taosArrayGet(pNodeList, j);
if (pCurrent->nodeId == pPrevEntry->nodeId) {
if (isNodeEpsetChanged(&pPrevEntry->epset, &pCurrent->epset)) {
if (pPrevEntry->stageUpdated || isNodeEpsetChanged(&pPrevEntry->epset, &pCurrent->epset)) {
const SEp *pPrevEp = GET_ACTIVE_EP(&pPrevEntry->epset);
char buf[256] = {0};
@ -2127,7 +2126,7 @@ static int32_t mndProcessVgroupChange(SMnode *pMnode, SVgroupChangeInfo *pChange
return 0;
}
static SArray *doExtractNodeListFromStream(SMnode *pMnode) {
static SArray *extractNodeListFromStream(SMnode *pMnode) {
SSdb *pSdb = pMnode->pSdb;
SStreamObj *pStream = NULL;
void *pIter = NULL;
@ -2148,11 +2147,9 @@ static SArray *doExtractNodeListFromStream(SMnode *pMnode) {
int32_t numOfTasks = taosArrayGetSize(pLevel);
for (int32_t k = 0; k < numOfTasks; ++k) {
SStreamTask *pTask = taosArrayGetP(pLevel, k);
SNodeEntry entry = {0};
epsetAssign(&entry.epset, &pTask->info.epSet);
entry.nodeId = pTask->info.nodeId;
entry.hbTimestamp = -1;
SNodeEntry entry = {.hbTimestamp = -1, .nodeId = pTask->info.nodeId};
epsetAssign(&entry.epset, &pTask->info.epSet);
taosHashPut(pHash, &entry.nodeId, sizeof(entry.nodeId), &entry, sizeof(entry));
}
}
@ -2190,6 +2187,81 @@ static void doExtractTasksFromStream(SMnode *pMnode) {
}
}
static int32_t doRemoveFromTask(SStreamExecNodeInfo* pExecNode, STaskId* pRemovedId) {
void *p = taosHashGet(pExecNode->pTaskMap, pRemovedId, sizeof(*pRemovedId));
if (p != NULL) {
taosHashRemove(pExecNode->pTaskMap, pRemovedId, sizeof(*pRemovedId));
for(int32_t k = 0; k < taosArrayGetSize(pExecNode->pTaskList); ++k) {
STaskId* pId = taosArrayGet(pExecNode->pTaskList, k);
if (pId->taskId == pRemovedId->taskId && pId->streamId == pRemovedId->streamId) {
taosArrayRemove(pExecNode->pTaskList, k);
mInfo("s-task:0x%x removed from buffer, remain:%d", (int32_t) pRemovedId->taskId,
(int32_t)taosArrayGetSize(pExecNode->pTaskList));
break;
}
}
}
return 0;
}
static bool taskNodeExists(SArray* pList, int32_t nodeId) {
size_t num = taosArrayGetSize(pList);
for(int32_t i = 0; i < num; ++i) {
SNodeEntry* pEntry = taosArrayGet(pList, i);
if (pEntry->nodeId == nodeId) {
return true;
}
}
return false;
}
int32_t removeExpirednodeEntryAndTask(SArray *pNodeSnapshot) {
SArray* pRemoveTaskList = taosArrayInit(4, sizeof(STaskId));
int32_t numOfTask = taosArrayGetSize(execNodeList.pTaskList);
for(int32_t i = 0; i < numOfTask; ++i) {
STaskId* pId = taosArrayGet(execNodeList.pTaskList, i);
STaskStatusEntry* pEntry = taosHashGet(execNodeList.pTaskMap, pId, sizeof(*pId));
bool existed = taskNodeExists(pNodeSnapshot, pEntry->nodeId);
if (!existed) {
taosArrayPush(pRemoveTaskList, pId);
}
}
for(int32_t i = 0; i < taosArrayGetSize(pRemoveTaskList); ++i) {
STaskId* pId = taosArrayGet(pRemoveTaskList, i);
doRemoveFromTask(&execNodeList, pId);
}
mDebug("remove invalid stream tasks:%d, remain:%d", (int32_t)taosArrayGetSize(pRemoveTaskList),
(int32_t) taosArrayGetSize(execNodeList.pTaskList));
int32_t size = taosArrayGetSize(pNodeSnapshot);
SArray* pValidNodeEntryList = taosArrayInit(4, sizeof(SNodeEntry));
for(int32_t i = 0; i < taosArrayGetSize(execNodeList.pNodeEntryList); ++i) {
SNodeEntry* p = taosArrayGet(execNodeList.pNodeEntryList, i);
for(int32_t j = 0; j < size; ++j) {
SNodeEntry* pEntry = taosArrayGet(pNodeSnapshot, j);
if (pEntry->nodeId == p->nodeId) {
taosArrayPush(pValidNodeEntryList, p);
break;
}
}
}
execNodeList.pNodeEntryList = taosArrayDestroy(execNodeList.pNodeEntryList);
execNodeList.pNodeEntryList = pValidNodeEntryList;
taosArrayDestroy(pRemoveTaskList);
return 0;
}
// this function is executed by only one thread, so it does not need to be thread-safe
static int32_t mndProcessNodeCheckReq(SRpcMsg *pMsg) {
int32_t code = 0;
@ -2208,7 +2280,7 @@ static int32_t mndProcessNodeCheckReq(SRpcMsg *pMsg) {
execNodeList.pNodeEntryList = taosArrayDestroy(execNodeList.pNodeEntryList);
}
execNodeList.pNodeEntryList = doExtractNodeListFromStream(pMnode);
execNodeList.pNodeEntryList = extractNodeListFromStream(pMnode);
}
if (taosArrayGetSize(execNodeList.pNodeEntryList) == 0) {
@ -2220,6 +2292,9 @@ static int32_t mndProcessNodeCheckReq(SRpcMsg *pMsg) {
SArray *pNodeSnapshot = mndTakeVgroupSnapshot(pMnode);
taosThreadMutexLock(&execNodeList.lock);
removeExpirednodeEntryAndTask(pNodeSnapshot);
SVgroupChangeInfo changeInfo = mndFindChangedNodeInfo(pMnode, execNodeList.pNodeEntryList, pNodeSnapshot);
if (taosArrayGetSize(changeInfo.pUpdateNodeList) > 0) {
code = mndProcessVgroupChange(pMnode, &changeInfo);
@ -2236,6 +2311,7 @@ static int32_t mndProcessNodeCheckReq(SRpcMsg *pMsg) {
taosArrayDestroy(pNodeSnapshot);
}
taosThreadMutexUnlock(&execNodeList.lock);
taosArrayDestroy(changeInfo.pUpdateNodeList);
taosHashCleanup(changeInfo.pDBMap);
@ -2245,9 +2321,13 @@ static int32_t mndProcessNodeCheckReq(SRpcMsg *pMsg) {
}
typedef struct SMStreamNodeCheckMsg {
int8_t holder; // // to fix windows compile error, define place holder
int8_t placeHolder; // placeholder to avoid an empty struct, which fails to compile on Windows
} SMStreamNodeCheckMsg;
typedef struct SMStreamTaskResetMsg {
int8_t placeHolder;
} SMStreamTaskResetMsg;
static int32_t mndProcessNodeCheck(SRpcMsg *pReq) {
SMnode *pMnode = pReq->info.node;
SSdb *pSdb = pMnode->pSdb;
@ -2256,13 +2336,39 @@ static int32_t mndProcessNodeCheck(SRpcMsg *pReq) {
}
SMStreamNodeCheckMsg *pMsg = rpcMallocCont(sizeof(SMStreamNodeCheckMsg));
SRpcMsg rpcMsg = {
.msgType = TDMT_MND_STREAM_NODECHANGE_CHECK, .pCont = pMsg, .contLen = sizeof(SMStreamNodeCheckMsg)};
SRpcMsg rpcMsg = {
.msgType = TDMT_MND_STREAM_NODECHANGE_CHECK, .pCont = pMsg, .contLen = sizeof(SMStreamNodeCheckMsg)};
tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg);
return 0;
}
static void keepStreamTasksInBuf(SStreamObj *pStream, SStreamVnodeRevertIndex *pExecNode) {
void keepStreamTasksInBuf(SStreamObj *pStream, SStreamExecNodeInfo *pExecNode) {
int32_t level = taosArrayGetSize(pStream->tasks);
for (int32_t i = 0; i < level; i++) {
SArray *pLevel = taosArrayGetP(pStream->tasks, i);
int32_t numOfTasks = taosArrayGetSize(pLevel);
for (int32_t j = 0; j < numOfTasks; j++) {
SStreamTask *pTask = taosArrayGetP(pLevel, j);
STaskId id = {.streamId = pTask->id.streamId, .taskId = pTask->id.taskId};
void *p = taosHashGet(pExecNode->pTaskMap, &id, sizeof(id));
if (p == NULL) {
STaskStatusEntry entry = {0};
streamTaskStatusInit(&entry, pTask);
taosHashPut(pExecNode->pTaskMap, &id, sizeof(id), &entry, sizeof(entry));
taosArrayPush(pExecNode->pTaskList, &id);
mInfo("s-task:0x%x add into task buffer, total:%d", (int32_t)entry.id.taskId,
(int32_t)taosArrayGetSize(pExecNode->pTaskList));
}
}
}
}
void removeStreamTasksInBuf(SStreamObj* pStream, SStreamExecNodeInfo * pExecNode) {
int32_t level = taosArrayGetSize(pStream->tasks);
for (int32_t i = 0; i < level; i++) {
SArray *pLevel = taosArrayGetP(pStream->tasks, i);
@ -2270,26 +2376,172 @@ static void keepStreamTasksInBuf(SStreamObj *pStream, SStreamVnodeRevertIndex *p
int32_t numOfTasks = taosArrayGetSize(pLevel);
for (int32_t j = 0; j < numOfTasks; j++) {
SStreamTask *pTask = taosArrayGetP(pLevel, j);
int64_t keys[2] = {pTask->id.streamId, pTask->id.taskId};
void *p = taosHashGet(pExecNode->pTaskMap, keys, sizeof(keys));
if (p == NULL) {
STaskStatusEntry entry = {
.streamId = pTask->id.streamId, .taskId = pTask->id.taskId, .status = TASK_STATUS__STOP};
taosArrayPush(pExecNode->pTaskList, &entry);
STaskId id = {.streamId = pTask->id.streamId, .taskId = pTask->id.taskId};
void *p = taosHashGet(pExecNode->pTaskMap, &id, sizeof(id));
if (p != NULL) {
taosHashRemove(pExecNode->pTaskMap, &id, sizeof(id));
for(int32_t k = 0; k < taosArrayGetSize(pExecNode->pTaskList); ++k) {
STaskId* pId = taosArrayGet(pExecNode->pTaskList, k);
if (pId->taskId == id.taskId && pId->streamId == id.streamId) {
taosArrayRemove(pExecNode->pTaskList, k);
mInfo("s-task:0x%x removed from buffer, remain:%d", (int32_t)id.taskId,
(int32_t)taosArrayGetSize(pExecNode->pTaskList));
break;
}
}
int32_t ordinal = taosArrayGetSize(pExecNode->pTaskList) - 1;
taosHashPut(pExecNode->pTaskMap, keys, sizeof(keys), &ordinal, sizeof(ordinal));
}
}
}
ASSERT(taosHashGetSize(pExecNode->pTaskMap) == taosArrayGetSize(pExecNode->pTaskList));
}
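keepStreamTasksInBuf and removeStreamTasksInBuf above key the execution-node buffer by the STaskId struct (streamId, taskId) instead of the old raw int64_t[2] pair, and keep the hash map and the flat task list in step. The sketch below mirrors that add/remove-by-composite-key bookkeeping using only the C standard library; every name in it is hypothetical:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct { int64_t streamId; int64_t taskId; } TaskKey;

typedef struct {
  TaskKey list[64];
  int     num;
} TaskBuf;

/* append only if the (streamId, taskId) pair is not tracked yet */
static void taskBufAdd(TaskBuf *buf, TaskKey key) {
  for (int i = 0; i < buf->num; ++i) {
    if (buf->list[i].streamId == key.streamId && buf->list[i].taskId == key.taskId) return;
  }
  if (buf->num < 64) buf->list[buf->num++] = key;
}

/* remove by key and compact the tail, similar to taosArrayRemove on pTaskList */
static void taskBufRemove(TaskBuf *buf, TaskKey key) {
  for (int i = 0; i < buf->num; ++i) {
    if (buf->list[i].streamId == key.streamId && buf->list[i].taskId == key.taskId) {
      memmove(&buf->list[i], &buf->list[i + 1], (size_t)(buf->num - i - 1) * sizeof(TaskKey));
      buf->num--;
      return;
    }
  }
}

int main(void) {
  TaskBuf buf = {0};
  taskBufAdd(&buf, (TaskKey){.streamId = 1, .taskId = 0x2a});
  taskBufAdd(&buf, (TaskKey){.streamId = 1, .taskId = 0x2b});
  taskBufRemove(&buf, (TaskKey){.streamId = 1, .taskId = 0x2a});
  printf("remain:%d\n", buf.num); /* prints remain:1 */
  return 0;
}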
static STrans* doCreateTrans(SMnode* pMnode, SStreamObj* pStream, const char* name) {
STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB_INSIDE, NULL, name);
if (pTrans == NULL) {
mError("failed to build trans:%s, reason: %s", name, tstrerror(TSDB_CODE_OUT_OF_MEMORY));
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
mDebug("start to build stream:0x%" PRIx64 " task DAG update", pStream->uid);
mndTransSetDbName(pTrans, pStream->sourceDb, pStream->targetDb);
if (mndTransCheckConflict(pMnode, pTrans) != 0) {
terrno = TSDB_CODE_MND_TRANS_CONFLICT;
mError("failed to build trans:%s for stream:0x%" PRIx64 " code:%s", name, pStream->uid, tstrerror(terrno));
mndTransDrop(pTrans);
return NULL;
}
terrno = 0;
return pTrans;
}
int32_t createStreamResetStatusTrans(SMnode* pMnode, SStreamObj* pStream) {
STrans *pTrans = doCreateTrans(pMnode, pStream, "stream-task-reset");
if (pTrans == NULL) {
return terrno;
}
taosWLockLatch(&pStream->lock);
int32_t numOfLevels = taosArrayGetSize(pStream->tasks);
for (int32_t j = 0; j < numOfLevels; ++j) {
SArray *pLevel = taosArrayGetP(pStream->tasks, j);
int32_t numOfTasks = taosArrayGetSize(pLevel);
for (int32_t k = 0; k < numOfTasks; ++k) {
SStreamTask *pTask = taosArrayGetP(pLevel, k);
// todo extract method, with pause stream task
SVResetStreamTaskReq* pReq = taosMemoryCalloc(1, sizeof(SVResetStreamTaskReq));
if (pReq == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
mError("failed to malloc in reset stream, size:%" PRIzu ", code:%s", sizeof(SVResetStreamTaskReq),
tstrerror(TSDB_CODE_OUT_OF_MEMORY));
return terrno;
}
pReq->head.vgId = htonl(pTask->info.nodeId);
pReq->taskId = pTask->id.taskId;
pReq->streamId = pTask->id.streamId;
STransAction action = {0};
initTransAction(&action, pReq, sizeof(SVResetStreamTaskReq), TDMT_VND_STREAM_TASK_RESET, &pTask->info.epSet);
if (mndTransAppendRedoAction(pTrans, &action) != 0) {
taosMemoryFree(pReq);
taosWUnLockLatch(&pStream->lock);
mndTransDrop(pTrans);
return terrno;
}
}
}
taosWUnLockLatch(&pStream->lock);
int32_t code = mndPersistTransLog(pStream, pTrans);
if (code != TSDB_CODE_SUCCESS) {
sdbRelease(pMnode->pSdb, pStream);
return -1;
}
if (mndTransPrepare(pMnode, pTrans) != 0) {
mError("trans:%d, failed to prepare update stream trans since %s", pTrans->id, terrstr());
sdbRelease(pMnode->pSdb, pStream);
mndTransDrop(pTrans);
return -1;
}
sdbRelease(pMnode->pSdb, pStream);
mndTransDrop(pTrans);
return TSDB_CODE_ACTION_IN_PROGRESS;
}
int32_t mndResetFromCheckpoint(SMnode* pMnode) {
// find the checkpoint trans id
int32_t transId = 0;
{
SSdb *pSdb = pMnode->pSdb;
STrans *pTrans = NULL;
void* pIter = NULL;
while (1) {
pIter = sdbFetch(pSdb, SDB_TRANS, pIter, (void **)&pTrans);
if (pIter == NULL) {
break;
}
if (strncmp(pTrans->opername, MND_STREAM_CHECKPOINT_NAME, tListLen(pTrans->opername) - 1) == 0) {
transId = pTrans->id;
sdbRelease(pSdb, pTrans);
sdbCancelFetch(pSdb, pIter);
break;
}
sdbRelease(pSdb, pTrans);
}
}
if (transId == 0) {
mError("failed to find the checkpoint trans, reset not executed");
return TSDB_CODE_SUCCESS;
}
STrans* pTrans = mndAcquireTrans(pMnode, transId);
mndKillTrans(pMnode, pTrans);
// set the status of all tasks back to normal; refactor later to do this at stream level instead of vnode level
SSdb *pSdb = pMnode->pSdb;
SStreamObj *pStream = NULL;
void *pIter = NULL;
while (1) {
pIter = sdbFetch(pSdb, SDB_STREAM, pIter, (void **)&pStream);
if (pIter == NULL) {
break;
}
mDebug("stream:%s (0x%" PRIx64 ") reset checkpoint procedure, create reset trans", pStream->name, pStream->uid);
int32_t code = createStreamResetStatusTrans(pMnode, pStream);
if (code != TSDB_CODE_SUCCESS) {
sdbCancelFetch(pSdb, pIter);
return code;
}
}
return 0;
}
// todo: this process should be executed by the write queue worker of the mnode
int32_t mndProcessStreamHb(SRpcMsg *pReq) {
SMnode *pMnode = pReq->info.node;
SMnode *pMnode = pReq->info.node;
SStreamHbMsg req = {0};
int32_t code = TSDB_CODE_SUCCESS;
bool checkpointFailed = false;
int64_t activeCheckpointId = 0;
SDecoder decoder = {0};
tDecoderInit(&decoder, pReq->pCont, pReq->contLen);
@ -2301,7 +2553,6 @@ int32_t mndProcessStreamHb(SRpcMsg *pReq) {
}
tDecoderClear(&decoder);
// int64_t now = taosGetTimestampSec();
mTrace("receive stream-meta hb from vgId:%d, active numOfTasks:%d", req.vgId, req.numOfTasks);
taosThreadMutexLock(&execNodeList.lock);
@ -2312,80 +2563,60 @@ int32_t mndProcessStreamHb(SRpcMsg *pReq) {
for (int32_t i = 0; i < req.numOfTasks; ++i) {
STaskStatusEntry *p = taosArrayGet(req.pTaskStatus, i);
int64_t k[2] = {p->streamId, p->taskId};
int32_t *index = taosHashGet(execNodeList.pTaskMap, &k, sizeof(k));
if (index == NULL) {
STaskStatusEntry *pEntry = taosHashGet(execNodeList.pTaskMap, &p->id, sizeof(p->id));
if (pEntry == NULL) {
mError("s-task:0x%" PRIx64 " not found in mnode task list", p->id.taskId);
continue;
}
STaskStatusEntry *pStatusEntry = taosArrayGet(execNodeList.pTaskList, *index);
pStatusEntry->status = p->status;
if (p->stage != pEntry->stage && pEntry->stage != -1) {
int32_t numOfNodes = taosArrayGetSize(execNodeList.pNodeEntryList);
for(int32_t j = 0; j < numOfNodes; ++j) {
SNodeEntry* pNodeEntry = taosArrayGet(execNodeList.pNodeEntryList, j);
if (pNodeEntry->nodeId == pEntry->nodeId) {
mInfo("vgId:%d stage updated, from %d to %d, nodeUpdate trigger by s-task:0x%" PRIx64,
pEntry->nodeId, pEntry->stage, p->stage, pEntry->id.taskId);
pNodeEntry->stageUpdated = true;
pEntry->stage = p->stage;
break;
}
}
} else {
streamTaskStatusCopy(pEntry, p);
if (p->activeCheckpointId != 0) {
if (activeCheckpointId != 0) {
ASSERT(activeCheckpointId == p->activeCheckpointId);
} else {
activeCheckpointId = p->activeCheckpointId;
}
if (p->checkpointFailed) {
checkpointFailed = p->checkpointFailed;
}
}
}
pEntry->status = p->status;
if (p->status != TASK_STATUS__NORMAL) {
mDebug("received s-task:0x%x not in ready status:%s", p->taskId, streamGetTaskStatusStr(p->status));
mDebug("received s-task:0x%"PRIx64" not in ready status:%s", p->id.taskId, streamGetTaskStatusStr(p->status));
}
}
// the current checkpoint has failed, roll back from the checkpoint trans:
// kill the checkpoint trans and then set the status of all tasks back to normal
if (checkpointFailed && activeCheckpointId != 0) {
if (execNodeList.activeCheckpoint != activeCheckpointId) {
mInfo("checkpointId:%"PRId64" failed, issue task-reset trans to reset all tasks status", activeCheckpointId);
execNodeList.activeCheckpoint = activeCheckpointId;
mndResetFromCheckpoint(pMnode);
} else {
mDebug("checkpoint:%"PRId64" reset has issued already, ignore it", activeCheckpointId);
}
}
taosThreadMutexUnlock(&execNodeList.lock);
taosArrayDestroy(req.pTaskStatus);
// bool nodeChanged = false;
// SArray* pList = taosArrayInit(4, sizeof(int32_t));
/*
// record the timeout node
for(int32_t i = 0; i < taosArrayGetSize(execNodeList.pNodeEntryList); ++i) {
SNodeEntry* pEntry = taosArrayGet(execNodeList.pNodeEntryList, i);
int64_t duration = now - pEntry->hbTimestamp;
if (duration > MND_STREAM_HB_INTERVAL) { // execNode timeout, try next
taosArrayPush(pList, &pEntry);
mWarn("nodeId:%d stream node timeout, since last hb:%"PRId64"s", pEntry->nodeId, duration);
continue;
}
if (pEntry->nodeId != req.vgId) {
continue;
}
pEntry->hbTimestamp = now;
// check epset to identify whether the node has been transferred to other dnodes.
// node the epset is changed, which means the node transfer has occurred for this node.
// if (!isEpsetEqual(&pEntry->epset, &req.epset)) {
// nodeChanged = true;
// break;
// }
}
// todo handle the node timeout case. Once the vnode is off-line, we should check the dnode status from mnode,
// to identify whether the dnode is truely offline or not.
// handle the node changed case
if (!nodeChanged) {
return TSDB_CODE_SUCCESS;
}
int32_t nodeId = req.vgId;
{// check all streams that involved this vnode should update the epset info
SStreamObj *pStream = NULL;
void *pIter = NULL;
while (1) {
pIter = sdbFetch(pSdb, SDB_STREAM, pIter, (void **)&pStream);
if (pIter == NULL) {
break;
}
// update the related upstream and downstream tasks, todo remove this, no need this function
taosWLockLatch(&pStream->lock);
// streamTaskUpdateEpInfo(pStream->tasks, req.vgId, &req.epset);
// streamTaskUpdateEpInfo(pStream->pHTasksList, req.vgId, &req.epset);
taosWUnLockLatch(&pStream->lock);
// code = createStreamUpdateTrans(pMnode, pStream, nodeId, );
// if (code != TSDB_CODE_SUCCESS) {
// todo
//// }
// }
}
*/
return TSDB_CODE_SUCCESS;
}
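mndProcessStreamHb above guards the failed-checkpoint handling with execNodeList.activeCheckpoint, so the task-reset trans is issued at most once per failed checkpoint id even when several vnodes report the same failure. A minimal standalone sketch of that idempotence guard, with hypothetical names:

#include <stdint.h>
#include <stdio.h>

static int64_t lastResetCheckpointId = 0;

/* issue the reset only the first time a given failed checkpoint id is reported */
static void onCheckpointFailed(int64_t checkpointId) {
  if (checkpointId == lastResetCheckpointId) {
    printf("checkpoint:%lld reset has been issued already, ignore it\n", (long long)checkpointId);
    return;
  }
  lastResetCheckpointId = checkpointId;
  printf("checkpoint:%lld failed, issue task-reset\n", (long long)checkpointId);
}

int main(void) {
  onCheckpointFailed(101); /* issues the reset */
  onCheckpointFailed(101); /* duplicate report from another vnode, ignored */
  return 0;
}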

View File

@ -607,16 +607,6 @@ static int32_t mndProcessCreateTopicReq(SRpcMsg *pReq) {
code = TSDB_CODE_ACTION_IN_PROGRESS;
}
char detail[4000] = {0};
char sql[3000] = {0};
strncpy(sql, createTopicReq.sql, 2999);
SName tableName = {0};
tNameFromString(&tableName, createTopicReq.subStbName, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
sprintf(detail, "igExists:%d, subStbName:%s, subType:%d, withMeta:%d, sql:%s",
createTopicReq.igExists, tableName.tname, createTopicReq.subType, createTopicReq.withMeta, sql);
SName dbname = {0};
tNameFromString(&dbname, createTopicReq.subDbName, T_NAME_ACCT | T_NAME_DB);
@ -624,7 +614,8 @@ static int32_t mndProcessCreateTopicReq(SRpcMsg *pReq) {
tNameFromString(&topicName, createTopicReq.name, T_NAME_ACCT | T_NAME_DB);
//reuse this function for topic
auditRecord(pReq, pMnode->clusterId, "createTopic", topicName.dbname, dbname.dbname, detail);
auditRecord(pReq, pMnode->clusterId, "createTopic", topicName.dbname, dbname.dbname,
createTopicReq.sql, strlen(createTopicReq.sql));
_OVER:
if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) {
@ -675,10 +666,12 @@ static int32_t mndProcessDropTopicReq(SRpcMsg *pReq) {
if (pTopic == NULL) {
if (dropReq.igNotExists) {
mInfo("topic:%s, not exist, ignore not exist is set", dropReq.name);
tFreeSMDropTopicReq(&dropReq);
return 0;
} else {
terrno = TSDB_CODE_MND_TOPIC_NOT_EXIST;
mError("topic:%s, failed to drop since %s", dropReq.name, terrstr());
tFreeSMDropTopicReq(&dropReq);
return -1;
}
}
@ -819,17 +812,17 @@ end:
mndTransDrop(pTrans);
if (code != 0) {
mError("topic:%s, failed to drop since %s", dropReq.name, terrstr());
tFreeSMDropTopicReq(&dropReq);
return code;
}
char detail[100] = {0};
sprintf(detail, "igNotExists:%d", dropReq.igNotExists);
SName name = {0};
tNameFromString(&name, dropReq.name, T_NAME_ACCT | T_NAME_DB);
//reuse this function for topic
auditRecord(pReq, pMnode->clusterId, "dropTopic", name.dbname, "", detail);
auditRecord(pReq, pMnode->clusterId, "dropTopic", name.dbname, "", dropReq.sql, dropReq.sqlLen);
tFreeSMDropTopicReq(&dropReq);
return TSDB_CODE_ACTION_IN_PROGRESS;
}

View File

@ -544,7 +544,9 @@ STrans *mndAcquireTrans(SMnode *pMnode, int32_t transId) {
if (pTrans == NULL) {
terrno = TSDB_CODE_MND_TRANS_NOT_EXIST;
} else {
#ifdef WINDOWS
taosThreadMutexInit(&pTrans->mutex, NULL);
#endif
}
return pTrans;
}
@ -1697,7 +1699,6 @@ static int32_t mndRetrieveTrans(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBl
int32_t numOfRows = 0;
STrans *pTrans = NULL;
int32_t cols = 0;
char *pWrite;
while (numOfRows < rows) {
pShow->pIter = sdbFetch(pSdb, SDB_TRANS, pShow->pIter, (void **)&pTrans);

View File

@ -1275,11 +1275,7 @@ static int32_t mndProcessCreateUserReq(SRpcMsg *pReq) {
code = mndCreateUser(pMnode, pOperUser->acct, &createReq, pReq);
if (code == 0) code = TSDB_CODE_ACTION_IN_PROGRESS;
char detail[1000] = {0};
sprintf(detail, "createType:%d, enable:%d, superUser:%d, sysInfo:%d", createReq.createType, createReq.enable,
createReq.superUser, createReq.sysInfo);
auditRecord(pReq, pMnode->clusterId, "createUser", createReq.user, "", detail);
auditRecord(pReq, pMnode->clusterId, "createUser", createReq.user, "", createReq.sql, createReq.sqlLen);
_OVER:
if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) {
@ -1289,6 +1285,7 @@ _OVER:
mndReleaseUser(pMnode, pUser);
mndReleaseUser(pMnode, pOperUser);
tFreeSCreateUserReq(&createReq);
return code;
}
@ -1818,41 +1815,51 @@ static int32_t mndProcessAlterUserReq(SRpcMsg *pReq) {
code = mndAlterUser(pMnode, pUser, &newUser, pReq);
if (code == 0) code = TSDB_CODE_ACTION_IN_PROGRESS;
char detail[1000] = {0};
sprintf(detail, "alterType:%s, enable:%d, superUser:%d, sysInfo:%d, tabName:%s, password:",
mndUserAuditTypeStr(alterReq.alterType), alterReq.enable, alterReq.superUser, alterReq.sysInfo,
alterReq.tabName);
if (alterReq.alterType == TSDB_ALTER_USER_PASSWD) {
if(alterReq.alterType == TSDB_ALTER_USER_PASSWD){
char detail[1000] = {0};
sprintf(detail, "alterType:%s, enable:%d, superUser:%d, sysInfo:%d, tabName:%s, password:xxx",
mndUserAuditTypeStr(alterReq.alterType), alterReq.enable, alterReq.superUser, alterReq.sysInfo,
alterReq.tabName);
auditRecord(pReq, pMnode->clusterId, "alterUser", alterReq.user, "", detail);
} else if (alterReq.alterType == TSDB_ALTER_USER_SUPERUSER || alterReq.alterType == TSDB_ALTER_USER_ENABLE ||
alterReq.alterType == TSDB_ALTER_USER_SYSINFO) {
auditRecord(pReq, pMnode->clusterId, "alterUser", alterReq.user, "", detail);
} else if (alterReq.alterType == TSDB_ALTER_USER_ADD_READ_DB || alterReq.alterType == TSDB_ALTER_USER_ADD_WRITE_DB ||
alterReq.alterType == TSDB_ALTER_USER_ADD_ALL_DB || alterReq.alterType == TSDB_ALTER_USER_ADD_READ_TABLE ||
alterReq.alterType == TSDB_ALTER_USER_ADD_WRITE_TABLE ||
alterReq.alterType == TSDB_ALTER_USER_ADD_ALL_TABLE) {
if (strcmp(alterReq.objname, "1.*") != 0) {
auditRecord(pReq, pMnode->clusterId, "alterUser", alterReq.user, "", detail, strlen(detail));
}
else if(alterReq.alterType == TSDB_ALTER_USER_SUPERUSER ||
alterReq.alterType == TSDB_ALTER_USER_ENABLE ||
alterReq.alterType == TSDB_ALTER_USER_SYSINFO){
auditRecord(pReq, pMnode->clusterId, "alterUser", alterReq.user, "", alterReq.sql, alterReq.sqlLen);
}
else if(alterReq.alterType == TSDB_ALTER_USER_ADD_READ_DB||
alterReq.alterType == TSDB_ALTER_USER_ADD_WRITE_DB||
alterReq.alterType == TSDB_ALTER_USER_ADD_ALL_DB||
alterReq.alterType == TSDB_ALTER_USER_ADD_READ_TABLE||
alterReq.alterType == TSDB_ALTER_USER_ADD_WRITE_TABLE||
alterReq.alterType == TSDB_ALTER_USER_ADD_ALL_TABLE){
if (strcmp(alterReq.objname, "1.*") != 0){
SName name = {0};
tNameFromString(&name, alterReq.objname, T_NAME_ACCT | T_NAME_DB);
auditRecord(pReq, pMnode->clusterId, "GrantPrivileges", alterReq.user, name.dbname, detail);
} else {
auditRecord(pReq, pMnode->clusterId, "GrantPrivileges", alterReq.user, "*", detail);
auditRecord(pReq, pMnode->clusterId, "GrantPrivileges", alterReq.user, name.dbname,
alterReq.sql, alterReq.sqlLen);
}else{
auditRecord(pReq, pMnode->clusterId, "GrantPrivileges", alterReq.user, "*",
alterReq.sql, alterReq.sqlLen);
}
} else if (alterReq.alterType == TSDB_ALTER_USER_ADD_SUBSCRIBE_TOPIC) {
auditRecord(pReq, pMnode->clusterId, "GrantPrivileges", alterReq.user, alterReq.objname, detail);
} else if (alterReq.alterType == TSDB_ALTER_USER_REMOVE_SUBSCRIBE_TOPIC) {
auditRecord(pReq, pMnode->clusterId, "RevokePrivileges", alterReq.user, alterReq.objname, detail);
} else {
if (strcmp(alterReq.objname, "1.*") != 0) {
}
else if(alterReq.alterType == TSDB_ALTER_USER_ADD_SUBSCRIBE_TOPIC){
auditRecord(pReq, pMnode->clusterId, "GrantPrivileges", alterReq.user, alterReq.objname,
alterReq.sql, alterReq.sqlLen);
}
else if(alterReq.alterType == TSDB_ALTER_USER_REMOVE_SUBSCRIBE_TOPIC){
auditRecord(pReq, pMnode->clusterId, "RevokePrivileges", alterReq.user, alterReq.objname,
alterReq.sql, alterReq.sqlLen);
}
else{
if (strcmp(alterReq.objname, "1.*") != 0){
SName name = {0};
tNameFromString(&name, alterReq.objname, T_NAME_ACCT | T_NAME_DB);
auditRecord(pReq, pMnode->clusterId, "RevokePrivileges", alterReq.user, name.dbname, detail);
} else {
auditRecord(pReq, pMnode->clusterId, "RevokePrivileges", alterReq.user, "*", detail);
auditRecord(pReq, pMnode->clusterId, "RevokePrivileges", alterReq.user, name.dbname,
alterReq.sql, alterReq.sqlLen);
}else{
auditRecord(pReq, pMnode->clusterId, "RevokePrivileges", alterReq.user, "*",
alterReq.sql, alterReq.sqlLen);
}
}
@ -1926,7 +1933,7 @@ static int32_t mndProcessDropUserReq(SRpcMsg *pReq) {
code = mndDropUser(pMnode, pReq, pUser);
if (code == 0) code = TSDB_CODE_ACTION_IN_PROGRESS;
auditRecord(pReq, pMnode->clusterId, "dropUser", dropReq.user, "", "");
auditRecord(pReq, pMnode->clusterId, "dropUser", dropReq.user, "", dropReq.sql, dropReq.sqlLen);
_OVER:
if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) {
@ -1934,6 +1941,7 @@ _OVER:
}
mndReleaseUser(pMnode, pUser);
tFreeSDropUserReq(&dropReq);
return code;
}
@ -2423,6 +2431,47 @@ int32_t mndUserRemoveDb(SMnode *pMnode, STrans *pTrans, char *db) {
return code;
}
int32_t mndUserRemoveStb(SMnode *pMnode, STrans *pTrans, char *stb) {
int32_t code = 0;
SSdb *pSdb = pMnode->pSdb;
int32_t len = strlen(stb) + 1;
void *pIter = NULL;
SUserObj *pUser = NULL;
SUserObj newUser = {0};
while (1) {
pIter = sdbFetch(pSdb, SDB_USER, pIter, (void **)&pUser);
if (pIter == NULL) break;
code = -1;
if (mndUserDupObj(pUser, &newUser) != 0) {
break;
}
bool inRead = (taosHashGet(newUser.readTbs, stb, len) != NULL);
bool inWrite = (taosHashGet(newUser.writeTbs, stb, len) != NULL);
if (inRead || inWrite) {
(void)taosHashRemove(newUser.readTbs, stb, len);
(void)taosHashRemove(newUser.writeTbs, stb, len);
SSdbRaw *pCommitRaw = mndUserActionEncode(&newUser);
if (pCommitRaw == NULL || mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) {
break;
}
(void)sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY);
}
mndUserFreeObj(&newUser);
sdbRelease(pSdb, pUser);
code = 0;
}
if (pUser != NULL) sdbRelease(pSdb, pUser);
if (pIter != NULL) sdbCancelFetch(pSdb, pIter);
mndUserFreeObj(&newUser);
return code;
}
int32_t mndUserRemoveTopic(SMnode *pMnode, STrans *pTrans, char *topic) {
int32_t code = 0;
SSdb *pSdb = pMnode->pSdb;

View File

@ -2177,11 +2177,7 @@ static int32_t mndProcessRedistributeVgroupMsg(SRpcMsg *pReq) {
char obj[33] = {0};
sprintf(obj, "%d", req.vgId);
char detail[1000] = {0};
sprintf(detail, "dnodeId1:%d, dnodeId2:%d, dnodeId3:%d",
req.dnodeId1, req.dnodeId2, req.dnodeId3);
auditRecord(pReq, pMnode->clusterId, "RedistributeVgroup", obj, "", detail);
auditRecord(pReq, pMnode->clusterId, "RedistributeVgroup", obj, "", req.sql, req.sqlLen);
_OVER:
if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) {
@ -2197,6 +2193,7 @@ _OVER:
mndReleaseDnode(pMnode, pOld3);
mndReleaseVgroup(pMnode, pVgroup);
mndReleaseDb(pMnode, pDb);
tFreeSRedistributeVgroupReq(&req);
return code;
}
@ -2993,7 +2990,7 @@ static int32_t mndProcessBalanceVgroupMsg(SRpcMsg *pReq) {
code = mndBalanceVgroup(pMnode, pReq, pArray);
}
auditRecord(pReq, pMnode->clusterId, "balanceVgroup", "", "", "");
auditRecord(pReq, pMnode->clusterId, "balanceVgroup", "", "", req.sql, req.sqlLen);
_OVER:
if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) {
@ -3001,6 +2998,7 @@ _OVER:
}
taosArrayDestroy(pArray);
tFreeSBalanceVgroupReq(&req);
return code;
}

View File

@ -65,7 +65,7 @@ TEST_F(MndTestProfile, 01_ConnectMsg) {
connId = connectRsp.connId;
}
TEST_F(MndTestProfile, 02_ConnectMsg_InvalidDB) {
TEST_F(MndTestProfile, 02_ConnectMsg_NotExistDB) {
char passwd[] = "taosdata";
char secretEncrypt[TSDB_PASSWORD_LEN + 1] = {0};
taosEncryptPass_c((uint8_t*)passwd, strlen(passwd), secretEncrypt);
@ -73,7 +73,7 @@ TEST_F(MndTestProfile, 02_ConnectMsg_InvalidDB) {
SConnectReq connectReq = {0};
connectReq.pid = 1234;
strcpy(connectReq.app, "mnode_test_profile");
strcpy(connectReq.db, "invalid_db");
strcpy(connectReq.db, "not_exist_db");
strcpy(connectReq.user, "root");
strcpy(connectReq.passwd, secretEncrypt);
strcpy(connectReq.sVer, version);
@ -84,7 +84,7 @@ TEST_F(MndTestProfile, 02_ConnectMsg_InvalidDB) {
SRpcMsg* pRsp = test.SendReq(TDMT_MND_CONNECT, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_INVALID_DB);
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_DB_NOT_EXIST);
ASSERT_EQ(pRsp->contLen, 0);
}

View File

@ -448,7 +448,7 @@ TEST_F(MndTestStb, 02_Alter_Stb_AddTag) {
{
void* pReq = BuildAlterStbAddTagReq("1.d3.stb", "tag4", &contLen);
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_INVALID_DB);
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_DB_NOT_EXIST);
}
{
@ -665,7 +665,7 @@ TEST_F(MndTestStb, 06_Alter_Stb_AddColumn) {
{
void* pReq = BuildAlterStbAddColumnReq("1.d7.stb", "tag4", &contLen);
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_INVALID_DB);
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_DB_NOT_EXIST);
}
{

Some files were not shown because too many files have changed in this diff