diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 0000000000..be2be525ba --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,5 @@ +# Security Policy + +## Reporting a Vulnerability + +Please report security vulnerabilities via https://github.com/taosdata/TDengine/security/advisories. diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index e3e48ac3a1..a963e4497f 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -399,7 +399,7 @@ if(${BUILD_WITH_COS}) INCLUDE_DIRECTORIES($ENV{HOME}/.cos-local.1/include) MESSAGE("$ENV{HOME}/.cos-local.1/include") - set(CMAKE_BUILD_TYPE debug) + set(CMAKE_BUILD_TYPE Release) set(ORIG_CMAKE_PROJECT_NAME ${CMAKE_PROJECT_NAME}) set(CMAKE_PROJECT_NAME cos_c_sdk) diff --git a/deps/arm/dm_static/libdmodule.a b/deps/arm/dm_static/libdmodule.a index 5f0f0e38b3..f71b97dc2b 100644 Binary files a/deps/arm/dm_static/libdmodule.a and b/deps/arm/dm_static/libdmodule.a differ diff --git a/deps/x86/dm_static/libdmodule.a b/deps/x86/dm_static/libdmodule.a index f5548e6988..348568f8d1 100644 Binary files a/deps/x86/dm_static/libdmodule.a and b/deps/x86/dm_static/libdmodule.a differ diff --git a/docs/en/02-intro/index.md b/docs/en/02-intro/index.md index 4d65c86371..f9fe68b47a 100644 --- a/docs/en/02-intro/index.md +++ b/docs/en/02-intro/index.md @@ -4,11 +4,11 @@ description: This document introduces the major features, competitive advantages toc_max_heading_level: 2 --- -TDengine is an [open source](https://tdengine.com/tdengine/open-source-time-series-database/), [high-performance](https://tdengine.com/tdengine/high-performance-time-series-database/), [cloud native](https://tdengine.com/tdengine/cloud-native-time-series-database/) [time-series database](https://tdengine.com/tsdb/) optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. Its code, including its cluster feature is open source under GNU AGPL v3.0. Besides the database engine, it provides [caching](../develop/cache), [stream processing](../develop/stream), [data subscription](../develop/tmq) and other functionalities to reduce the system complexity and cost of development and operation. +TDengine is a big data platform designed and optimized for IoT (Internet of Things) and Industrial Internet. It can safely and effectively converge, store, process, and distribute the high-volume data (TB or even PB) generated every day by a large number of devices and data acquisition units, monitor and alert on business operation status in real time, and provide real-time business insights. The core component of TDengine is TDengine OSS, which is a high-performance, open-source, cloud-native, simplified time-series database. This section introduces the major features, competitive advantages, typical use cases, and benchmarks to help you get a high-level overview of TDengine. 
-## Major Features +## Major Features of TDengine OSS The major features are listed below: @@ -132,3 +132,9 @@ As a high-performance, scalable and SQL supported time-series database, TDengine - [Introduction to Time-Series Database](https://tdengine.com/tsdb/) - [Introduction to TDengine competitive advantages](https://tdengine.com/tdengine/) + +## Products + +TDengine offers two commercial products: TDengine Enterprise and TDengine Cloud. For details, please refer to +- [TDengine Enterprise](https://www.taosdata.com/tdengine-pro) +- [TDengine Cloud](https://cloud.taosdata.com/?utm_source=menu&utm_medium=webcn) diff --git a/docs/en/07-develop/01-connect/index.md b/docs/en/07-develop/01-connect/index.md index 49c0b11a36..3ca44783c4 100644 --- a/docs/en/07-develop/01-connect/index.md +++ b/docs/en/07-develop/01-connect/index.md @@ -221,7 +221,7 @@ curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive && tar -xzf php-tdengine.tar.gz -C php-tdengine --strip-components=1 ``` -> Version number `v1.0.2` is only for example, it can be replaced to any newer version, please check available version from [TDengine PHP Connector Releases](https://github.com/Yurunsoft/php-tdengine/releases). +> Version number `v1.0.2` is only an example; it can be replaced with any newer version. **Non-Swoole Environment: ** diff --git a/docs/en/07-develop/02-model/index.mdx b/docs/en/07-develop/02-model/index.mdx index db5a259cfe..4524a66a41 100644 --- a/docs/en/07-develop/02-model/index.mdx +++ b/docs/en/07-develop/02-model/index.mdx @@ -55,7 +55,7 @@ At most 4096 columns are allowed in a STable. If there are more than 4096 of met ## Create Table -A specific table needs to be created for each data collection point. Similar to RDBMS, table name and schema are required to create a table. Additionally, one or more tags can be created for each table. To create a table, a STable needs to be used as template and the values need to be specified for the tags. For example, for the meters in [Table 1](/tdinternal/arch#model_table1), the table can be created using below SQL statement. +A specific table needs to be created for each data collection point. Similar to RDBMS, table name and schema are required to create a table. Additionally, one or more tags can be created for each table. To create a table, a STable needs to be used as a template and the values need to be specified for the tags. For example, for the smart meters, the table can be created using the SQL statement below. ```sql CREATE TABLE d1001 USING meters TAGS ("California.SanFrancisco", 2); diff --git a/docs/en/07-develop/07-tmq.mdx b/docs/en/07-develop/07-tmq.mdx index f833dbf439..ecadb5a499 100644 --- a/docs/en/07-develop/07-tmq.mdx +++ b/docs/en/07-develop/07-tmq.mdx @@ -352,10 +352,10 @@ You configure the following parameters when creating a consumer: | `td.connect.port` | string | Port of the server side | | | `group.id` | string | Consumer group ID; consumers with the same ID are in the same group | **Required**. Maximum length: 192. Each topic can create up to 100 consumer groups. | | `client.id` | string | Client ID | Maximum length: 192. 
| -| `auto.offset.reset` | enum | Initial offset for the consumer group | `earliest`: subscribe from the earliest data, this is the default behavior; `latest`: subscribe from the latest data; or `none`: can't subscribe without committed offset| +| `auto.offset.reset` | enum | Initial offset for the consumer group | `earliest`: subscribe from the earliest data, this is the default behavior (versions before 3.2.0.0); `latest`: subscribe from the latest data, this is the default behavior (version 3.2.0.0 and later); or `none`: can't subscribe without committed offset| | `enable.auto.commit` | boolean | Commit automatically; true: user application doesn't need to explicitly commit; false: user application need to handle commit by itself | Default value is true | | `auto.commit.interval.ms` | integer | Interval for automatic commits, in milliseconds | -| `msg.with.table.name` | boolean | Specify whether to deserialize table names from messages | default value: false +| `msg.with.table.name` | boolean | Specify whether to deserialize table names from messages. Not applicable when subscribing to a column (tbname can be written as a column in the subquery statement during column subscriptions). (This parameter has been deprecated since version 3.2.0.0 and is always treated as true.) | default value: false The method of specifying these parameters depends on the language used: @@ -458,7 +458,19 @@ from taos.tmq import Consumer # Syntax: `consumer = Consumer(configs)` # # Example: -consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"}) +consumer = Consumer( + { + "group.id": "local", + "client.id": "1", + "enable.auto.commit": "true", + "auto.commit.interval.ms": "1000", + "td.connect.ip": "127.0.0.1", + "td.connect.user": "root", + "td.connect.pass": "taosdata", + "auto.offset.reset": "earliest", + "msg.with.table.name": "true", + } +) ``` diff --git a/docs/en/12-taos-sql/06-select.md b/docs/en/12-taos-sql/06-select.md index d38fc86975..f1e19a5449 100755 --- a/docs/en/12-taos-sql/06-select.md +++ b/docs/en/12-taos-sql/06-select.md @@ -24,7 +24,7 @@ SELECT [hints] [DISTINCT] select_list hints: /*+ [hint([hint_param_list])] [hint([hint_param_list])] */ hint: - BATCH_SCAN | NO_BATCH_SCAN + BATCH_SCAN | NO_BATCH_SCAN | SORT_FOR_GROUP select_list: select_expr [, select_expr] ... 
@@ -87,15 +87,17 @@ Hints are a means of user control over query optimization for individual stateme The list of currently supported Hints is as follows: -| **Hint** | **Params** | **Comment** | **Scopt** | -| :-----------: | -------------- | -------------------------- | -------------------------- | -| BATCH_SCAN | None | Batch table scan | JOIN statment for stable | -| NO_BATCH_SCAN | None | Sequential table scan | JOIN statment for stable | +| **Hint** | **Params** | **Comment** | **Scope** | +| :-----------: | -------------- | -------------------------- | -----------------------------------| +| BATCH_SCAN | None | Batch table scan | JOIN statement for stable | +| NO_BATCH_SCAN | None | Sequential table scan | JOIN statement for stable | +| SORT_FOR_GROUP| None | Use sort for partition | When the PARTITION BY list contains a normal column | For example: ```sql SELECT /*+ BATCH_SCAN() */ a.ts FROM stable1 a, stable2 b where a.tag0 = b.tag0 and a.ts = b.ts; +SELECT /*+ SORT_FOR_GROUP() */ count(*), c1 FROM stable1 PARTITION BY c1; ``` ## Lists diff --git a/docs/en/12-taos-sql/16-operators.md b/docs/en/12-taos-sql/16-operators.md index 6b7adb4a3d..ce8ab8a03c 100644 --- a/docs/en/12-taos-sql/16-operators.md +++ b/docs/en/12-taos-sql/16-operators.md @@ -54,6 +54,7 @@ LIKE is used together with wildcards to match strings. Its usage is described as MATCH and NMATCH are used together with regular expressions to match strings. Their usage is described as follows: - Use POSIX regular expression syntax. For more information, see Regular Expressions. +- The `MATCH` operator returns true when the regular expression is matched. The `NMATCH` operator returns true when the regular expression is not matched. - Regular expression can be used against only table names, i.e. `tbname`, and tags/columns of binary/nchar types. - The maximum length of regular expression string is 128 bytes. Configuration parameter `maxRegexStringLen` can be used to set the maximum allowed regular expression. It's a configuration parameter on the client side, and will take effect after restarting the client. diff --git a/docs/en/12-taos-sql/20-keywords.md b/docs/en/12-taos-sql/20-keywords.md index 983d4f63c9..36cbc0948f 100644 --- a/docs/en/12-taos-sql/20-keywords.md +++ b/docs/en/12-taos-sql/20-keywords.md @@ -180,6 +180,7 @@ The following list shows all reserved keywords: - MAX_DELAY - BWLIMIT - MAXROWS +- MAX_SPEED - MERGE - META - MINROWS diff --git a/docs/en/12-taos-sql/22-meta.md b/docs/en/12-taos-sql/22-meta.md index 37304633e7..fad479d9d3 100644 --- a/docs/en/12-taos-sql/22-meta.md +++ b/docs/en/12-taos-sql/22-meta.md @@ -26,75 +26,85 @@ This document introduces the tables of INFORMATION_SCHEMA and their structure. ## INS_DNODES -Provides information about dnodes. Similar to SHOW DNODES. +Provides information about dnodes. Similar to SHOW DNODES. Users whose SYSINFO attribute is 0 can't view this table. | # | **Column** | **Data Type** | **Description** | | --- | :------------: | ------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- | | 1 | vnodes | SMALLINT | Current number of vnodes on the dnode. It should be noted that `vnodes` is a TDengine keyword and needs to be escaped with ` when used as a column name. 
| | 2 | support_vnodes | SMALLINT | Maximum number of vnodes on the dnode | -| 3 | status | BINARY(10) | Current status | -| 4 | note | BINARY(256) | Reason for going offline or other information | +| 3 | status | VARCHAR(10) | Current status | +| 4 | note | VARCHAR(256) | Reason for going offline or other information | | 5 | id | SMALLINT | Dnode ID | -| 6 | endpoint | BINARY(134) | Dnode endpoint | +| 6 | endpoint | VARCHAR(134) | Dnode endpoint | | 7 | create | TIMESTAMP | Creation time | ## INS_MNODES -Provides information about mnodes. Similar to SHOW MNODES. +Provides information about mnodes. Similar to SHOW MNODES. Users whose SYSINFO attribute is 0 can't view this table. | # | **Column** | **Data Type** | **Description** | | --- | :---------: | ------------- | ------------------------------------------ | | 1 | id | SMALLINT | Mnode ID | -| 2 | endpoint | BINARY(134) | Mnode endpoint | -| 3 | role | BINARY(10) | Current role | +| 2 | endpoint | VARCHAR(134) | Mnode endpoint | +| 3 | role | VARCHAR(10) | Current role | | 4 | role_time | TIMESTAMP | Time at which the current role was assumed | | 5 | create_time | TIMESTAMP | Creation time | ## INS_QNODES -Provides information about qnodes. Similar to SHOW QNODES. +Provides information about qnodes. Similar to SHOW QNODES. Users whose SYSINFO attribute is 0 can't view this table. | # | **Column** | **Data Type** | **Description** | | --- | :---------: | ------------- | --------------- | | 1 | id | SMALLINT | Qnode ID | -| 2 | endpoint | BINARY(134) | Qnode endpoint | +| 2 | endpoint | VARCHAR(134) | Qnode endpoint | +| 3 | create_time | TIMESTAMP | Creation time | + +## INS_SNODES + +Provides information about snodes. Similar to SHOW SNODES. Users whose SYSINFO attribute is 0 can't view this table. + +| # | **Column** | **Data Type** | **Description** | +| --- | :---------: | ------------- | --------------- | +| 1 | id | SMALLINT | Snode ID | +| 2 | endpoint | VARCHAR(134) | Snode endpoint | | 3 | create_time | TIMESTAMP | Creation time | ## INS_CLUSTER -Provides information about the cluster. +Provides information about the cluster. Users whose SYSINFO attribute is 0 can't view this table. | # | **Column** | **Data Type** | **Description** | | --- | :---------: | ------------- | --------------- | | 1 | id | BIGINT | Cluster ID | -| 2 | name | BINARY(134) | Cluster name | +| 2 | name | VARCHAR(134) | Cluster name | | 3 | create_time | TIMESTAMP | Creation time | ## INS_DATABASES Provides information about user-created databases. Similar to SHOW DATABASES. -| # | **Column** | **Data Type** | **Description** | +| # | **Column** | **Data Type** | **Description** | | --- | :------------------: | ---------------- | ------------------------------------------------ | -| 1| name| BINARY(32)| Database name | +| 1 | name | VARCHAR(64) | Database name | | 2 | create_time | TIMESTAMP | Creation time | | 3 | ntables | INT | Number of standard tables and subtables (not including supertables) | | 4 | vgroups | INT | Number of vgroups. It should be noted that `vnodes` is a TDengine keyword and needs to be escaped with ` when used as a column name. | | 6 | replica | INT | Number of replicas. It should be noted that `replica` is a TDengine keyword and needs to be escaped with ` when used as a column name. | -| 7 | strict | BINARY(4) | Obsoleted | -| 8 | duration | INT | Duration for storage of single files. It should be noted that `duration` is a TDengine keyword and needs to be escaped with ` when used as a column name. 
| -| 9 | keep | INT | Data retention period. It should be noted that `keep` is a TDengine keyword and needs to be escaped with ` when used as a column name. | +| 7 | strict | VARCHAR(4) | Obsoleted | +| 8 | duration | VARCHAR(10) | Duration for storage of single files. It should be noted that `duration` is a TDengine keyword and needs to be escaped with ` when used as a column name. | +| 9 | keep | VARCHAR(32) | Data retention period. It should be noted that `keep` is a TDengine keyword and needs to be escaped with ` when used as a column name. | | 10 | buffer | INT | Write cache size per vnode, in MB. It should be noted that `buffer` is a TDengine keyword and needs to be escaped with ` when used as a column name. | | 11 | pagesize | INT | Page size for vnode metadata storage engine, in KB. It should be noted that `pagesize` is a TDengine keyword and needs to be escaped with ` when used as a column name. | | 12 | pages | INT | Number of pages per vnode metadata storage engine. It should be noted that `pages` is a TDengine keyword and needs to be escaped with ` when used as a column name. | | 13 | minrows | INT | Maximum number of records per file block. It should be noted that `minrows` is a TDengine keyword and needs to be escaped with ` when used as a column name. | | 14 | maxrows | INT | Minimum number of records per file block. It should be noted that `maxrows` is a TDengine keyword and needs to be escaped with ` when used as a column name. | | 15 | comp | INT | Compression method. It should be noted that `comp` is a TDengine keyword and needs to be escaped with ` when used as a column name. | -| 16 | precision | BINARY(2) | Time precision. It should be noted that `precision` is a TDengine keyword and needs to be escaped with ` when used as a column name. | -| 17 | status | BINARY(10) | Current database status | -| 18 | retentions | BINARY (60) | Aggregation interval and retention period. It should be noted that `retentions` is a TDengine keyword and needs to be escaped with ` when used as a column name. | +| 16 | precision | VARCHAR(2) | Time precision. It should be noted that `precision` is a TDengine keyword and needs to be escaped with ` when used as a column name. | +| 17 | status | VARCHAR(10) | Current database status | +| 18 | retentions | VARCHAR(60) | Aggregation interval and retention period. It should be noted that `retentions` is a TDengine keyword and needs to be escaped with ` when used as a column name. | | 19 | single_stable | BOOL | Whether the database can contain multiple supertables. It should be noted that `single_stable` is a TDengine keyword and needs to be escaped with ` when used as a column name. | -| 20 | cachemodel | BINARY(60) | Caching method for the newest data. It should be noted that `cachemodel` is a TDengine keyword and needs to be escaped with ` when used as a column name. | +| 20 | cachemodel | VARCHAR(60) | Caching method for the newest data. It should be noted that `cachemodel` is a TDengine keyword and needs to be escaped with ` when used as a column name. | | 21 | cachesize | INT | Memory per vnode used for caching the newest data. It should be noted that `cachesize` is a TDengine keyword and needs to be escaped with ` when used as a column name. | | 22 | wal_level | INT | WAL level. It should be noted that `wal_level` is a TDengine keyword and needs to be escaped with ` when used as a column name. | | 23 | wal_fsync_period | INT | Interval at which WAL is written to disk. 
It should be noted that `wal_fsync_period` is a TDengine keyword and needs to be escaped with ` when used as a column name. | @@ -111,15 +121,15 @@ Provides information about user-defined functions. | # | **Column** | **Data Type** | **Description** | | --- | :-----------: | ------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| 1 | name | BINARY(64) | Function name | -| 2 | comment | BINARY(255) | Function description. It should be noted that `comment` is a TDengine keyword and needs to be escaped with ` when used as a column name. | +| 1 | name | VARCHAR(64) | Function name | +| 2 | comment | VARCHAR(255) | Function description. It should be noted that `comment` is a TDengine keyword and needs to be escaped with ` when used as a column name. | | 3 | aggregate | INT | Whether the UDF is an aggregate function. It should be noted that `aggregate` is a TDengine keyword and needs to be escaped with ` when used as a column name. | -| 4 | output_type | BINARY(31) | Output data type | +| 4 | output_type | VARCHAR(31) | Output data type | | 5 | create_time | TIMESTAMP | Creation time | | 6 | code_len | INT | Length of the source code | | 7 | bufsize | INT | Buffer size | -| 8 | func_language | BINARY(31) | UDF programming language | -| 9 | func_body | BINARY(16384) | UDF function body | +| 8 | func_language | VARCHAR(31) | UDF programming language | +| 9 | func_body | VARCHAR(16384) | UDF function body | | 10 | func_version | INT | UDF function version. starting from 0. Increasing by 1 each time it is updated | ## INS_INDEXES Provides information about user-created indices. Similar to SHOW INDEX. | # | **Column** | **Data Type** | **Description** | | --- | :--------------: | ------------- | --------------------------------------------------------------------- | -| 1 | db_name | BINARY(32) | Database containing the table with the specified index | -| 2 | table_name | BINARY(192) | Table containing the specified index | -| 3 | index_name | BINARY(192) | Index name | -| 4 | db_name | BINARY(64) | Index column | -| 5 | index_type | BINARY(10) | SMA or tag index | -| 6 | index_extensions | BINARY(256) | Other information For SMA/tag indices, this shows a list of functions | +| 1 | db_name | VARCHAR(32) | Database containing the table with the specified index | +| 2 | table_name | VARCHAR(192) | Table containing the specified index | +| 3 | index_name | VARCHAR(192) | Index name | +| 4 | db_name | VARCHAR(64) | Index column | +| 5 | index_type | VARCHAR(10) | SMA or tag index | +| 6 | index_extensions | VARCHAR(256) | Other information. For SMA/tag indices, this shows a list of functions | ## INS_STABLES Provides information about supertables. | # | **Column** | **Data Type** | **Description** | | --- | :-----------: | ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| 1 | stable_name | BINARY(192) | Supertable name | -| 2 | db_name | BINARY(64) | All databases in the supertable | +| 1 | stable_name | VARCHAR(192) | Supertable name | +| 2 | db_name | VARCHAR(64) | All databases in the supertable | | 3 | create_time | TIMESTAMP | Creation time | | 4 | columns | INT | Number of columns | | 5 | tags | INT | Number of tags. 
It should be noted that `tags` is a TDengine keyword and needs to be escaped with ` when used as a column name. | | 6 | last_update | TIMESTAMP | Last updated time | -| 7 | table_comment | BINARY(1024) | Table description | -| 8 | watermark | BINARY(64) | Window closing time. It should be noted that `watermark` is a TDengine keyword and needs to be escaped with ` when used as a column name. | -| 9 | max_delay | BINARY(64) | Maximum delay for pushing stream processing results. It should be noted that `max_delay` is a TDengine keyword and needs to be escaped with ` when used as a column name. | -| 10 | rollup | BINARY(128) | Rollup aggregate function. It should be noted that `rollup` is a TDengine keyword and needs to be escaped with ` when used as a column name. | +| 7 | table_comment | VARCHAR(1024) | Table description | +| 8 | watermark | VARCHAR(64) | Window closing time. It should be noted that `watermark` is a TDengine keyword and needs to be escaped with ` when used as a column name. | +| 9 | max_delay | VARCHAR(64) | Maximum delay for pushing stream processing results. It should be noted that `max_delay` is a TDengine keyword and needs to be escaped with ` when used as a column name. | +| 10 | rollup | VARCHAR(128) | Rollup aggregate function. It should be noted that `rollup` is a TDengine keyword and needs to be escaped with ` when used as a column name. | ## INS_TABLES @@ -158,37 +168,37 @@ Provides information about standard tables and subtables. | # | **Column** | **Data Type** | **Description** | | --- | :-----------: | ------------- | ---------------------------------------------------------------------------------------------------------------------------------- | -| 1 | table_name | BINARY(192) | Table name | -| 2 | db_name | BINARY(64) | Database name | +| 1 | table_name | VARCHAR(192) | Table name | +| 2 | db_name | VARCHAR(64) | Database name | | 3 | create_time | TIMESTAMP | Creation time | | 4 | columns | INT | Number of columns | -| 5 | stable_name | BINARY(192) | Supertable name | +| 5 | stable_name | VARCHAR(192) | Supertable name | | 6 | uid | BIGINT | Table ID | | 7 | vgroup_id | INT | Vgroup ID | | 8 | ttl | INT | Table time-to-live. It should be noted that `ttl` is a TDengine keyword and needs to be escaped with ` when used as a column name. 
| -| 9 | table_comment | BINARY(1024) | Table description | -| 10 | type | BINARY(20) | Table type | +| 9 | table_comment | VARCHAR(1024) | Table description | +| 10 | type | VARCHAR(20) | Table type | ## INS_TAGS | # | **Column** | **Data Type** | **Description** | | --- | :---------: | ------------- | --------------- | -| 1 | table_name | BINARY(192) | Table name | -| 2 | db_name | BINARY(64) | Database name | -| 3 | stable_name | BINARY(192) | Supertable name | -| 4 | tag_name | BINARY(64) | Tag name | -| 5 | tag_type | BINARY(64) | Tag type | -| 6 | tag_value | BINARY(16384) | Tag value | +| 1 | table_name | VARCHAR(192) | Table name | +| 2 | db_name | VARCHAR(64) | Database name | +| 3 | stable_name | VARCHAR(192) | Supertable name | +| 4 | tag_name | VARCHAR(64) | Tag name | +| 5 | tag_type | VARCHAR(64) | Tag type | +| 6 | tag_value | VARCHAR(16384) | Tag value | ## INS_COLUMNS | # | **Column** | **Data Type** | **Description** | | --- | :-----------: | ------------- | ---------------- | -| 1 | table_name | BINARY(192) | Table name | -| 2 | db_name | BINARY(64) | Database name | -| 3 | table_type | BINARY(21) | Table type | -| 4 | col_name | BINARY(64) | Column name | -| 5 | col_type | BINARY(32) | Column type | +| 1 | table_name | VARCHAR(192) | Table name | +| 2 | db_name | VARCHAR(64) | Database name | +| 3 | table_type | VARCHAR(21) | Table type | +| 4 | col_name | VARCHAR(64) | Column name | +| 5 | col_type | VARCHAR(32) | Column type | | 6 | col_length | INT | Column length | | 7 | col_precision | INT | Column precision | | 8 | col_scale | INT | Column scale | @@ -196,51 +206,51 @@ Provides information about standard tables and subtables. ## INS_USERS -Provides information about TDengine users. +Provides information about TDengine users. Users whose SYSINFO attribute is 0 can't view this table. | # | **Column** | **Data Type** | **Description** | | --- | :---------: | ------------- | ---------------- | -| 1 | user_name | BINARY(23) | User name | -| 2 | privilege | BINARY(256) | User permissions | +| 1 | user_name | VARCHAR(23) | User name | +| 2 | privilege | VARCHAR(256) | User permissions | | 3 | create_time | TIMESTAMP | Creation time | ## INS_GRANTS -Provides information about TDengine Enterprise Edition permissions. +Provides information about TDengine Enterprise Edition permissions. Users whose SYSINFO attribute is 0 can't view this table. | # | **Column** | **Data Type** | **Description** | | --- | :---------: | ------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| 1 | version | BINARY(9) | Whether the deployment is a licensed or trial version | -| 2 | cpu_cores | BINARY(9) | CPU cores included in license | -| 3 | dnodes | BINARY(10) | Dnodes included in license. It should be noted that `dnodes` is a TDengine keyword and needs to be escaped with ` when used as a column name. | -| 4 | streams | BINARY(10) | Streams included in license. It should be noted that `streams` is a TDengine keyword and needs to be escaped with ` when used as a column name. | -| 5 | users | BINARY(10) | Users included in license. It should be noted that `users` is a TDengine keyword and needs to be escaped with ` when used as a column name. | -| 6 | accounts | BINARY(10) | Accounts included in license. It should be noted that `accounts` is a TDengine keyword and needs to be escaped with ` when used as a column name. 
| -| 7 | storage | BINARY(21) | Storage space included in license. It should be noted that `storage` is a TDengine keyword and needs to be escaped with ` when used as a column name. | -| 8 | connections | BINARY(21) | Client connections included in license. It should be noted that `connections` is a TDengine keyword and needs to be escaped with ` when used as a column name. | -| 9 | databases | BINARY(11) | Databases included in license. It should be noted that `databases` is a TDengine keyword and needs to be escaped with ` when used as a column name. | -| 10 | speed | BINARY(9) | Write speed specified in license (data points per second) | -| 11 | querytime | BINARY(9) | Total query time specified in license | -| 12 | timeseries | BINARY(21) | Number of metrics included in license | -| 13 | expired | BINARY(5) | Whether the license has expired | -| 14 | expire_time | BINARY(19) | When the trial period expires | +| 1 | version | VARCHAR(9) | Whether the deployment is a licensed or trial version | +| 2 | cpu_cores | VARCHAR(9) | CPU cores included in license | +| 3 | dnodes | VARCHAR(10) | Dnodes included in license. It should be noted that `dnodes` is a TDengine keyword and needs to be escaped with ` when used as a column name. | +| 4 | streams | VARCHAR(10) | Streams included in license. It should be noted that `streams` is a TDengine keyword and needs to be escaped with ` when used as a column name. | +| 5 | users | VARCHAR(10) | Users included in license. It should be noted that `users` is a TDengine keyword and needs to be escaped with ` when used as a column name. | +| 6 | accounts | VARCHAR(10) | Accounts included in license. It should be noted that `accounts` is a TDengine keyword and needs to be escaped with ` when used as a column name. | +| 7 | storage | VARCHAR(21) | Storage space included in license. It should be noted that `storage` is a TDengine keyword and needs to be escaped with ` when used as a column name. | +| 8 | connections | VARCHAR(21) | Client connections included in license. It should be noted that `connections` is a TDengine keyword and needs to be escaped with ` when used as a column name. | +| 9 | databases | VARCHAR(11) | Databases included in license. It should be noted that `databases` is a TDengine keyword and needs to be escaped with ` when used as a column name. | +| 10 | speed | VARCHAR(9) | Write speed specified in license (data points per second) | +| 11 | querytime | VARCHAR(9) | Total query time specified in license | +| 12 | timeseries | VARCHAR(21) | Number of metrics included in license | +| 13 | expired | VARCHAR(5) | Whether the license has expired | +| 14 | expire_time | VARCHAR(19) | When the trial period expires | ## INS_VGROUPS -Provides information about vgroups. +Provides information about vgroups. Users whose SYSINFO attribute is 0 can't view this table. | # | **Column** | **Data Type** | **Description** | | --- | :--------: | ------------- | ----------------------------------------------------------------------------------------------------------------------------------- | | 1 | vgroup_id | INT | Vgroup ID | -| 2 | db_name | BINARY(32) | Database name | +| 2 | db_name | VARCHAR(32) | Database name | | 3 | tables | INT | Tables in vgroup. It should be noted that `tables` is a TDengine keyword and needs to be escaped with ` when used as a column name. 
| -| 4 | status | BINARY(10) | Vgroup status | +| 4 | status | VARCHAR(10) | Vgroup status | | 5 | v1_dnode | INT | Dnode ID of first vgroup member | -| 6 | v1_status | BINARY(10) | Status of first vgroup member | +| 6 | v1_status | VARCHAR(10) | Status of first vgroup member | | 7 | v2_dnode | INT | Dnode ID of second vgroup member | -| 8 | v2_status | BINARY(10) | Status of second vgroup member | +| 8 | v2_status | VARCHAR(10) | Status of second vgroup member | | 9 | v3_dnode | INT | Dnode ID of third vgroup member | -| 10 | v3_status | BINARY(10) | Status of third vgroup member | +| 10 | v3_status | VARCHAR(10) | Status of third vgroup member | | 11 | nfiles | INT | Number of data and metadata files in the vgroup | | 12 | file_size | INT | Size of the data and metadata files in the vgroup | | 13 | tsma | TINYINT | Whether time-range-wise SMA is enabled. 1 means enabled; 0 means disabled. | @@ -251,55 +261,57 @@ Provides system configuration information. | # | **Column** | **Data Type** | **Description** | | --- | :--------: | ------------- | ----------------------------------------------------------------------------------------------------------------------- | -| 1 | name | BINARY(32) | Parameter | -| 2 | value | BINARY(64) | Value. It should be noted that `value` is a TDengine keyword and needs to be escaped with ` when used as a column name. | +| 1 | name | VARCHAR(32) | Parameter | +| 2 | value | VARCHAR(64) | Value. It should be noted that `value` is a TDengine keyword and needs to be escaped with ` when used as a column name. | ## INS_DNODE_VARIABLES -Provides dnode configuration information. +Provides dnode configuration information. Users whose SYSINFO attribute is 0 can't view this table. | # | **Column** | **Data Type** | **Description** | | --- | :--------: | ------------- | ----------------------------------------------------------------------------------------------------------------------- | | 1 | dnode_id | INT | Dnode ID | -| 2 | name | BINARY(32) | Parameter | -| 3 | value | BINARY(64) | Value. It should be noted that `value` is a TDengine keyword and needs to be escaped with ` when used as a column name. | +| 2 | name | VARCHAR(32) | Parameter | +| 3 | value | VARCHAR(64) | Value. It should be noted that `value` is a TDengine keyword and needs to be escaped with ` when used as a column name. 
| ## INS_TOPICS | # | **Column** | **Data Type** | **Description** | | --- | :---------: | ------------- | -------------------------------------- | -| 1 | topic_name | BINARY(192) | Topic name | -| 2 | db_name | BINARY(64) | Database for the topic | +| 1 | topic_name | VARCHAR(192) | Topic name | +| 2 | db_name | VARCHAR(64) | Database for the topic | | 3 | create_time | TIMESTAMP | Creation time | -| 4 | sql | BINARY(1024) | SQL statement used to create the topic | +| 4 | sql | VARCHAR(1024) | SQL statement used to create the topic | ## INS_SUBSCRIPTIONS | # | **Column** | **Data Type** | **Description** | | --- | :------------: | ------------- | --------------------------- | -| 1 | topic_name | BINARY(204) | Subscribed topic | -| 2 | consumer_group | BINARY(193) | Subscribed consumer group | +| 1 | topic_name | VARCHAR(204) | Subscribed topic | +| 2 | consumer_group | VARCHAR(193) | Subscribed consumer group | | 3 | vgroup_id | INT | Vgroup ID for the consumer | | 4 | consumer_id | BIGINT | Consumer ID | -| 5 | offset | BINARY(64) | Consumption progress | +| 5 | offset | VARCHAR(64) | Consumption progress | | 6 | rows | BIGINT | Number of consumption items | ## INS_STREAMS | # | **Column** | **Data Type** | **Description** | | --- | :----------: | ------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| 1 | stream_name | BINARY(64) | Stream name | +| 1 | stream_name | VARCHAR(64) | Stream name | | 2 | create_time | TIMESTAMP | Creation time | -| 3 | sql | BINARY(1024) | SQL statement used to create the stream | -| 4 | status | BINARY(20) | Current status | -| 5 | source_db | BINARY(64) | Source database | -| 6 | target_db | BINARY(64) | Target database | -| 7 | target_table | BINARY(192) | Target table | +| 3 | sql | VARCHAR(1024) | SQL statement used to create the stream | +| 4 | status | VARCHAR(20) | Current status | +| 5 | source_db | VARCHAR(64) | Source database | +| 6 | target_db | VARCHAR(64) | Target database | +| 7 | target_table | VARCHAR(192) | Target table | | 8 | watermark | BIGINT | Watermark (see stream processing documentation). It should be noted that `watermark` is a TDengine keyword and needs to be escaped with ` when used as a column name. | | 9 | trigger | INT | Method of triggering the result push (see stream processing documentation). It should be noted that `trigger` is a TDengine keyword and needs to be escaped with ` when used as a column name. | ## INS_USER_PRIVILEGES +Users whose SYSINFO attribute is 0 can't view this table. + | # | **Column** | **Data Type** | **Description** |** | | --- | :----------: | ------------ | -------------------------------------------| | 1 | user_name | VARCHAR(24) | Username | diff --git a/docs/en/12-taos-sql/24-show.md b/docs/en/12-taos-sql/24-show.md index 9e2897160c..2a3975f9a2 100644 --- a/docs/en/12-taos-sql/24-show.md +++ b/docs/en/12-taos-sql/24-show.md @@ -73,10 +73,10 @@ Shows the SQL statement used to create the specified table. This statement can b ## SHOW DATABASES ```sql -SHOW DATABASES; +SHOW [USER | SYSTEM] DATABASES; ``` -Shows all user-created databases. +Shows all databases. The `USER` qualifier specifies only user-created databases. The `SYSTEM` qualifier specifies only system databases. ## SHOW DNODES @@ -183,10 +183,10 @@ Shows all subscriptions in the system. 
## SHOW TABLES ```sql -SHOW [db_name.]TABLES [LIKE 'pattern']; +SHOW [NORMAL | CHILD] [db_name.]TABLES [LIKE 'pattern']; ``` -Shows all standard tables and subtables in the current database. You can use LIKE for fuzzy matching. +Shows all standard tables and subtables in the current database. You can use LIKE for fuzzy matching. The `NORMAL` qualifier specifies standard tables. The `CHILD` qualifier specifies subtables. ## SHOW TABLE DISTRIBUTED diff --git a/docs/en/13-operation/01-pkg-install.md b/docs/en/13-operation/01-pkg-install.md deleted file mode 100644 index 5610139471..0000000000 --- a/docs/en/13-operation/01-pkg-install.md +++ /dev/null @@ -1,178 +0,0 @@ ---- -title: Install and Uninstall -description: This document describes how to install, upgrade, and uninstall TDengine. ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; - -This document gives more information about installing, uninstalling, and upgrading TDengine. - -## Install - -About details of installing TDenine, please refer to [Installation Guide](../../get-started/package/). - -## Uninstall - - - - -Uninstall package of TDengine by apt-get can be uninstalled as below: - -```bash -$ sudo apt-get remove tdengine -Reading package lists... Done -Building dependency tree -Reading state information... Done -The following packages will be REMOVED: - tdengine -0 upgraded, 0 newly installed, 1 to remove and 18 not upgraded. -After this operation, 68.3 MB disk space will be freed. -Do you want to continue? [Y/n] y -(Reading database ... 135625 files and directories currently installed.) -Removing tdengine (3.0.0.0) ... -TDengine is removed successfully! - -``` - -If you have installed taos-tools, please uninstall it first before uninstall TDengine. The command of uninstall is following: - -``` -$ sudo apt remove taostools -Reading package lists... Done -Building dependency tree -Reading state information... Done -The following packages will be REMOVED: - taostools -0 upgraded, 0 newly installed, 1 to remove and 0 not upgraded. -After this operation, 68.3 MB disk space will be freed. -Do you want to continue? [Y/n] -(Reading database ... 147973 files and directories currently installed.) -Removing taostools (2.1.2) ... -``` - - - - -Deb package of TDengine can be uninstalled as below: - -``` -$ sudo dpkg -r tdengine -(Reading database ... 137504 files and directories currently installed.) -Removing tdengine (3.0.0.0) ... -TDengine is removed successfully! - -``` - -Deb package of taosTools can be uninstalled as below: - -``` -$ sudo dpkg -r taostools -(Reading database ... 147973 files and directories currently installed.) -Removing taostools (2.1.2) ... -``` - - - - - -RPM package of TDengine can be uninstalled as below: - -``` -$ sudo rpm -e tdengine -TDengine is removed successfully! -``` - -RPM package of taosTools can be uninstalled as below: - -``` -sudo rpm -e taostools -taosToole is removed successfully! -``` - - - - - -tar.gz package of TDengine can be uninstalled as below: - -``` -$ rmtaos -TDengine is removed successfully! -``` - -tar.gz package of taosTools can be uninstalled as below: - -``` -$ rmtaostools -Start to uninstall taos tools ... - -taos tools is uninstalled successfully! -``` - - - - -Run C:\TDengine\unins000.exe to uninstall TDengine on a Windows system. - - - - -TDengine can be uninstalled as below: - -``` -$ rmtaos -TDengine is removed successfully! -``` - - - - -:::info - -- We strongly recommend not to use multiple kinds of installation packages on a single host TDengine. 
The packages may affect each other and cause errors. - -- After deb package is installed, if the installation directory is removed manually, uninstall or reinstall will not work. This issue can be resolved by using the command below which cleans up TDengine package information. - - ``` - $ sudo rm -f /var/lib/dpkg/info/tdengine* - ``` - -You can then reinstall if needed. - -- After rpm package is installed, if the installation directory is removed manually, uninstall or reinstall will not work. This issue can be resolved by using the command below which cleans up TDengine package information. - - ``` - $ sudo rpm -e --noscripts tdengine - ``` - -You can then reinstall if needed. - -::: - -Uninstalling and Modifying Files - -- When TDengine is uninstalled, the configuration /etc/taos/taos.cfg, data directory /var/lib/taos, log directory /var/log/taos are kept. They can be deleted manually with caution, because data can't be recovered. Please follow data integrity, security, backup or relevant SOPs before deleting any data. - -- When reinstalling TDengine, if the default configuration file /etc/taos/taos.cfg exists, it will be kept and the configuration file in the installation package will be renamed to taos.cfg.orig and stored at /usr/local/taos/cfg to be used as configuration sample. Otherwise the configuration file in the installation package will be installed to /etc/taos/taos.cfg and used. - - -## Upgrade -There are two aspects in upgrade operation: upgrade installation package and upgrade a running server. - -To upgrade a package, follow the steps mentioned previously to first uninstall the old version then install the new version. - -Upgrading a running server is much more complex. First please check the version number of the old version and the new version. The version number of TDengine consists of 4 sections, only if the first 2 sections match can the old version be upgraded to the new version. The steps of upgrading a running server are as below: -- Stop inserting data -- Make sure all data is persisted to disk, please use command `flush database` -- Stop the cluster of TDengine -- Uninstall old version and install new version -- Start the cluster of TDengine -- Execute simple queries, such as the ones executed prior to installing the new package, to make sure there is no data loss -- Run some simple data insertion statements to make sure the cluster works well -- Restore business services - -:::warning -TDengine doesn't guarantee any lower version is compatible with the data generated by a higher version, so it's never recommended to downgrade the version. - -::: diff --git a/docs/en/13-operation/10-monitor.md b/docs/en/13-operation/10-monitor.md index c1c6ac3c4c..2a667c132f 100644 --- a/docs/en/13-operation/10-monitor.md +++ b/docs/en/13-operation/10-monitor.md @@ -106,22 +106,22 @@ The data of tdinsight dashboard is stored in `log` database (default. You can ch |field|type|is\_tag|comment| |:----|:---|:-----|:------| |ts|TIMESTAMP||timestamp| -|uptime|FLOAT||dnode uptime| +|uptime|FLOAT||dnode uptime in `days`| |cpu\_engine|FLOAT||cpu usage of tdengine. read from `/proc/<taosd_pid>/stat`| |cpu\_system|FLOAT||cpu usage of server. read from `/proc/stat`| |cpu\_cores|FLOAT||cpu cores of server| |mem\_engine|INT||memory usage of tdengine. 
read from `/proc/<taosd_pid>/status`| -|mem\_system|INT||available memory on the server| +|mem\_system|INT||available memory on the server in `KB`| |mem\_total|INT||total memory of server in `KB`| |disk\_engine|INT||| |disk\_used|BIGINT||usage of data dir in `bytes`| |disk\_total|BIGINT||the capacity of data dir in `bytes`| -|net\_in|FLOAT||network throughput rate in kb/s. read from `/proc/net/dev`| -|net\_out|FLOAT||network throughput rate in kb/s. read from `/proc/net/dev`| -|io\_read|FLOAT||io throughput rate in kb/s. read from `/proc//io`| -|io\_write|FLOAT||io throughput rate in kb/s. read from `/proc//io`| -|io\_read\_disk|FLOAT||io throughput rate of disk in kb/s. read from `/proc//io`| -|io\_write\_disk|FLOAT||io throughput rate of disk in kb/s. read from `/proc//io`| +|net\_in|FLOAT||network throughput rate in byte/s. read from `/proc/net/dev`| +|net\_out|FLOAT||network throughput rate in byte/s. read from `/proc/net/dev`| +|io\_read|FLOAT||io throughput rate in byte/s. read from `/proc/<taosd_pid>/io`| +|io\_write|FLOAT||io throughput rate in byte/s. read from `/proc/<taosd_pid>/io`| +|io\_read\_disk|FLOAT||io throughput rate of disk in byte/s. read from `/proc/<taosd_pid>/io`| +|io\_write\_disk|FLOAT||io throughput rate of disk in byte/s. read from `/proc/<taosd_pid>/io`| |req\_select|INT||number of select queries received per dnode| |req\_select\_rate|FLOAT||number of select queries received per dnode divided by monitor interval.| |req\_insert|INT||number of insert queries received per dnode| @@ -150,9 +150,9 @@ The data of tdinsight dashboard is stored in `log` database (default. You can ch |ts|TIMESTAMP||timestamp| |name|NCHAR||data directory. default is `/var/lib/taos`| |level|INT||level for multi-level storage| -|avail|BIGINT||available space for data directory| -|used|BIGINT||used space for data directory| -|total|BIGINT||total space for data directory| +|avail|BIGINT||available space for data directory in `bytes`| +|used|BIGINT||used space for data directory in `bytes`| +|total|BIGINT||total space for data directory in `bytes`| |dnode\_id|INT|TAG|dnode id| |dnode\_ep|NCHAR|TAG|dnode endpoint| |cluster\_id|NCHAR|TAG|cluster id| @@ -165,9 +165,9 @@ The data of tdinsight dashboard is stored in `log` database (default. You can ch |:----|:---|:-----|:------| |ts|TIMESTAMP||timestamp| |name|NCHAR||log directory. default is `/var/log/taos/`| -|avail|BIGINT||available space for log directory| -|used|BIGINT||used space for data directory| -|total|BIGINT||total space for data directory| +|avail|BIGINT||available space for log directory in `bytes`| +|used|BIGINT||used space for log directory in `bytes`| +|total|BIGINT||total space for log directory in `bytes`| |dnode\_id|INT|TAG|dnode id| |dnode\_ep|NCHAR|TAG|dnode endpoint| |cluster\_id|NCHAR|TAG|cluster id| @@ -180,9 +180,9 @@ The data of tdinsight dashboard is stored in `log` database (default. You can ch |:----|:---|:-----|:------| |ts|TIMESTAMP||timestamp| |name|NCHAR||temp directory. 
default is `/tmp/`| -|avail|BIGINT||available space for temp directory| -|used|BIGINT||used space for temp directory| -|total|BIGINT||total space for temp directory| +|avail|BIGINT||available space for temp directory in `bytes`| +|used|BIGINT||used space for temp directory in `bytes`| +|total|BIGINT||total space for temp directory in `bytes`| |dnode\_id|INT|TAG|dnode id| |dnode\_ep|NCHAR|TAG|dnode endpoint| |cluster\_id|NCHAR|TAG|cluster id| diff --git a/docs/en/14-reference/03-connector/07-python.mdx b/docs/en/14-reference/03-connector/07-python.mdx index 5067c33e2d..02c176ee3d 100644 --- a/docs/en/14-reference/03-connector/07-python.mdx +++ b/docs/en/14-reference/03-connector/07-python.mdx @@ -31,11 +31,13 @@ We recommend using the latest version of `taospy`, regardless of the version of |Python Connector Version|major changes| |:-------------------:|:----:| +|2.7.12|1. added support for `varbinary` type (not yet supported in STMT)
2. improved query performance (thanks to contributor [hadrianl](https://github.com/taosdata/taos-connector-python/pull/209))| |2.7.9|support for getting assignment and seek function on subscription| |2.7.8|add `execute_many` method| |Python Websocket Connector Version|major changes| |:----------------------------:|:-----:| +|0.2.9|bug fixes| +|0.2.5|1. support for getting assignment and seek function on subscription
2. support schemaless
3. support STMT| |0.2.4|support `unsubscribe` on subscription| @@ -1023,10 +1025,6 @@ Due to the current imperfection of Python's nanosecond support (see link below), 1. https://stackoverflow.com/questions/10611328/parsing-datetime-strings-containing-nanoseconds 2. https://www.python.org/dev/peps/pep-0564/ -## Important Update - -[**Release Notes**] (https://github.com/taosdata/taos-connector-python/releases) - ## API Reference - [taos](https://docs.taosdata.com/api/taospy/taos/) diff --git a/docs/en/14-reference/03-connector/80-php.mdx b/docs/en/14-reference/03-connector/80-php.mdx index b6a31b6de3..b3c2065b6e 100644 --- a/docs/en/14-reference/03-connector/80-php.mdx +++ b/docs/en/14-reference/03-connector/80-php.mdx @@ -52,8 +52,6 @@ curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive && tar -xzf php-tdengine.tar.gz -C php-tdengine --strip-components=1 ``` -> Version number `v1.0.2` is only for example, it can be replaced to any newer version, please find available versions in [TDengine PHP Connector Releases](https://github.com/Yurunsoft/php-tdengine/releases). - **Non-Swoole Environment: ** ```shell diff --git a/docs/en/14-reference/03-connector/_linux_install.mdx b/docs/en/14-reference/03-connector/_linux_install.mdx index 398593cfe6..d637c2cb69 100644 --- a/docs/en/14-reference/03-connector/_linux_install.mdx +++ b/docs/en/14-reference/03-connector/_linux_install.mdx @@ -4,7 +4,6 @@ import PkgListV3 from "/components/PkgListV3"; - [All Downloads](../../releases/tdengine) 2. Unzip diff --git a/docs/en/14-reference/03-connector/_macos_install.mdx b/docs/en/14-reference/03-connector/_macos_install.mdx index effabbbebe..31ceae68b6 100644 --- a/docs/en/14-reference/03-connector/_macos_install.mdx +++ b/docs/en/14-reference/03-connector/_macos_install.mdx @@ -4,8 +4,6 @@ import PkgListV3 from "/components/PkgListV3"; - [All Downloads](../../releases/tdengine) - 2. Execute the installer, select the default value as prompted, and complete the installation. If the installation is blocked, you can right-click or ctrl-click on the installation package and select `Open`. 3. configure taos.cfg diff --git a/docs/en/14-reference/03-connector/_windows_install.mdx b/docs/en/14-reference/03-connector/_windows_install.mdx index 723f685b5d..a6e03f30fb 100644 --- a/docs/en/14-reference/03-connector/_windows_install.mdx +++ b/docs/en/14-reference/03-connector/_windows_install.mdx @@ -3,8 +3,6 @@ import PkgListV3 from "/components/PkgListV3"; 1. Download the client installation package - - [All Downloads](../../releases/tdengine) 2. Execute the installer, select the default value as prompted, and complete the installation 3. Installation path diff --git a/docs/en/14-reference/04-taosadapter.md b/docs/en/14-reference/04-taosadapter.md index 6bc49768c6..c75598b0df 100644 --- a/docs/en/14-reference/04-taosadapter.md +++ b/docs/en/14-reference/04-taosadapter.md @@ -31,7 +31,7 @@ taosAdapter provides the following features. ### Install taosAdapter -If you use the TDengine server, you don't need additional steps to install taosAdapter. You can download taosAdapter from [TDengine 3.0 released versions](../../releases/tdengine) to download the TDengine server installation package. If you need to deploy taosAdapter separately on another server other than the TDengine server, you should install the full TDengine server package on that server to install taosAdapter. 
If you need to build taosAdapter from source code, you can refer to the [Building taosAdapter]( https://github.com/taosdata/taosadapter/blob/3.0/BUILD.md) documentation. +If you use the TDengine server, you don't need additional steps to install taosAdapter. If you need to deploy taosAdapter separately on a server other than the TDengine server, you should install the full TDengine server package on that server to install taosAdapter. If you need to build taosAdapter from source code, you can refer to the [Building taosAdapter]( https://github.com/taosdata/taosadapter/blob/3.0/BUILD.md) documentation. ### Start/Stop taosAdapter @@ -180,7 +180,7 @@ See [example/config/taosadapter.toml](https://github.com/taosdata/taosadapter/bl node_export is an exporter for machine metrics. Please visit [https://github.com/prometheus/node_exporter](https://github.com/prometheus/node_exporter) for more information. - Support for Prometheus remote_read and remote_write remote_read and remote_write are interfaces for Prometheus data read and write from/to other data storage solution. Please visit [https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/#remote-apis](https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/#remote-apis) for more information. -- Get table's VGroup ID. For more information about VGroup, please refer to [primary-logic-unit](/tdinternal/arch/#primary-logic-unit). +- Get table's VGroup ID. ## Interfaces @@ -246,7 +246,7 @@ node_export is an exporter of hardware and OS metrics exposed by the \*NIX kerne ### Get table's VGroup ID -You can call `http://:6041/rest/vgid?db=&table=` to get table's VGroup ID. For more information about VGroup, please refer to [primary-logic-unit](/tdinternal/arch/#primary-logic-unit). +You can call `http://<fqdn>:6041/rest/vgid?db=<db>&table=<table>
` to get table's VGroup ID. ## Memory usage optimization methods diff --git a/docs/en/14-reference/05-taosbenchmark.md index e65046f65d..e052c0d02b 100644 --- a/docs/en/14-reference/05-taosbenchmark.md +++ b/docs/en/14-reference/05-taosbenchmark.md @@ -397,6 +397,7 @@ The configuration parameters for specifying super table tag columns and data col ### Query scenario configuration parameters `filetype` must be set to `query` in the query scenario. +`query_times` specifies the number of times each query is run. To control the query scenario by setting `kill_slow_query_threshold` and `kill_slow_query_interval` parameters to kill the execution of slow query statements. Threshold controls exec_usec of query command will be killed by taosBenchmark after the specified time, in seconds; interval controls sleep time to avoid continuous querying of slow queries consuming CPU in seconds. diff --git a/docs/en/14-reference/06-taosdump.md index baf07d6b9e..c07465a97c 100644 --- a/docs/en/14-reference/06-taosdump.md +++ b/docs/en/14-reference/06-taosdump.md @@ -103,7 +103,7 @@ Usage: taosdump [OPTION...] dbname [tbname ...] use letter and number only. Default is NOT. -n, --no-escape No escape char '`'. Default is using it. -Q, --dot-replace Repalce dot character with underline character in - the table name. + the table name. (Version 2.5.3) -T, --thread-num=THREAD_NUM Number of thread for dump in file. Default is 8. -C, --cloud=CLOUD_DSN specify a DSN to access TDengine cloud service @@ -113,6 +113,10 @@ Usage: taosdump [OPTION...] dbname [tbname ...] -?, --help Give this help list --usage Give a short usage message -V, --version Print program version + -W, --rename=RENAME-LIST Rename database name with new name during + importing data. RENAME-LIST: + "db1=newDB1|db2=newDB2" means rename db1 to newDB1 + and rename db2 to newDB2 (Version 2.5.4) Mandatory or optional arguments to long options are also mandatory or optional for any corresponding short options. diff --git a/docs/en/20-third-party/70-seeq.md index e7ad5c8173..e42204dd5d 100644 --- a/docs/en/20-third-party/70-seeq.md +++ b/docs/en/20-third-party/70-seeq.md @@ -10,76 +10,60 @@ description: How to use Seeq and TDengine to perform time series data analysis Seeq is an advanced analytics software for the manufacturing industry and the Industrial Internet of Things (IIoT). Seeq supports the use of machine learning innovations within process manufacturing organizations. These capabilities enable organizations to deploy their own or third-party machine learning algorithms into advanced analytics applications used by frontline process engineers and subject matter experts, thus extending the efforts of a single data scientist to many frontline workers. -With the TDengine Java connector, Seeq effortlessly supports querying time series data provided by TDengine and offers functionalities such as data visualization, analysis, and forecasting. +TDengine can be added as a data source in Seeq via the JDBC connector. Once the data source is configured, Seeq can read data from TDengine and offer functionalities such as data visualization, analysis, and forecasting. -### Install Seeq +## Prerequisites -Please download Seeq Server and Seeq Data Lab software installation package from the [Seeq official website](https://www.seeq.com/customer-download). +1. Install Seeq Server and Seeq Data Lab software +2. 
Install TDengine or register TDengine Cloud service

-### Install and start Seeq Server
-
-```
-tar xvzf seeq-server-xxx.tar.gz
-cd seeq-server-installer
-sudo ./install
-
-sudo seeq service enable
-sudo seeq start
-```
-
-### Install and start Seeq Data Lab Server
-
-Seeq Data Lab needs to be installed on a separate server from Seeq Server and connected to Seeq Server through configuration. For detailed installation and configuration instructions, please refer to [the official documentation](https://support.seeq.com/space/KB/1034059842).
-
-```
-tar xvf seeq-data-lab--64bit-linux.tar.gz
-sudo seeq-data-lab-installer/install -f /opt/seeq/seeq-data-lab -g /var/opt/seeq -u seeq
-sudo seeq config set Network/DataLab/Hostname localhost
-sudo seeq config set Network/DataLab/Port 34231 # the port of the Data Lab server (usually 34231)
-sudo seeq config set Network/Hostname # the host IP or URL of the main Seeq Server
-
-# If the main Seeq server is configured to listen over HTTPS
-sudo seeq config set Network/Webserver/SecurePort 443 # the secure port of the main Seeq Server (usually 443)
-
-# If the main Seeq server is NOT configured to listen over HTTPS
-sudo seeq config set Network/Webserver/Port
-
-#On the main Seeq server, open a Seeq Command Prompt and set the hostname of the Data Lab server:
-sudo seeq config set Network/DataLab/Hostname # the host IP (not URL) of the Data Lab server
-sudo seeq config set Network/DataLab/Port 34231 # the port of the Data Lab server (usually 34231
-```
-
-### Install TDengine on-premise instance
-
-See [Quick Install from Package](../../get-started).
-
-### Or use TDengine Cloud
-
-Register for a [TDengine Cloud](https://cloud.tdengine.com) account and log in to your account.
-
-## Make Seeq be able to access TDengine
-
-1. Get data location configuration
+## Install TDengine JDBC connector
+1. Get Seeq data location configuration
```
sudo seeq config get Folders/Data
```
-
-2. Download TDengine Java connector from maven.org. Please use the latest version (Current is 3.2.5, https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/3.2.5/taos-jdbcdriver-3.2.5-dist.jar).
-
+2. Download the latest TDengine Java connector from maven.org (the current version is [3.2.5](https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/3.2.5/taos-jdbcdriver-3.2.5-dist.jar)), and copy the JAR file into the plugins/lib/ subdirectory of the directory found in step 1
3. Restart Seeq server
-
```
sudo seeq restart
```
-4. Input License
+## Add TDengine into Seeq's data source
+1. Open Seeq, log in as admin, go to Administration, click "Add Data Source"
+2. For connector, choose SQL connector v2
+3. Inside "Additional Configuration" input box, copy and paste the following

-Use a browser to access ip:34216 and input the license according to the guide.
+```
+{
+  "QueryDefinitions": [],
+  "Type": "GENERIC",
+  "Hostname": null,
+  "Port": 0,
+  "DatabaseName": null,
+  "Username": null,
+  "Password": null,
+  "InitialSql": null,
+  "TimeZone": null,
+  "PrintRows": false,
+  "UseWindowsAuth": false,
+  "SqlFetchBatchSize": 100000,
+  "UseSSL": false,
+  "JdbcProperties": null,
+  "GenericDatabaseConfig": {
+    "DatabaseJdbcUrl": "jdbc:TAOS-RS://localhost:6041/?user=root&password=taosdata",
+    "SqlDriverClassName": "com.taosdata.jdbc.rs.RestfulDriver",
+    "ResolutionInNanoseconds": 1000,
+    "ZonedColumnTypes": []
+  }
+}
+```

-## How to use Seeq to analyze time-series data that TDengine serves
+Note: You need to replace DatabaseJdbcUrl with your setting.
Please log in to TDengine Cloud, or open taosExplorer for the enterprise edition, and click Programming -> Java to find yours. For the "QueryDefinitions", please follow the examples below to write your own.

-This chapter demonstrates how to use Seeq software in conjunction with TDengine for time series data analysis.
+## Use Seeq to analyze time-series data stored inside TDengine
+
+This chapter demonstrates how to use Seeq with TDengine for time series data analysis.

### Scenario Overview

@@ -150,8 +134,8 @@ Please login with Seeq administrator and create a few data sources as following.
 "Hostname": null,
 "Port": 0,
 "DatabaseName": null,
- "Username": "root",
- "Password": "taosdata",
+ "Username": null,
+ "Password": null,
 "InitialSql": null,
 "TimeZone": null,
 "PrintRows": false,
@@ -210,8 +194,8 @@ Please login with Seeq administrator and create a few data sources as following.
 "Hostname": null,
 "Port": 0,
 "DatabaseName": null,
- "Username": "root",
- "Password": "taosdata",
+ "Username": null,
+ "Password": null,
 "InitialSql": null,
 "TimeZone": null,
 "PrintRows": false,
@@ -269,8 +253,8 @@ Please login with Seeq administrator and create a few data sources as following.
 "Hostname": null,
 "Port": 0,
 "DatabaseName": null,
- "Username": "root",
- "Password": "taosdata",
+ "Username": null,
+ "Password": null,
 "InitialSql": null,
 "TimeZone": null,
 "PrintRows": false,
@@ -289,13 +273,13 @@ Please login with Seeq administrator and create a few data sources as following.

#### Launch Seeq Workbench

-Please login to Seeq server with IP:port and create a new Seeq Workbench, then select data sources and choose the correct tools to do data visualization and analysis. Please refer to [the official documentation](https://support.seeq.com/space/KB/146440193/Seeq+Workbench) for the details.
+Please log in to the Seeq server and create a new Seeq Workbench, then select data sources and choose the correct tools to do data visualization and analysis. Please refer to [the official documentation](https://support.seeq.com/space/KB/146440193/Seeq+Workbench) for the details.

![Seeq Workbench](./seeq/seeq-demo-workbench.webp)

#### Use Seeq Data Lab Server for advanced data analysis

-Please login to the Seeq service with IP:port and create a new Seeq Data Lab. Then you can use advanced tools including Python environment and machine learning add-ons for more complex analysis.
+Please log in to the Seeq service and create a new Seeq Data Lab. Then you can use advanced tools including Python environment and machine learning add-ons for more complex analysis.

```Python
from seeq import spy
@@ -370,13 +354,15 @@ Please note that when using TDengine Cloud, you need to specify the database nam

#### The data source of TDengine Cloud example

+This data source contains the data from a smart meter in the public database smartmeters.
+ ``` { "QueryDefinitions": [ { "Name": "CloudVoltage", "Type": "SIGNAL", - "Sql": "SELECT ts, voltage FROM test.meters", + "Sql": "SELECT ts, voltage FROM smartmeters.d1000", "Enabled": true, "TestMode": false, "TestQueriesDuringSync": true, @@ -409,8 +395,8 @@ Please note that when using TDengine Cloud, you need to specify the database nam "Hostname": null, "Port": 0, "DatabaseName": null, - "Username": "root", - "Password": "taosdata", + "Username": null, + "Password": null, "InitialSql": null, "TimeZone": null, "PrintRows": false, @@ -419,7 +405,7 @@ Please note that when using TDengine Cloud, you need to specify the database nam "UseSSL": false, "JdbcProperties": null, "GenericDatabaseConfig": { - "DatabaseJdbcUrl": "jdbc:TAOS-RS://gw.cloud.taosdata.com?useSSL=true&token=41ac9d61d641b6b334e8b76f45f5a8XXXXXXXXXX", + "DatabaseJdbcUrl": "jdbc:TAOS-RS://gw.us-west-2.aws.cloud.tdengine.com?useSSL=true&token=42b874395452d36f38dd6bf4317757611b213683", "SqlDriverClassName": "com.taosdata.jdbc.rs.RestfulDriver", "ResolutionInNanoseconds": 1000, "ZonedColumnTypes": [] @@ -433,8 +419,8 @@ Please note that when using TDengine Cloud, you need to specify the database nam ## Conclusion -By integrating Seeq and TDengine, it is possible to leverage the efficient storage and querying performance of TDengine while also benefiting from Seeq's powerful data visualization and analysis capabilities provided to users. +By integrating Seeq and TDengine, you can leverage the efficient storage and querying performance of TDengine while also benefiting from Seeq's powerful data visualization and analysis capabilities provided to users. -This integration allows users to take advantage of TDengine's high-performance time-series data storage and retrieval, ensuring efficient handling of large volumes of data. At the same time, Seeq provides advanced analytics features such as data visualization, anomaly detection, correlation analysis, and predictive modeling, enabling users to gain valuable insights and make data-driven decisions. +This integration allows users to take advantage of TDengine's high-performance time-series data storage and query, ensuring efficient handling of large volumes of data. At the same time, Seeq provides advanced analytics features such as data visualization, anomaly detection, correlation analysis, and predictive modeling, enabling users to gain valuable insights and make data-driven decisions. Together, Seeq and TDengine provide a comprehensive solution for time series data analysis in diverse industries such as manufacturing, IIoT, and power systems. The combination of efficient data storage and advanced analytics empowers users to unlock the full potential of their time series data, driving operational improvements, and enabling predictive and prescriptive analytics applications. 
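Before registering the data source above in Seeq, it can be worth confirming outside Seeq that TDengine answers the same SQL a QueryDefinition will issue. Below is a minimal sketch against TDengine's documented `/rest/sql` endpoint; the host, credentials, and the `smartmeters.d1000` table are illustrative placeholders taken from the examples above, so substitute your own deployment's values:

```python
import requests

# Illustrative placeholders: point these at your own taosAdapter instance.
REST_URL = "http://localhost:6041/rest/sql"
AUTH = ("root", "taosdata")  # TDengine's default credentials; change them in production

# The same SQL a Seeq QueryDefinition would run.
sql = "SELECT ts, voltage FROM smartmeters.d1000 LIMIT 5"

resp = requests.post(REST_URL, data=sql, auth=AUTH, timeout=10)
resp.raise_for_status()
body = resp.json()

# A successful response carries column metadata and row data.
print(body.get("column_meta"))
for row in body.get("data", []):
    print(row)
```

If this returns rows, the same host, user, and password can be encoded into the DatabaseJdbcUrl used by the Seeq data source.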
diff --git a/docs/zh/02-intro.md b/docs/zh/02-intro.md index bb989f27da..93d650ddd6 100644 --- a/docs/zh/02-intro.md +++ b/docs/zh/02-intro.md @@ -4,20 +4,14 @@ description: 简要介绍 TDengine 的主要功能 toc_max_heading_level: 2 --- -TDengine 是一款开源、高性能、云原生的[时序数据库](https://tdengine.com/tsdb/),且针对物联网、车联网、工业互联网、金融、IT 运维等场景进行了优化。TDengine 的代码,包括集群功能,都在 GNU AGPL v3.0 下开源。除核心的时序数据库功能外,TDengine 还提供[缓存](../develop/cache/)、[数据订阅](../develop/tmq)、[流式计算](../develop/stream)等其它功能以降低系统复杂度及研发和运维成本。 +TDengine 是一款专为物联网、工业互联网等场景设计并优化的大数据平台,它能安全高效地将大量设备、数据采集器每天产生的高达 TB 甚至 PB 级的数据进行汇聚、存储、分析和分发,对业务运行状态进行实时监测、预警,提供实时的商业洞察。其核心模块是高性能、集群开源、云原生、极简的时序数据库 TDengine OSS。 -本章节介绍 TDengine 的主要产品和功能、竞争优势、适用场景、与其他数据库的对比测试等等,让大家对 TDengine 有个整体的了解。 -## 主要产品 - -TDengine 有三个主要产品:TDengine Enterprise (即 TDengine 企业版),TDengine Cloud,和 TDengine OSS,关于它们的具体定义请参考 -- [TDengine 企业版](https://www.taosdata.com/tdengine-pro) -- [TDengine 云服务](https://cloud.taosdata.com/?utm_source=menu&utm_medium=webcn) -- [TDengine 开源版](https://www.taosdata.com/tdengine-oss) +本节介绍 TDengine OSS 的主要产品和功能、竞争优势、适用场景、与其他数据库的对比测试等等,让大家对 TDengine OSS 有个整体了解 ## 主要功能 -TDengine 的主要功能如下: +TDengine OSS 的主要功能如下: 1. 写入数据,支持 - [SQL 写入](../develop/insert-data/sql-writing) @@ -150,3 +144,10 @@ TDengine 的主要功能如下: - [TDengine VS InfluxDB ,写入性能大 PK !](https://www.taosdata.com/2021/11/05/3248.html) - [TDengine 和 InfluxDB 查询性能对比测试报告](https://www.taosdata.com/2022/02/22/5969.html) - [TDengine 与 InfluxDB、OpenTSDB、Cassandra、MySQL、ClickHouse 等数据库的对比测试报告](https://www.taosdata.com/downloads/TDengine_Testing_Report_cn.pdf) + + +## 主要产品 + +TDengine 有两个主要产品:TDengine Enterprise (即 TDengine 企业版)和 TDengine Cloud,关于它们的具体定义请参考 +- [TDengine 企业版](https://www.taosdata.com/tdengine-pro) +- [TDengine 云服务](https://cloud.taosdata.com/?utm_source=menu&utm_medium=webcn) diff --git a/docs/zh/05-get-started/index.md b/docs/zh/05-get-started/index.md index 16172277b5..0b7ca02b9f 100644 --- a/docs/zh/05-get-started/index.md +++ b/docs/zh/05-get-started/index.md @@ -4,7 +4,7 @@ description: '快速设置 TDengine 环境并体验其高效写入和查询' --- import xiaot from './xiaot.webp' -import xiaot_new from './xiaot-03.webp' +import xiaot_new from './xiaot-20231007.png' import channel from './channel.webp' import official_account from './official-account.webp' diff --git a/docs/zh/05-get-started/xiaot-20231007.png b/docs/zh/05-get-started/xiaot-20231007.png new file mode 100644 index 0000000000..553bcbd090 Binary files /dev/null and b/docs/zh/05-get-started/xiaot-20231007.png differ diff --git a/docs/zh/07-develop/07-tmq.mdx b/docs/zh/07-develop/07-tmq.md similarity index 96% rename from docs/zh/07-develop/07-tmq.mdx rename to docs/zh/07-develop/07-tmq.md index 927d762829..8e43631c9a 100644 --- a/docs/zh/07-develop/07-tmq.mdx +++ b/docs/zh/07-develop/07-tmq.md @@ -63,17 +63,17 @@ import CDemo from "./_sub_c.mdx"; typedef void(tmq_commit_cb(tmq_t *tmq, int32_t code, void *param)); typedef enum tmq_conf_res_t { - TMQ_CONF_UNKNOWN = -2, - TMQ_CONF_INVALID = -1, - TMQ_CONF_OK = 0, -} tmq_conf_res_t; + TMQ_CONF_UNKNOWN = -2, + TMQ_CONF_INVALID = -1, + TMQ_CONF_OK = 0, + } tmq_conf_res_t; typedef struct tmq_topic_assignment { - int32_t vgId; - int64_t currentOffset; - int64_t begin; - int64_t end; -} tmq_topic_assignment; + int32_t vgId; + int64_t currentOffset; + int64_t begin; + int64_t end; + } tmq_topic_assignment; DLL_EXPORT tmq_conf_t *tmq_conf_new(); DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value); @@ -106,7 +106,7 @@ import CDemo from "./_sub_c.mdx"; DLL_EXPORT const char *tmq_get_db_name(TAOS_RES 
*res); DLL_EXPORT int32_t tmq_get_vgroup_id(TAOS_RES *res); DLL_EXPORT int64_t tmq_get_vgroup_offset(TAOS_RES* res); - DLL_EXPORT const char *tmq_err2str(int32_t code);DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param); + DLL_EXPORT const char *tmq_err2str(int32_t code); ``` 下面介绍一下它们的具体用法(超级表和子表结构请参考“数据建模”一节),完整的示例代码请见下面 C 语言的示例代码。 @@ -351,10 +351,10 @@ CREATE TOPIC topic_name [with meta] AS DATABASE db_name; | `td.connect.port` | integer | 服务端的端口号 | | | `group.id` | string | 消费组 ID,同一消费组共享消费进度 |
**必填项**。最大长度:192。
每个topic最多可建立100个 consumer group | | `client.id` | string | 客户端 ID | 最大长度:192。 | -| `auto.offset.reset` | enum | 消费组订阅的初始位置 |
`earliest`: default;从头开始订阅;
`latest`: 仅从最新数据开始订阅;
`none`: 没有提交的 offset 无法订阅 | +| `auto.offset.reset` | enum | 消费组订阅的初始位置 |
`earliest`: default(version < 3.2.0.0);从头开始订阅;
`latest`: default(version >= 3.2.0.0);仅从最新数据开始订阅;
`none`: 没有提交的 offset 无法订阅 | | `enable.auto.commit` | boolean | 是否启用消费位点自动提交,true: 自动提交,客户端应用无需commit;false:客户端应用需要自行commit | 默认值为 true | | `auto.commit.interval.ms` | integer | 消费记录自动提交消费位点时间间隔,单位为毫秒 | 默认值为 5000 | -| `msg.with.table.name` | boolean | 是否允许从消息中解析表名, 不适用于列订阅(列订阅时可将 tbname 作为列写入 subquery 语句) |默认关闭 | +| `msg.with.table.name` | boolean | 是否允许从消息中解析表名, 不适用于列订阅(列订阅时可将 tbname 作为列写入 subquery 语句)(从3.2.0.0版本该参数废弃,恒为true) |默认关闭 | 对于不同编程语言,其设置方式如下: @@ -459,7 +459,19 @@ from taos.tmq import Consumer # Syntax: `consumer = Consumer(configs)` # # Example: -consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"}) +consumer = Consumer( + { + "group.id": "local", + "client.id": "1", + "enable.auto.commit": "true", + "auto.commit.interval.ms": "1000", + "td.connect.ip": "127.0.0.1", + "td.connect.user": "root", + "td.connect.pass": "taosdata", + "auto.offset.reset": "earliest", + "msg.with.table.name": "true", + } +) ``` diff --git a/docs/zh/08-connector/30-python.mdx b/docs/zh/08-connector/30-python.mdx index ab98b5b8de..1526c0da6e 100644 --- a/docs/zh/08-connector/30-python.mdx +++ b/docs/zh/08-connector/30-python.mdx @@ -33,11 +33,13 @@ Python 连接器的源码托管在 [GitHub](https://github.com/taosdata/taos-con |Python Connector 版本|主要变化| |:-------------------:|:----:| +|2.7.12|1. 新增 varbinary 类型支持(STMT暂不支持 varbinary )
2. query 性能提升(感谢贡献者[hadrianl](https://github.com/taosdata/taos-connector-python/pull/209))| |2.7.9|数据订阅支持获取消费进度和重置消费进度| |2.7.8|新增 `execute_many`| |Python Websocket Connector 版本|主要变化| |:----------------------------:|:-----:| +|0.2.9|已知问题修复| |0.2.5|1. 数据订阅支持获取消费进度和重置消费进度
2. 支持 schemaless
3. 支持 STMT| |0.2.4|数据订阅新增取消订阅方法| diff --git a/docs/zh/12-taos-sql/06-select.md b/docs/zh/12-taos-sql/06-select.md index 6d32d86f83..04508ceede 100755 --- a/docs/zh/12-taos-sql/06-select.md +++ b/docs/zh/12-taos-sql/06-select.md @@ -24,7 +24,7 @@ SELECT [hints] [DISTINCT] select_list hints: /*+ [hint([hint_param_list])] [hint([hint_param_list])] */ hint: - BATCH_SCAN | NO_BATCH_SCAN + BATCH_SCAN | NO_BATCH_SCAN | SORT_FOR_GROUP select_list: select_expr [, select_expr] ... @@ -87,15 +87,17 @@ Hints 是用户控制单个语句查询优化的一种手段,当 Hint 不适 目前支持的 Hints 列表如下: -| **Hint** | **参数** | **说明** | **适用范围** | -| :-----------: | -------------- | -------------------------- | -------------------------- | -| BATCH_SCAN | 无 | 采用批量读表的方式 | 超级表 JOIN 语句 | -| NO_BATCH_SCAN | 无 | 采用顺序读表的方式 | 超级表 JOIN 语句 | +| **Hint** | **参数** | **说明** | **适用范围** | +| :-----------: | -------------- | -------------------------- | -----------------------------| +| BATCH_SCAN | 无 | 采用批量读表的方式 | 超级表 JOIN 语句 | +| NO_BATCH_SCAN | 无 | 采用顺序读表的方式 | 超级表 JOIN 语句 | +| SORT_FOR_GROUP| 无 | 采用sort方式进行分组 | partition by 列表有普通列时 | 举例: ```sql SELECT /*+ BATCH_SCAN() */ a.ts FROM stable1 a, stable2 b where a.tag0 = b.tag0 and a.ts = b.ts; +SELECT /*+ SORT_FOR_GROUP() */ count(*), c1 FROM stable1 PARTITION BY c1; ``` ## 列表 diff --git a/docs/zh/12-taos-sql/16-operators.md b/docs/zh/12-taos-sql/16-operators.md index 0636121edd..c2f0cae9c4 100644 --- a/docs/zh/12-taos-sql/16-operators.md +++ b/docs/zh/12-taos-sql/16-operators.md @@ -54,6 +54,7 @@ LIKE 条件使用通配符字符串进行匹配检查,规则如下: MATCH 条件和 NMATCH 条件使用正则表达式进行匹配,规则如下: - 支持符合 POSIX 规范的正则表达式,具体规范内容可参见 Regular Expressions。 +- MATCH 和正则表达式匹配时, 返回 TURE. NMATCH 和正则表达式不匹配时, 返回 TRUE. - 只能针对子表名(即 tbname)、字符串类型的标签值进行正则表达式过滤,不支持普通列的过滤。 - 正则匹配字符串长度不能超过 128 字节。可以通过参数 maxRegexStringLen 设置和调整最大允许的正则匹配字符串,该参数是客户端配置参数,需要重启客户端才能生效 diff --git a/docs/zh/12-taos-sql/20-keywords.md b/docs/zh/12-taos-sql/20-keywords.md index e7e926d0b7..f59eda1689 100644 --- a/docs/zh/12-taos-sql/20-keywords.md +++ b/docs/zh/12-taos-sql/20-keywords.md @@ -180,6 +180,7 @@ description: TDengine 保留关键字的详细列表 - MAX_DELAY - BWLIMIT - MAXROWS +- MAX_SPEED - MERGE - META - MINROWS diff --git a/docs/zh/12-taos-sql/22-meta.md b/docs/zh/12-taos-sql/22-meta.md index 35794ec269..db53dd462b 100644 --- a/docs/zh/12-taos-sql/22-meta.md +++ b/docs/zh/12-taos-sql/22-meta.md @@ -26,7 +26,7 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数 ## INS_DNODES -提供 dnode 的相关信息。也可以使用 SHOW DNODES 来查询这些信息。 +提供 dnode 的相关信息。也可以使用 SHOW DNODES 来查询这些信息。 SYSINFO 为 0 的用户不能查看此表。 | # | **列名** | **数据类型** | **说明** | | --- | :------------: | ------------ | ----------------------------------------------------------------------------------------------------- | @@ -40,7 +40,7 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数 ## INS_MNODES -提供 mnode 的相关信息。也可以使用 SHOW MNODES 来查询这些信息。 +提供 mnode 的相关信息。也可以使用 SHOW MNODES 来查询这些信息。 SYSINFO 为 0 的用户不能查看此表。 | # | **列名** | **数据类型** | **说明** | | --- | :---------: | ------------ | ------------------ | @@ -52,22 +52,33 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数 ## INS_QNODES -当前系统中 QNODE 的信息。也可以使用 SHOW QNODES 来查询这些信息。 +当前系统中 QNODE 的信息。也可以使用 SHOW QNODES 来查询这些信息。SYSINFO 属性为 0 的用户不能查看此表。 | # | **列名** | **数据类型** | **说明** | | --- | :---------: | ------------ | ------------ | | 1 | id | SMALLINT | qnode id | -| 2 | endpoint | BINARY(134) | qnode 的地址 | +| 2 | endpoint | VARCHAR(134) | qnode 的地址 | | 3 | create_time | TIMESTAMP | 创建时间 | +## INS_SNODES + +当前系统中 SNODE 的信息。也可以使用 SHOW SNODES 来查询这些信息。SYSINFO 属性为 0 的用户不能查看此表。 + +| # | **列名** | **数据类型** | **说明** | +| --- | 
:---------: | ------------ | ------------ | +| 1 | id | SMALLINT | snode id | +| 2 | endpoint | VARCHAR(134) | snode 的地址 | +| 3 | create_time | TIMESTAMP | 创建时间 | + + ## INS_CLUSTER -存储集群相关信息。 +存储集群相关信息。 SYSINFO 属性为 0 的用户不能查看此表。 | # | **列名** | **数据类型** | **说明** | | --- | :---------: | ------------ | ---------- | | 1 | id | BIGINT | cluster id | -| 2 | name | BINARY(134) | 集群名称 | +| 2 | name | VARCHAR(134) | 集群名称 | | 3 | create_time | TIMESTAMP | 创建时间 | ## INS_DATABASES @@ -76,25 +87,25 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数 | # | **列名** | **数据类型** | **说明** | | --- | :------------------: | ---------------- | ------------------------------------------------ | -| 1 | name | BINARY(32) | 数据库名 | +| 1 | name | VARCHAR(64) | 数据库名 | | 2 | create_time | TIMESTAMP | 创建时间 | | 3 | ntables | INT | 数据库中表的数量,包含子表和普通表但不包含超级表 | | 4 | vgroups | INT | 数据库中有多少个 vgroup。需要注意,`vgroups` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | | 6 | replica | INT | 副本数。需要注意,`replica` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | -| 7 | strict | BINARY(4) | 废弃参数 | -| 8 | duration | INT | 单文件存储数据的时间跨度。需要注意,`duration` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | -| 9 | keep | INT | 数据保留时长。需要注意,`keep` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | +| 7 | strict | VARCHAR(4) | 废弃参数 | +| 8 | duration | VARCHAR(10) | 单文件存储数据的时间跨度。需要注意,`duration` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | +| 9 | keep | VARCHAR(32) | 数据保留时长。需要注意,`keep` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | | 10 | buffer | INT | 每个 vnode 写缓存的内存块大小,单位 MB。需要注意,`buffer` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | | 11 | pagesize | INT | 每个 VNODE 中元数据存储引擎的页大小,单位为 KB。需要注意,`pagesize` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | | 12 | pages | INT | 每个 vnode 元数据存储引擎的缓存页个数。需要注意,`pages` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | | 13 | minrows | INT | 文件块中记录的最大条数。需要注意,`minrows` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | | 14 | maxrows | INT | 文件块中记录的最小条数。需要注意,`maxrows` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | | 15 | comp | INT | 数据压缩方式。需要注意,`comp` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | -| 16 | precision | BINARY(2) | 时间分辨率。需要注意,`precision` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | -| 17 | status | BINARY(10) | 数据库状态 | -| 18 | retentions | BINARY (60) | 数据的聚合周期和保存时长。需要注意,`retentions` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | +| 16 | precision | VARCHAR(2) | 时间分辨率。需要注意,`precision` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | +| 17 | status | VARCHAR(10) | 数据库状态 | +| 18 | retentions | VARCHAR(60) | 数据的聚合周期和保存时长。需要注意,`retentions` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | | 19 | single_stable | BOOL | 表示此数据库中是否只可以创建一个超级表。需要注意,`single_stable` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | -| 20 | cachemodel | BINARY(60) | 表示是否在内存中缓存子表的最近数据。需要注意,`cachemodel` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | +| 20 | cachemodel | VARCHAR(60) | 表示是否在内存中缓存子表的最近数据。需要注意,`cachemodel` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | | 21 | cachesize | INT | 表示每个 vnode 中用于缓存子表最近数据的内存大小。需要注意,`cachesize` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | | 22 | wal_level | INT | WAL 级别。需要注意,`wal_level` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | | 23 | wal_fsync_period | INT | 数据落盘周期。需要注意,`wal_fsync_period` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | @@ -111,15 +122,15 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数 | # | **列名** | **数据类型** | **说明** | | --- | :-----------: | ------------- | --------------------------------------------------------------------------------------------- | -| 1 | name | BINARY(64) | 函数名 | -| 2 | comment | BINARY(255) | 补充说明。需要注意,`comment` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | +| 1 | name | VARCHAR(64) | 函数名 | +| 2 | comment | VARCHAR(255) | 补充说明。需要注意,`comment` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | | 3 | aggregate 
| INT | 是否为聚合函数。需要注意,`aggregate` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | -| 4 | output_type | BINARY(31) | 输出类型 | +| 4 | output_type | VARCHAR(31) | 输出类型 | | 5 | create_time | TIMESTAMP | 创建时间 | | 6 | code_len | INT | 代码长度 | | 7 | bufsize | INT | buffer 大小 | -| 8 | func_language | BINARY(31) | 自定义函数编程语言 | -| 9 | func_body | BINARY(16384) | 函数体定义 | +| 8 | func_language | VARCHAR(31) | 自定义函数编程语言 | +| 9 | func_body | VARCHAR(16384) | 函数体定义 | | 10 | func_version | INT | 函数版本号。初始版本为0,每次替换更新,版本号加1。 | @@ -129,12 +140,12 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数 | # | **列名** | **数据类型** | **说明** | | --- | :--------------: | ------------ | ------------------------------------------------------- | -| 1 | db_name | BINARY(32) | 包含此索引的表所在的数据库名 | -| 2 | table_name | BINARY(192) | 包含此索引的表的名称 | -| 3 | index_name | BINARY(192) | 索引名 | -| 4 | column_name | BINARY(64) | 建索引的列的列名 | -| 5 | index_type | BINARY(10) | 目前有 SMA 和 tag | -| 6 | index_extensions | BINARY(256) | 索引的额外信息。对 SMA/tag 类型的索引,是函数名的列表。 | +| 1 | db_name | VARCHAR(32) | 包含此索引的表所在的数据库名 | +| 2 | table_name | VARCHAR(192) | 包含此索引的表的名称 | +| 3 | index_name | VARCHAR(192) | 索引名 | +| 4 | column_name | VARCHAR(64) | 建索引的列的列名 | +| 5 | index_type | VARCHAR(10) | 目前有 SMA 和 tag | +| 6 | index_extensions | VARCHAR(256) | 索引的额外信息。对 SMA/tag 类型的索引,是函数名的列表。 | ## INS_STABLES @@ -142,16 +153,16 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数 | # | **列名** | **数据类型** | **说明** | | --- | :-----------: | ------------ | ----------------------------------------------------------------------------------------------------- | -| 1 | stable_name | BINARY(192) | 超级表表名 | -| 2 | db_name | BINARY(64) | 超级表所在的数据库的名称 | +| 1 | stable_name | VARCHAR(192) | 超级表表名 | +| 2 | db_name | VARCHAR(64) | 超级表所在的数据库的名称 | | 3 | create_time | TIMESTAMP | 创建时间 | | 4 | columns | INT | 列数目 | | 5 | tags | INT | 标签数目。需要注意,`tags` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | | 6 | last_update | TIMESTAMP | 最后更新时间 | -| 7 | table_comment | BINARY(1024) | 表注释 | -| 8 | watermark | BINARY(64) | 窗口的关闭时间。需要注意,`watermark` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | -| 9 | max_delay | BINARY(64) | 推送计算结果的最大延迟。需要注意,`max_delay` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | -| 10 | rollup | BINARY(128) | rollup 聚合函数。需要注意,`rollup` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | +| 7 | table_comment | VARCHAR(1024) | 表注释 | +| 8 | watermark | VARCHAR(64) | 窗口的关闭时间。需要注意,`watermark` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | +| 9 | max_delay | VARCHAR(64) | 推送计算结果的最大延迟。需要注意,`max_delay` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | +| 10 | rollup | VARCHAR(128) | rollup 聚合函数。需要注意,`rollup` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | ## INS_TABLES @@ -159,37 +170,37 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数 | # | **列名** | **数据类型** | **说明** | | --- | :-----------: | ------------ | ------------------------------------------------------------------------------------- | -| 1 | table_name | BINARY(192) | 表名 | -| 2 | db_name | BINARY(64) | 数据库名 | +| 1 | table_name | VARCHAR(192) | 表名 | +| 2 | db_name | VARCHAR(64) | 数据库名 | | 3 | create_time | TIMESTAMP | 创建时间 | | 4 | columns | INT | 列数目 | -| 5 | stable_name | BINARY(192) | 所属的超级表表名 | +| 5 | stable_name | VARCHAR(192) | 所属的超级表表名 | | 6 | uid | BIGINT | 表 id | | 7 | vgroup_id | INT | vgroup id | | 8 | ttl | INT | 表的生命周期。需要注意,`ttl` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | -| 9 | table_comment | BINARY(1024) | 表注释 | -| 10 | type | BINARY(21) | 表类型 | +| 9 | table_comment | VARCHAR(1024) | 表注释 | +| 10 | type | VARCHAR(21) | 表类型 | ## INS_TAGS | # | **列名** | **数据类型** | **说明** | | --- | :---------: | ------------- | 
---------------------- | -| 1 | table_name | BINARY(192) | 表名 | -| 2 | db_name | BINARY(64) | 该表所在的数据库的名称 | -| 3 | stable_name | BINARY(192) | 所属的超级表表名 | -| 4 | tag_name | BINARY(64) | tag 的名称 | -| 5 | tag_type | BINARY(64) | tag 的类型 | -| 6 | tag_value | BINARY(16384) | tag 的值 | +| 1 | table_name | VARCHAR(192) | 表名 | +| 2 | db_name | VARCHAR(64) | 该表所在的数据库的名称 | +| 3 | stable_name | VARCHAR(192) | 所属的超级表表名 | +| 4 | tag_name | VARCHAR(64) | tag 的名称 | +| 5 | tag_type | VARCHAR(64) | tag 的类型 | +| 6 | tag_value | VARCHAR(16384) | tag 的值 | ## INS_COLUMNS | # | **列名** | **数据类型** | **说明** | | --- | :-----------: | ------------ | ---------------------- | -| 1 | table_name | BINARY(192) | 表名 | -| 2 | db_name | BINARY(64) | 该表所在的数据库的名称 | -| 3 | table_type | BINARY(21) | 表类型 | -| 4 | col_name | BINARY(64) | 列 的名称 | -| 5 | col_type | BINARY(32) | 列 的类型 | +| 1 | table_name | VARCHAR(192) | 表名 | +| 2 | db_name | VARCHAR(64) | 该表所在的数据库的名称 | +| 3 | table_type | VARCHAR(21) | 表类型 | +| 4 | col_name | VARCHAR(64) | 列 的名称 | +| 5 | col_type | VARCHAR(32) | 列 的类型 | | 6 | col_length | INT | 列 的长度 | | 7 | col_precision | INT | 列 的精度 | | 8 | col_scale | INT | 列 的比例 | @@ -197,51 +208,51 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数 ## INS_USERS -提供系统中创建的用户的相关信息。 +提供系统中创建的用户的相关信息. SYSINFO 属性为0 的用户不能查看此表。 | # | **列名** | **数据类型** | **说明** | | --- | :---------: | ------------ | -------- | -| 1 | user_name | BINARY(23) | 用户名 | -| 2 | privilege | BINARY(256) | 权限 | +| 1 | user_name | VARCHAR(23) | 用户名 | +| 2 | privilege | VARCHAR(256) | 权限 | | 3 | create_time | TIMESTAMP | 创建时间 | ## INS_GRANTS -提供企业版授权的相关信息。 +提供企业版授权的相关信息。SYSINFO 属性为 0 的用户不能查看此表。 | # | **列名** | **数据类型** | **说明** | | --- | :---------: | ------------ | --------------------------------------------------------------------------------------------------------- | -| 1 | version | BINARY(9) | 企业版授权说明:official(官方授权的)/trial(试用的) | -| 2 | cpu_cores | BINARY(9) | 授权使用的 CPU 核心数量 | -| 3 | dnodes | BINARY(10) | 授权使用的 dnode 节点数量。需要注意,`dnodes` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | -| 4 | streams | BINARY(10) | 授权创建的流数量。需要注意,`streams` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | -| 5 | users | BINARY(10) | 授权创建的用户数量。需要注意,`users` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | -| 6 | accounts | BINARY(10) | 授权创建的帐户数量。需要注意,`accounts` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | -| 7 | storage | BINARY(21) | 授权使用的存储空间大小。需要注意,`storage` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | -| 8 | connections | BINARY(21) | 授权使用的客户端连接数量。需要注意,`connections` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | -| 9 | databases | BINARY(11) | 授权使用的数据库数量。需要注意,`databases` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | -| 10 | speed | BINARY(9) | 授权使用的数据点每秒写入数量 | -| 11 | querytime | BINARY(9) | 授权使用的查询总时长 | -| 12 | timeseries | BINARY(21) | 授权使用的测点数量 | -| 13 | expired | BINARY(5) | 是否到期,true:到期,false:未到期 | -| 14 | expire_time | BINARY(19) | 试用期到期时间 | +| 1 | version | VARCHAR(9) | 企业版授权说明:official(官方授权的)/trial(试用的) | +| 2 | cpu_cores | VARCHAR(9) | 授权使用的 CPU 核心数量 | +| 3 | dnodes | VARCHAR(10) | 授权使用的 dnode 节点数量。需要注意,`dnodes` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | +| 4 | streams | VARCHAR(10) | 授权创建的流数量。需要注意,`streams` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | +| 5 | users | VARCHAR(10) | 授权创建的用户数量。需要注意,`users` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | +| 6 | accounts | VARCHAR(10) | 授权创建的帐户数量。需要注意,`accounts` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | +| 7 | storage | VARCHAR(21) | 授权使用的存储空间大小。需要注意,`storage` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | +| 8 | connections | VARCHAR(21) | 授权使用的客户端连接数量。需要注意,`connections` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | +| 9 | databases | 
VARCHAR(11) | 授权使用的数据库数量。需要注意,`databases` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | +| 10 | speed | VARCHAR(9) | 授权使用的数据点每秒写入数量 | +| 11 | querytime | VARCHAR(9) | 授权使用的查询总时长 | +| 12 | timeseries | VARCHAR(21) | 授权使用的测点数量 | +| 13 | expired | VARCHAR(5) | 是否到期,true:到期,false:未到期 | +| 14 | expire_time | VARCHAR(19) | 试用期到期时间 | ## INS_VGROUPS -系统中所有 vgroups 的信息。 +系统中所有 vgroups 的信息。SYSINFO 属性为 0 的用户不能查看此表。 | # | **列名** | **数据类型** | **说明** | | --- | :-------: | ------------ | ------------------------------------------------------------------------------------------------ | | 1 | vgroup_id | INT | vgroup id | -| 2 | db_name | BINARY(32) | 数据库名 | +| 2 | db_name | VARCHAR(32) | 数据库名 | | 3 | tables | INT | 此 vgroup 内有多少表。需要注意,`tables` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | -| 4 | status | BINARY(10) | 此 vgroup 的状态 | +| 4 | status | VARCHAR(10) | 此 vgroup 的状态 | | 5 | v1_dnode | INT | 第一个成员所在的 dnode 的 id | -| 6 | v1_status | BINARY(10) | 第一个成员的状态 | +| 6 | v1_status | VARCHAR(10) | 第一个成员的状态 | | 7 | v2_dnode | INT | 第二个成员所在的 dnode 的 id | -| 8 | v2_status | BINARY(10) | 第二个成员的状态 | +| 8 | v2_status | VARCHAR(10) | 第二个成员的状态 | | 9 | v3_dnode | INT | 第三个成员所在的 dnode 的 id | -| 10 | v3_status | BINARY(10) | 第三个成员的状态 | +| 10 | v3_status | VARCHAR(10) | 第三个成员的状态 | | 11 | nfiles | INT | 此 vgroup 中数据/元数据文件的数量 | | 12 | file_size | INT | 此 vgroup 中数据/元数据文件的大小 | | 13 | tsma | TINYINT | 此 vgroup 是否专用于 Time-range-wise SMA,1: 是, 0: 否 | @@ -252,55 +263,57 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数 | # | **列名** | **数据类型** | **说明** | | --- | :------: | ------------ | --------------------------------------------------------------------------------------- | -| 1 | name | BINARY(32) | 配置项名称 | -| 2 | value | BINARY(64) | 该配置项的值。需要注意,`value` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | +| 1 | name | VARCHAR(32) | 配置项名称 | +| 2 | value | VARCHAR(64) | 该配置项的值。需要注意,`value` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | ## INS_DNODE_VARIABLES -系统中每个 dnode 的配置参数。 +系统中每个 dnode 的配置参数。SYSINFO 属性 为 0 的用户不能查看此表。 | # | **列名** | **数据类型** | **说明** | | --- | :------: | ------------ | --------------------------------------------------------------------------------------- | | 1 | dnode_id | INT | dnode 的 ID | -| 2 | name | BINARY(32) | 配置项名称 | -| 3 | value | BINARY(64) | 该配置项的值。需要注意,`value` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | +| 2 | name | VARCHAR(32) | 配置项名称 | +| 3 | value | VARCHAR(64) | 该配置项的值。需要注意,`value` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | ## INS_TOPICS | # | **列名** | **数据类型** | **说明** | | --- | :---------: | ------------ | ------------------------------ | -| 1 | topic_name | BINARY(192) | topic 名称 | -| 2 | db_name | BINARY(64) | topic 相关的 DB | +| 1 | topic_name | VARCHAR(192) | topic 名称 | +| 2 | db_name | VARCHAR(64) | topic 相关的 DB | | 3 | create_time | TIMESTAMP | topic 的 创建时间 | -| 4 | sql | BINARY(1024) | 创建该 topic 时所用的 SQL 语句 | +| 4 | sql | VARCHAR(1024) | 创建该 topic 时所用的 SQL 语句 | ## INS_SUBSCRIPTIONS | # | **列名** | **数据类型** | **说明** | | --- | :------------: | ------------ | ------------------------ | -| 1 | topic_name | BINARY(204) | 被订阅的 topic | -| 2 | consumer_group | BINARY(193) | 订阅者的消费者组 | +| 1 | topic_name | VARCHAR(204) | 被订阅的 topic | +| 2 | consumer_group | VARCHAR(193) | 订阅者的消费者组 | | 3 | vgroup_id | INT | 消费者被分配的 vgroup id | | 4 | consumer_id | BIGINT | 消费者的唯一 id | -| 5 | offset | BINARY(64) | 消费者的消费进度 | +| 5 | offset | VARCHAR(64) | 消费者的消费进度 | | 6 | rows | BIGINT | 消费者的消费的数据条数 | ## INS_STREAMS | # | **列名** | **数据类型** | **说明** | | --- | :----------: | ------------ | 
-------------------------------------------------------------------------------------------------------------------- | -| 1 | stream_name | BINARY(64) | 流计算名称 | +| 1 | stream_name | VARCHAR(64) | 流计算名称 | | 2 | create_time | TIMESTAMP | 创建时间 | -| 3 | sql | BINARY(1024) | 创建流计算时提供的 SQL 语句 | -| 4 | status | BINARY(20) | 流当前状态 | -| 5 | source_db | BINARY(64) | 源数据库 | -| 6 | target_db | BINARY(64) | 目的数据库 | -| 7 | target_table | BINARY(192) | 流计算写入的目标表 | +| 3 | sql | VARCHAR(1024) | 创建流计算时提供的 SQL 语句 | +| 4 | status | VARCHAR(20) | 流当前状态 | +| 5 | source_db | VARCHAR(64) | 源数据库 | +| 6 | target_db | VARCHAR(64) | 目的数据库 | +| 7 | target_table | VARCHAR(192) | 流计算写入的目标表 | | 8 | watermark | BIGINT | watermark,详见 SQL 手册流式计算。需要注意,`watermark` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | | 9 | trigger | INT | 计算结果推送模式,详见 SQL 手册流式计算。需要注意,`trigger` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | ## INS_USER_PRIVILEGES +注:SYSINFO 属性为 0 的用户不能查看此表。 + | # | **列名** | **数据类型** | **说明** | | --- | :----------: | ------------ | -------------------------------------------------------------------------------------------------------------------- | | 1 | user_name | VARCHAR(24) | 用户名 diff --git a/docs/zh/12-taos-sql/24-show.md b/docs/zh/12-taos-sql/24-show.md index 197a7c78d6..09333dd0b7 100644 --- a/docs/zh/12-taos-sql/24-show.md +++ b/docs/zh/12-taos-sql/24-show.md @@ -73,10 +73,10 @@ SHOW CREATE TABLE [db_name.]tb_name ## SHOW DATABASES ```sql -SHOW DATABASES; +SHOW [USER | SYSTEM] DATABASES; ``` -显示用户定义的所有数据库。 +显示定义的所有数据库。SYSTEM 指定只显示系统数据库。USER 指定只显示用户创建的数据库。 ## SHOW DNODES @@ -183,10 +183,10 @@ SHOW SUBSCRIPTIONS; ## SHOW TABLES ```sql -SHOW [db_name.]TABLES [LIKE 'pattern']; +SHOW [NORMAL | CHILD] [db_name.]TABLES [LIKE 'pattern']; ``` -显示当前数据库下的所有普通表和子表的信息。可以使用 LIKE 对表名进行模糊匹配。 +显示当前数据库下的所有普通表和子表的信息。可以使用 LIKE 对表名进行模糊匹配。NORMAL 指定只显示普通表信息, CHILD 指定只显示子表信息。 ## SHOW TABLE DISTRIBUTED diff --git a/docs/zh/14-reference/05-taosbenchmark.md b/docs/zh/14-reference/05-taosbenchmark.md index cc9f467138..597c188c11 100644 --- a/docs/zh/14-reference/05-taosbenchmark.md +++ b/docs/zh/14-reference/05-taosbenchmark.md @@ -395,6 +395,7 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\) ### 查询场景配置参数 查询场景下 `filetype` 必须设置为 `query`。 +`query_times` 指定运行查询的次数,数值类型 查询场景可以通过设置 `kill_slow_query_threshold` 和 `kill_slow_query_interval` 参数来控制杀掉慢查询语句的执行,threshold 控制如果 exec_usec 超过指定时间的查询将被 taosBenchmark 杀掉,单位为秒;interval 控制休眠时间,避免持续查询慢查询消耗 CPU ,单位为秒。 diff --git a/docs/zh/14-reference/06-taosdump.md b/docs/zh/14-reference/06-taosdump.md index 9fe3c5af7a..8972e587b0 100644 --- a/docs/zh/14-reference/06-taosdump.md +++ b/docs/zh/14-reference/06-taosdump.md @@ -106,7 +106,7 @@ Usage: taosdump [OPTION...] dbname [tbname ...] use letter and number only. Default is NOT. -n, --no-escape No escape char '`'. Default is using it. -Q, --dot-replace Repalce dot character with underline character in - the table name. + the table name.(Version 2.5.3) -T, --thread-num=THREAD_NUM Number of thread for dump in file. Default is 8. -C, --cloud=CLOUD_DSN specify a DSN to access TDengine cloud service @@ -116,6 +116,10 @@ Usage: taosdump [OPTION...] dbname [tbname ...] -?, --help Give this help list --usage Give a short usage message -V, --version Print program version + -W, --rename=RENAME-LIST Rename database name with new name during + importing data. RENAME-LIST: + "db1=newDB1|db2=newDB2" means rename db1 to newDB1 + and rename db2 to newDB2 (Version 2.5.4) Mandatory or optional arguments to long options are also mandatory or optional for any corresponding short options. 
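As a worked example of the `-W` rename option documented above, the following sketch drives taosdump through Python's subprocess module. The database names and dump directory are hypothetical; `-o`/`-i` are taosdump's standard output/input path flags, and `-W` requires taosdump 2.5.4 or later:

```python
import subprocess

DUMP_DIR = "/tmp/taosdump_demo"  # hypothetical scratch directory

# Export database db1 into DUMP_DIR.
subprocess.run(["taosdump", "-o", DUMP_DIR, "db1"], check=True)

# Import it back, renaming db1 to newDB1 on the way in (taosdump >= 2.5.4).
subprocess.run(["taosdump", "-i", DUMP_DIR, "-W", "db1=newDB1"], check=True)
```

The same rename list accepts multiple mappings separated by `|`, e.g. "db1=newDB1|db2=newDB2".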
diff --git a/docs/zh/17-operation/06-monitor.md b/docs/zh/17-operation/06-monitor.md index 4f8dccc78d..563a7fc6f7 100644 --- a/docs/zh/17-operation/06-monitor.md +++ b/docs/zh/17-operation/06-monitor.md @@ -102,22 +102,22 @@ TDinsight dashboard 数据来源于 log 库(存放监控数据的默认db, |field|type|is\_tag|comment| |:----|:---|:-----|:------| |ts|TIMESTAMP||timestamp| -|uptime|FLOAT||dnode uptime| +|uptime|FLOAT||dnode uptime,单位:天| |cpu\_engine|FLOAT||taosd cpu 使用率,从 `/proc//stat` 读取| |cpu\_system|FLOAT||服务器 cpu 使用率,从 `/proc/stat` 读取| |cpu\_cores|FLOAT||服务器 cpu 核数| |mem\_engine|INT||taosd 内存使用率,从 `/proc//status` 读取| -|mem\_system|INT||服务器可用内存| +|mem\_system|INT||服务器可用内存,单位 KB| |mem\_total|INT||服务器内存总量,单位 KB| -|disk\_engine|INT||| +|disk\_engine|INT||单位 bytes| |disk\_used|BIGINT||data dir 挂载的磁盘使用量,单位 bytes| |disk\_total|BIGINT||data dir 挂载的磁盘总容量,单位 bytes| -|net\_in|FLOAT||网络吞吐率,从 `/proc/net/dev` 中读取的 received bytes。单位 kb/s| -|net\_out|FLOAT||网络吞吐率,从 `/proc/net/dev` 中读取的 transmit bytes。单位 kb/s| -|io\_read|FLOAT||io 吞吐率,从 `/proc//io` 中读取的 rchar 与上次数值计算之后,计算得到速度。单位 kb/s| -|io\_write|FLOAT||io 吞吐率,从 `/proc//io` 中读取的 wchar 与上次数值计算之后,计算得到速度。单位 kb/s| -|io\_read\_disk|FLOAT||磁盘 io 吞吐率,从 `/proc//io` 中读取的 read_bytes。单位 kb/s| -|io\_write\_disk|FLOAT||磁盘 io 吞吐率,从 `/proc//io` 中读取的 write_bytes。单位 kb/s| +|net\_in|FLOAT||网络吞吐率,从 `/proc/net/dev` 中读取的 received bytes。单位 byte/s| +|net\_out|FLOAT||网络吞吐率,从 `/proc/net/dev` 中读取的 transmit bytes。单位 byte/s| +|io\_read|FLOAT||io 吞吐率,从 `/proc//io` 中读取的 rchar 与上次数值计算之后,计算得到速度。单位 byte/s| +|io\_write|FLOAT||io 吞吐率,从 `/proc//io` 中读取的 wchar 与上次数值计算之后,计算得到速度。单位 byte/s| +|io\_read\_disk|FLOAT||磁盘 io 吞吐率,从 `/proc//io` 中读取的 read_bytes。单位 byte/s| +|io\_write\_disk|FLOAT||磁盘 io 吞吐率,从 `/proc//io` 中读取的 write_bytes。单位 byte/s| |req\_select|INT||两个间隔内发生的查询请求数目| |req\_select\_rate|FLOAT||两个间隔内的查询请求速度 = `req_select / monitorInterval`| |req\_insert|INT||两个间隔内发生的写入请求,包含的单条数据数目| @@ -146,9 +146,9 @@ TDinsight dashboard 数据来源于 log 库(存放监控数据的默认db, |ts|TIMESTAMP||timestamp| |name|NCHAR||data 目录,一般为 `/var/lib/taos`| |level|INT||0、1、2 多级存储级别| -|avail|BIGINT||data 目录可用空间| -|used|BIGINT||data 目录已使用空间| -|total|BIGINT||data 目录空间| +|avail|BIGINT||data 目录可用空间。单位 byte| +|used|BIGINT||data 目录已使用空间。单位 byte| +|total|BIGINT||data 目录空间。单位 byte| |dnode\_id|INT|TAG|dnode id| |dnode\_ep|NCHAR|TAG|dnode endpoint| |cluster\_id|NCHAR|TAG|cluster id| @@ -161,9 +161,9 @@ TDinsight dashboard 数据来源于 log 库(存放监控数据的默认db, |:----|:---|:-----|:------| |ts|TIMESTAMP||timestamp| |name|NCHAR||log 目录名,一般为 `/var/log/taos/`| -|avail|BIGINT||log 目录可用空间| -|used|BIGINT||log 目录已使用空间| -|total|BIGINT||log 目录空间| +|avail|BIGINT||log 目录可用空间。单位 byte| +|used|BIGINT||log 目录已使用空间。单位 byte| +|total|BIGINT||log 目录空间。单位 byte| |dnode\_id|INT|TAG|dnode id| |dnode\_ep|NCHAR|TAG|dnode endpoint| |cluster\_id|NCHAR|TAG|cluster id| @@ -176,9 +176,9 @@ TDinsight dashboard 数据来源于 log 库(存放监控数据的默认db, |:----|:---|:-----|:------| |ts|TIMESTAMP||timestamp| |name|NCHAR||temp 目录名,一般为 `/tmp/`| -|avail|BIGINT||temp 目录可用空间| -|used|BIGINT||temp 目录已使用空间| -|total|BIGINT||temp 目录空间| +|avail|BIGINT||temp 目录可用空间。单位 byte| +|used|BIGINT||temp 目录已使用空间。单位 byte| +|total|BIGINT||temp 目录空间。单位 byte| |dnode\_id|INT|TAG|dnode id| |dnode\_ep|NCHAR|TAG|dnode endpoint| |cluster\_id|NCHAR|TAG|cluster id| diff --git a/docs/zh/20-third-party/70-seeq.md b/docs/zh/20-third-party/70-seeq.md index d5b7463925..f1e11b1b98 100644 --- a/docs/zh/20-third-party/70-seeq.md +++ b/docs/zh/20-third-party/70-seeq.md @@ -14,40 +14,7 @@ Seeq 是制造业和工业互联网(IIOT)高级分析软件。Seeq 支持在 ### Seeq 安装方法 -从 [Seeq 
官网](https://www.seeq.com/customer-download)下载相关软件,例如 Seeq Server 和 Seeq Data Lab 等。 - -### Seeq Server 安装和启动 - -``` -tar xvzf seeq-server-xxx.tar.gz -cd seeq-server-installer -sudo ./install - -sudo seeq service enable -sudo seeq start -``` - -### Seeq Data Lab Server 安装和启动 - -Seeq Data Lab 需要安装在和 Seeq Server 不同的服务器上,并通过配置和 Seeq Server 互联。详细安装配置指令参见[Seeq 官方文档](https://support.seeq.com/space/KB/1034059842)。 - -``` -tar xvf seeq-data-lab--64bit-linux.tar.gz -sudo seeq-data-lab-installer/install -f /opt/seeq/seeq-data-lab -g /var/opt/seeq -u seeq -sudo seeq config set Network/DataLab/Hostname localhost -sudo seeq config set Network/DataLab/Port 34231 # the port of the Data Lab server (usually 34231) -sudo seeq config set Network/Hostname # the host IP or URL of the main Seeq Server - -# If the main Seeq server is configured to listen over HTTPS -sudo seeq config set Network/Webserver/SecurePort 443 # the secure port of the main Seeq Server (usually 443) - -# If the main Seeq server is NOT configured to listen over HTTPS -sudo seeq config set Network/Webserver/Port - -#On the main Seeq server, open a Seeq Command Prompt and set the hostname of the Data Lab server: -sudo seeq config set Network/DataLab/Hostname # the host IP (not URL) of the Data Lab server -sudo seeq config set Network/DataLab/Port 34231 # the port of the Data Lab server (usually 34231 -``` +从 [Seeq 官网](https://www.seeq.com/customer-download)下载相关软件,例如 Seeq Server 和 Seeq Data Lab 等。Seeq Data Lab 需要安装在和 Seeq Server 不同的服务器上,并通过配置和 Seeq Server 互联。详细安装配置指令参见[Seeq 知识库]( https://support.seeq.com/kb/latest/cloud/)。 ## TDengine 本地实例安装方法 diff --git a/examples/JDBC/JDBCDemo/README-jdbc-windows.md b/examples/JDBC/JDBCDemo/README-jdbc-windows.md index 5a781f40f7..e91a953cd1 100644 --- a/examples/JDBC/JDBCDemo/README-jdbc-windows.md +++ b/examples/JDBC/JDBCDemo/README-jdbc-windows.md @@ -44,17 +44,17 @@ OS name: "windows 10", version: "10.0", arch: "amd64", family: "windows" - + D:\apache-maven-localRepository - - alimaven - aliyun maven - http://maven.aliyun.com/nexus/content/groups/public/ - central - + + alimaven + aliyun maven + http://maven.aliyun.com/nexus/content/groups/public/ + central + @@ -126,7 +126,7 @@ https://www.taosdata.com/cn/all-downloads/ 修改client的hosts文件(C:\Windows\System32\drivers\etc\hosts),将server的hostname和ip配置到client的hosts文件中 ``` -192.168.236.136 td01 +192.168.236.136 td01 ``` 配置完成后,在命令行内使用TDengine CLI连接server端 diff --git a/examples/go/BUILD.md b/examples/go/BUILD.md new file mode 100644 index 0000000000..dd607001cc --- /dev/null +++ b/examples/go/BUILD.md @@ -0,0 +1,3 @@ +go mod init demo +go mod tidy +go build diff --git a/include/common/tdataformat.h b/include/common/tdataformat.h index e04bdd1b07..aed1d03fc1 100644 --- a/include/common/tdataformat.h +++ b/include/common/tdataformat.h @@ -108,7 +108,7 @@ int32_t tBufferReserve(SBuffer *pBuffer, int64_t nData, void **ppData); int32_t tRowBuild(SArray *aColVal, const STSchema *pTSchema, SRow **ppRow); int32_t tRowGet(SRow *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal); void tRowDestroy(SRow *pRow); -void tRowSort(SArray *aRowP); +int32_t tRowSort(SArray *aRowP); int32_t tRowMerge(SArray *aRowP, STSchema *pTSchema, int8_t flag); int32_t tRowUpsertColData(SRow *pRow, STSchema *pTSchema, SColData *aColData, int32_t nColData, int32_t flag); diff --git a/include/common/tmsg.h b/include/common/tmsg.h index 94b8a8348e..d08b424e9c 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -768,6 +768,8 @@ typedef struct { char* pAst2; int64_t 
deleteMark1; int64_t deleteMark2; + int32_t sqlLen; + char* sql; } SMCreateStbReq; int32_t tSerializeSMCreateStbReq(void* buf, int32_t bufLen, SMCreateStbReq* pReq); @@ -788,10 +790,13 @@ typedef struct { int8_t source; // 1-taosX or 0-taosClient int8_t reserved[6]; tb_uid_t suid; + int32_t sqlLen; + char* sql; } SMDropStbReq; int32_t tSerializeSMDropStbReq(void* buf, int32_t bufLen, SMDropStbReq* pReq); int32_t tDeserializeSMDropStbReq(void* buf, int32_t bufLen, SMDropStbReq* pReq); +void tFreeSMDropStbReq(SMDropStbReq *pReq); typedef struct { char name[TSDB_TABLE_FNAME_LEN]; @@ -801,6 +806,8 @@ typedef struct { int32_t ttl; int32_t commentLen; char* comment; + int32_t sqlLen; + char* sql; } SMAlterStbReq; int32_t tSerializeSMAlterStbReq(void* buf, int32_t bufLen, SMAlterStbReq* pReq); @@ -871,10 +878,13 @@ int32_t tDeserializeSCreateAcctReq(void* buf, int32_t bufLen, SCreateAcctReq* pR typedef struct { char user[TSDB_USER_LEN]; + int32_t sqlLen; + char* sql; } SDropUserReq, SDropAcctReq; int32_t tSerializeSDropUserReq(void* buf, int32_t bufLen, SDropUserReq* pReq); int32_t tDeserializeSDropUserReq(void* buf, int32_t bufLen, SDropUserReq* pReq); +void tFreeSDropUserReq(SDropUserReq *pReq); typedef struct SIpV4Range{ uint32_t ip; @@ -888,19 +898,21 @@ typedef struct { SIpWhiteList* cloneIpWhiteList(SIpWhiteList* pIpWhiteList); typedef struct { - int8_t createType; - int8_t superUser; // denote if it is a super user or not - int8_t sysInfo; - int8_t enable; - char user[TSDB_USER_LEN]; - char pass[TSDB_USET_PASSWORD_LEN]; + int8_t createType; + int8_t superUser; // denote if it is a super user or not + int8_t sysInfo; + int8_t enable; + char user[TSDB_USER_LEN]; + char pass[TSDB_USET_PASSWORD_LEN]; int32_t numIpRanges; SIpV4Range* pIpRanges; + int32_t sqlLen; + char* sql; } SCreateUserReq; int32_t tSerializeSCreateUserReq(void* buf, int32_t bufLen, SCreateUserReq* pReq); int32_t tDeserializeSCreateUserReq(void* buf, int32_t bufLen, SCreateUserReq* pReq); -void tFreeSCreateUserReq(SCreateUserReq* pReq); +void tFreeSCreateUserReq(SCreateUserReq *pReq); typedef struct { int64_t ver; @@ -927,18 +939,20 @@ int32_t tSerializeRetrieveIpWhite(void* buf, int32_t bufLen, SRetrieveIpWhiteReq int32_t tDeserializeRetrieveIpWhite(void* buf, int32_t bufLen, SRetrieveIpWhiteReq* pReq); typedef struct { - int8_t alterType; - int8_t superUser; - int8_t sysInfo; - int8_t enable; - char user[TSDB_USER_LEN]; - char pass[TSDB_USET_PASSWORD_LEN]; - char objname[TSDB_DB_FNAME_LEN]; // db or topic - char tabName[TSDB_TABLE_NAME_LEN]; - char* tagCond; - int32_t tagCondLen; + int8_t alterType; + int8_t superUser; + int8_t sysInfo; + int8_t enable; + char user[TSDB_USER_LEN]; + char pass[TSDB_USET_PASSWORD_LEN]; + char objname[TSDB_DB_FNAME_LEN]; // db or topic + char tabName[TSDB_TABLE_NAME_LEN]; + char* tagCond; + int32_t tagCondLen; int32_t numIpRanges; SIpV4Range* pIpRanges; + int32_t sqlLen; + char* sql; } SAlterUserReq; int32_t tSerializeSAlterUserReq(void* buf, int32_t bufLen, SAlterUserReq* pReq); @@ -1118,6 +1132,8 @@ typedef struct { int16_t hashPrefix; int16_t hashSuffix; int32_t tsdbPageSize; + int32_t sqlLen; + char* sql; } SCreateDbReq; int32_t tSerializeSCreateDbReq(void* buf, int32_t bufLen, SCreateDbReq* pReq); @@ -1144,18 +1160,24 @@ typedef struct { int32_t minRows; int32_t walRetentionPeriod; int32_t walRetentionSize; + int32_t sqlLen; + char* sql; } SAlterDbReq; int32_t tSerializeSAlterDbReq(void* buf, int32_t bufLen, SAlterDbReq* pReq); int32_t tDeserializeSAlterDbReq(void* buf, int32_t bufLen, 
SAlterDbReq* pReq); +void tFreeSAlterDbReq(SAlterDbReq* pReq); typedef struct { char db[TSDB_DB_FNAME_LEN]; int8_t ignoreNotExists; + int32_t sqlLen; + char* sql; } SDropDbReq; int32_t tSerializeSDropDbReq(void* buf, int32_t bufLen, SDropDbReq* pReq); int32_t tDeserializeSDropDbReq(void* buf, int32_t bufLen, SDropDbReq* pReq); +void tFreeSDropDbReq(SDropDbReq* pReq); typedef struct { char db[TSDB_DB_FNAME_LEN]; @@ -1350,10 +1372,13 @@ void tFreeSUserAuthBatchRsp(SUserAuthBatchRsp* pRsp); typedef struct { char db[TSDB_DB_FNAME_LEN]; STimeWindow timeRange; + int32_t sqlLen; + char* sql; } SCompactDbReq; int32_t tSerializeSCompactDbReq(void* buf, int32_t bufLen, SCompactDbReq* pReq); int32_t tDeserializeSCompactDbReq(void* buf, int32_t bufLen, SCompactDbReq* pReq); +void tFreeSCompactDbReq(SCompactDbReq *pReq); typedef struct { char name[TSDB_FUNC_NAME_LEN]; @@ -1933,10 +1958,13 @@ void tFreeSExplainRsp(SExplainRsp* pRsp); typedef struct { char fqdn[TSDB_FQDN_LEN]; // end point, hostname:port int32_t port; + int32_t sqlLen; + char* sql; } SCreateDnodeReq; int32_t tSerializeSCreateDnodeReq(void* buf, int32_t bufLen, SCreateDnodeReq* pReq); int32_t tDeserializeSCreateDnodeReq(void* buf, int32_t bufLen, SCreateDnodeReq* pReq); +void tFreeSCreateDnodeReq(SCreateDnodeReq* pReq); typedef struct { int32_t dnodeId; @@ -1944,10 +1972,13 @@ typedef struct { int32_t port; int8_t force; int8_t unsafe; + int32_t sqlLen; + char* sql; } SDropDnodeReq; int32_t tSerializeSDropDnodeReq(void* buf, int32_t bufLen, SDropDnodeReq* pReq); int32_t tDeserializeSDropDnodeReq(void* buf, int32_t bufLen, SDropDnodeReq* pReq); +void tFreeSDropDnodeReq(SDropDnodeReq* pReq); enum { RESTORE_TYPE__ALL = 1, @@ -1959,19 +1990,25 @@ enum { typedef struct { int32_t dnodeId; int8_t restoreType; + int32_t sqlLen; + char* sql; } SRestoreDnodeReq; int32_t tSerializeSRestoreDnodeReq(void* buf, int32_t bufLen, SRestoreDnodeReq* pReq); int32_t tDeserializeSRestoreDnodeReq(void* buf, int32_t bufLen, SRestoreDnodeReq* pReq); +void tFreeSRestoreDnodeReq(SRestoreDnodeReq *pReq); typedef struct { int32_t dnodeId; char config[TSDB_DNODE_CONFIG_LEN]; char value[TSDB_DNODE_VALUE_LEN]; + int32_t sqlLen; + char* sql; } SMCfgDnodeReq; int32_t tSerializeSMCfgDnodeReq(void* buf, int32_t bufLen, SMCfgDnodeReq* pReq); int32_t tDeserializeSMCfgDnodeReq(void* buf, int32_t bufLen, SMCfgDnodeReq* pReq); +void tFreeSMCfgDnodeReq(SMCfgDnodeReq *pReq); typedef struct { char config[TSDB_DNODE_CONFIG_LEN]; @@ -1983,12 +2020,15 @@ int32_t tDeserializeSDCfgDnodeReq(void* buf, int32_t bufLen, SDCfgDnodeReq* pReq typedef struct { int32_t dnodeId; + int32_t sqlLen; + char* sql; } SMCreateMnodeReq, SMDropMnodeReq, SDDropMnodeReq, SMCreateQnodeReq, SMDropQnodeReq, SDCreateQnodeReq, SDDropQnodeReq, SMCreateSnodeReq, SMDropSnodeReq, SDCreateSnodeReq, SDDropSnodeReq; int32_t tSerializeSCreateDropMQSNodeReq(void* buf, int32_t bufLen, SMCreateQnodeReq* pReq); int32_t tDeserializeSCreateDropMQSNodeReq(void* buf, int32_t bufLen, SMCreateQnodeReq* pReq); - +void tFreeSMCreateQnodeReq(SMCreateQnodeReq *pReq); +void tFreeSDDropQnodeReq(SDDropQnodeReq* pReq); typedef struct { int8_t replica; SReplica replicas[TSDB_MAX_REPLICA]; @@ -2023,10 +2063,13 @@ int32_t tDeserializeSKillTransReq(void* buf, int32_t bufLen, SKillTransReq* pReq typedef struct { int32_t useless; // useless + int32_t sqlLen; + char* sql; } SBalanceVgroupReq; int32_t tSerializeSBalanceVgroupReq(void* buf, int32_t bufLen, SBalanceVgroupReq* pReq); int32_t tDeserializeSBalanceVgroupReq(void* buf, int32_t bufLen, 
SBalanceVgroupReq* pReq); +void tFreeSBalanceVgroupReq(SBalanceVgroupReq *pReq); typedef struct { int32_t vgId1; @@ -2041,18 +2084,24 @@ typedef struct { int32_t dnodeId1; int32_t dnodeId2; int32_t dnodeId3; + int32_t sqlLen; + char* sql; } SRedistributeVgroupReq; int32_t tSerializeSRedistributeVgroupReq(void* buf, int32_t bufLen, SRedistributeVgroupReq* pReq); int32_t tDeserializeSRedistributeVgroupReq(void* buf, int32_t bufLen, SRedistributeVgroupReq* pReq); +void tFreeSRedistributeVgroupReq(SRedistributeVgroupReq *pReq); typedef struct { int32_t useless; int32_t vgId; + int32_t sqlLen; + char* sql; } SBalanceVgroupLeaderReq; int32_t tSerializeSBalanceVgroupLeaderReq(void* buf, int32_t bufLen, SBalanceVgroupLeaderReq* pReq); int32_t tDeserializeSBalanceVgroupLeaderReq(void* buf, int32_t bufLen, SBalanceVgroupLeaderReq* pReq); +void tFreeSBalanceVgroupLeaderReq(SBalanceVgroupLeaderReq *pReq); typedef struct { int32_t vgId; @@ -2526,10 +2575,13 @@ typedef struct { typedef struct { char name[TSDB_TOPIC_FNAME_LEN]; int8_t igNotExists; + int32_t sqlLen; + char* sql; } SMDropTopicReq; int32_t tSerializeSMDropTopicReq(void* buf, int32_t bufLen, SMDropTopicReq* pReq); int32_t tDeserializeSMDropTopicReq(void* buf, int32_t bufLen, SMDropTopicReq* pReq); +void tFreeSMDropTopicReq(SMDropTopicReq *pReq); typedef struct { char topic[TSDB_TOPIC_FNAME_LEN]; @@ -2625,6 +2677,8 @@ typedef struct SVCreateTbReq { SSchemaWrapper schemaRow; } ntb; }; + int32_t sqlLen; + char* sql; } SVCreateTbReq; int tEncodeSVCreateTbReq(SEncoder* pCoder, const SVCreateTbReq* pReq); @@ -2636,6 +2690,7 @@ static FORCE_INLINE void tdDestroySVCreateTbReq(SVCreateTbReq* req) { return; } + taosMemoryFreeClear(req->sql); taosMemoryFreeClear(req->name); taosMemoryFreeClear(req->comment); if (req->type == TSDB_CHILD_TABLE) { @@ -3099,6 +3154,8 @@ typedef struct { typedef struct { char name[TSDB_STREAM_FNAME_LEN]; int8_t igNotExists; + int32_t sqlLen; + char* sql; } SMDropStreamReq; typedef struct { @@ -3125,6 +3182,7 @@ typedef struct { int32_t tSerializeSMDropStreamReq(void* buf, int32_t bufLen, const SMDropStreamReq* pReq); int32_t tDeserializeSMDropStreamReq(void* buf, int32_t bufLen, SMDropStreamReq* pReq); +void tFreeSMDropStreamReq(SMDropStreamReq* pReq); typedef struct { char name[TSDB_STREAM_FNAME_LEN]; diff --git a/include/libs/audit/audit.h b/include/libs/audit/audit.h index 8465ec510e..85d462b96b 100644 --- a/include/libs/audit/audit.h +++ b/include/libs/audit/audit.h @@ -29,7 +29,7 @@ extern "C" { #endif -#define AUDIT_DETAIL_MAX 16000 +#define AUDIT_DETAIL_MAX 65472 typedef struct { const char *server; @@ -39,7 +39,8 @@ typedef struct { int32_t auditInit(const SAuditCfg *pCfg); void auditSend(SJson *pJson); -void auditRecord(SRpcMsg *pReq, int64_t clusterId, char *operation, char *target1, char *target2, char *detail); +void auditRecord(SRpcMsg *pReq, int64_t clusterId, char *operation, char *target1, char *target2, + char *detail, int32_t len); #ifdef __cplusplus } diff --git a/include/util/talgo.h b/include/util/talgo.h index 7c92c0fe87..b065ea3705 100644 --- a/include/util/talgo.h +++ b/include/util/talgo.h @@ -54,6 +54,17 @@ typedef int32_t (*__ext_compar_fn_t)(const void *p1, const void *p2, const void */ void taosqsort(void *src, int64_t numOfElem, int64_t size, const void *param, __ext_compar_fn_t comparFn); +/** + * merge sort, with the compare function requiring additional parameters support + * + * @param src + * @param numOfElem + * @param size + * @param comparFn + * @return int32_t 0 for success, other for 
failure. + */ +int32_t taosMergeSort(void *src, int64_t numOfElem, int64_t size, __compar_fn_t comparFn); + /** * binary search, with range support * diff --git a/include/util/tarray.h b/include/util/tarray.h index 4d9c930521..e494f78f48 100644 --- a/include/util/tarray.h +++ b/include/util/tarray.h @@ -214,12 +214,19 @@ void taosArrayDestroyEx(SArray* pArray, FDelete fp); void taosArraySwap(SArray* a, SArray* b); /** - * sort the array + * sort the array use qsort * @param pArray * @param compar */ void taosArraySort(SArray* pArray, __compar_fn_t comparFn); +/** + * sort the array use merge sort + * @param pArray + * @param compar + */ +int32_t taosArrayMSort(SArray* pArray, __compar_fn_t comparFn); + /** * search the array * @param pArray diff --git a/include/util/tarray2.h b/include/util/tarray2.h index cd49e64789..2e9b0c7cb5 100644 --- a/include/util/tarray2.h +++ b/include/util/tarray2.h @@ -165,6 +165,13 @@ static FORCE_INLINE int32_t tarray2SortInsert(void *arr, const void *elePtr, int #define TARRAY2_FOREACH_PTR_REVERSE(a, ep) \ for (int32_t __i = (a)->size - 1; __i >= 0 && ((ep) = &(a)->data[__i], 1); __i--) +#define TARRAY2_SORT(a, cmp) \ + do { \ + if ((a)->size > 1) { \ + taosSort((a)->data, (a)->size, sizeof((a)->data[0]), (__compar_fn_t)cmp); \ + } \ + } while (0) + #ifdef __cplusplus } #endif diff --git a/packaging/cfg/taos.cfg b/packaging/cfg/taos.cfg index 2159899aa2..743edb6ca3 100644 --- a/packaging/cfg/taos.cfg +++ b/packaging/cfg/taos.cfg @@ -98,6 +98,9 @@ # enable/disable system monitor # monitor 1 +# enable/disable audit log +# audit 1 + # The following parameter is used to limit the maximum number of lines in log files. # max number of lines per log filters # numOfLogLines 10000000 diff --git a/packaging/cfg/taosd.service b/packaging/cfg/taosd.service index 52c4b1d1e2..bfa330f6cb 100644 --- a/packaging/cfg/taosd.service +++ b/packaging/cfg/taosd.service @@ -8,7 +8,7 @@ Type=simple ExecStart=/usr/bin/taosd ExecStartPre=/usr/local/taos/bin/startPre.sh TimeoutStopSec=1000000s -LimitNOFILE=infinity +LimitNOFILE=1048576 LimitNPROC=infinity LimitCORE=infinity TimeoutStartSec=0 diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh index 9e70a6bbf1..42465b8783 100755 --- a/packaging/tools/makepkg.sh +++ b/packaging/tools/makepkg.sh @@ -89,7 +89,7 @@ else ${build_dir}/bin/taosBenchmark \ ${build_dir}/bin/TDinsight.sh \ ${build_dir}/bin/tdengine-datasource.zip \ - ${build_dir}/bin/tdengine-datasource.zip.md5sum" + ${build_dir}/bin/tdengine-datasource.zip.md5" fi [ -f ${build_dir}/bin/taosx ] && taosx_bin="${build_dir}/bin/taosx" diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index 57cfa61847..5684411646 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -157,6 +157,10 @@ STscObj* taos_connect_internal(const char* ip, const char* user, const char* pas tscDebug("new app inst mgr %p, user:%s, ip:%s, port:%d", p, user, epSet.epSet.eps[0].fqdn, epSet.epSet.eps[0].port); pInst = &p; + } else { + ASSERTS((*pInst) && (*pInst)->pAppHbMgr, "*pInst:%p, pAppHgMgr:%p", *pInst, (*pInst) ? (*pInst)->pAppHbMgr : NULL); + // reset to 0 in case of conn with duplicated user key but its user has ever been dropped. 
+ atomic_store_8(&(*pInst)->pAppHbMgr->connHbFlag, 0); } taosThreadMutexUnlock(&appInfo.mutex); diff --git a/source/client/src/clientRawBlockWrite.c b/source/client/src/clientRawBlockWrite.c index b7e92d2e65..e7ba30d78c 100644 --- a/source/client/src/clientRawBlockWrite.c +++ b/source/client/src/clientRawBlockWrite.c @@ -377,6 +377,7 @@ _exit: for (int32_t iReq = 0; iReq < req.nReqs; iReq++) { pCreateReq = req.pReqs + iReq; taosMemoryFreeClear(pCreateReq->comment); + taosMemoryFreeClear(pCreateReq->sql); if (pCreateReq->type == TSDB_CHILD_TABLE) { taosArrayDestroy(pCreateReq->ctb.tagName); } diff --git a/source/client/src/clientSml.c b/source/client/src/clientSml.c index 10f8b89f4d..91c21fe344 100644 --- a/source/client/src/clientSml.c +++ b/source/client/src/clientSml.c @@ -683,7 +683,7 @@ static int32_t smlCheckMeta(SSchema *schema, int32_t length, SArray *cols, bool SSmlKv *kv = (SSmlKv *)taosArrayGet(cols, i); if (taosHashGet(hashTmp, kv->key, kv->keyLen) == NULL) { taosHashCleanup(hashTmp); - return -1; + return TSDB_CODE_SML_INVALID_DATA; } } taosHashCleanup(hashTmp); diff --git a/source/client/src/clientSmlJson.c b/source/client/src/clientSmlJson.c index f9076112c4..167638ab69 100644 --- a/source/client/src/clientSmlJson.c +++ b/source/client/src/clientSmlJson.c @@ -256,7 +256,8 @@ int smlJsonParseObjFirst(char **start, SSmlLineInfo *element, int8_t *offset) { } if (unlikely(index >= OTD_JSON_FIELDS_NUM)) { - uError("index >= %d, %s", OTD_JSON_FIELDS_NUM, *start) return -1; + uError("index >= %d, %s", OTD_JSON_FIELDS_NUM, *start); + return TSDB_CODE_TSC_INVALID_JSON; } char *sTmp = *start; @@ -367,7 +368,8 @@ int smlJsonParseObjFirst(char **start, SSmlLineInfo *element, int8_t *offset) { if (unlikely(index != OTD_JSON_FIELDS_NUM) || element->tags == NULL || element->cols == NULL || element->measure == NULL || element->timestamp == NULL) { - uError("elements != %d or element parse null", OTD_JSON_FIELDS_NUM) return -1; + uError("elements != %d or element parse null", OTD_JSON_FIELDS_NUM); + return TSDB_CODE_TSC_INVALID_JSON; } return 0; } @@ -381,7 +383,8 @@ int smlJsonParseObj(char **start, SSmlLineInfo *element, int8_t *offset) { } if (unlikely(index >= OTD_JSON_FIELDS_NUM)) { - uError("index >= %d, %s", OTD_JSON_FIELDS_NUM, *start) return -1; + uError("index >= %d, %s", OTD_JSON_FIELDS_NUM, *start); + return TSDB_CODE_TSC_INVALID_JSON; } if ((*start)[1] == 'm') { @@ -448,7 +451,8 @@ int smlJsonParseObj(char **start, SSmlLineInfo *element, int8_t *offset) { } if (unlikely(index != 0 && index != OTD_JSON_FIELDS_NUM)) { - uError("elements != %d", OTD_JSON_FIELDS_NUM) return -1; + uError("elements != %d", OTD_JSON_FIELDS_NUM); + return TSDB_CODE_TSC_INVALID_JSON; } return 0; } @@ -477,7 +481,7 @@ static int32_t smlGetJsonElements(cJSON *root, cJSON ***marks) { } if (*marks[i] == NULL) { uError("smlGetJsonElements error, not find mark:%d:%s", i, jsonName[i]); - return -1; + return TSDB_CODE_TSC_INVALID_JSON; } } return TSDB_CODE_SUCCESS; @@ -816,25 +820,25 @@ static int64_t smlParseTSFromJSONObj(SSmlHandle *info, cJSON *root, int32_t toPr int32_t size = cJSON_GetArraySize(root); if (unlikely(size != OTD_JSON_SUB_FIELDS_NUM)) { smlBuildInvalidDataMsg(&info->msgBuf, "invalidate json", NULL); - return -1; + return TSDB_CODE_TSC_INVALID_JSON; } cJSON *value = cJSON_GetObjectItem(root, "value"); if (unlikely(!cJSON_IsNumber(value))) { smlBuildInvalidDataMsg(&info->msgBuf, "invalidate json", NULL); - return -1; + return TSDB_CODE_TSC_INVALID_JSON; } cJSON *type = cJSON_GetObjectItem(root, 
"type"); if (unlikely(!cJSON_IsString(type))) { smlBuildInvalidDataMsg(&info->msgBuf, "invalidate json", NULL); - return -1; + return TSDB_CODE_TSC_INVALID_JSON; } double timeDouble = value->valuedouble; if (unlikely(smlDoubleToInt64OverFlow(timeDouble))) { smlBuildInvalidDataMsg(&info->msgBuf, "timestamp is too large", NULL); - return -1; + return TSDB_CODE_TSC_VALUE_OUT_OF_RANGE; } if (timeDouble == 0) { @@ -849,32 +853,29 @@ static int64_t smlParseTSFromJSONObj(SSmlHandle *info, cJSON *root, int32_t toPr size_t typeLen = strlen(type->valuestring); if (typeLen == 1 && (type->valuestring[0] == 's' || type->valuestring[0] == 'S')) { // seconds - int8_t fromPrecision = TSDB_TIME_PRECISION_SECONDS; +// int8_t fromPrecision = TSDB_TIME_PRECISION_SECONDS; if (smlFactorS[toPrecision] < INT64_MAX / tsInt64) { return tsInt64 * smlFactorS[toPrecision]; } - return -1; + return TSDB_CODE_TSC_VALUE_OUT_OF_RANGE; } else if (typeLen == 2 && (type->valuestring[1] == 's' || type->valuestring[1] == 'S')) { switch (type->valuestring[0]) { case 'm': case 'M': // milliseconds return convertTimePrecision(tsInt64, TSDB_TIME_PRECISION_MILLI, toPrecision); - break; case 'u': case 'U': // microseconds return convertTimePrecision(tsInt64, TSDB_TIME_PRECISION_MICRO, toPrecision); - break; case 'n': case 'N': return convertTimePrecision(tsInt64, TSDB_TIME_PRECISION_NANO, toPrecision); - break; default: - return -1; + return TSDB_CODE_TSC_INVALID_JSON_TYPE; } } else { - return -1; + return TSDB_CODE_TSC_INVALID_JSON_TYPE; } } @@ -895,7 +896,7 @@ static int64_t smlParseTSFromJSON(SSmlHandle *info, cJSON *timestamp) { double timeDouble = timestamp->valuedouble; if (unlikely(smlDoubleToInt64OverFlow(timeDouble))) { smlBuildInvalidDataMsg(&info->msgBuf, "timestamp is too large", NULL); - return -1; + return TSDB_CODE_TSC_VALUE_OUT_OF_RANGE; } if (unlikely(timeDouble < 0)) { @@ -911,14 +912,14 @@ static int64_t smlParseTSFromJSON(SSmlHandle *info, cJSON *timestamp) { if (unlikely(fromPrecision == -1)) { smlBuildInvalidDataMsg(&info->msgBuf, "timestamp precision can only be seconds(10 digits) or milli seconds(13 digits)", NULL); - return -1; + return TSDB_CODE_SML_INVALID_DATA; } int64_t tsInt64 = timeDouble; if (fromPrecision == TSDB_TIME_PRECISION_SECONDS) { if (smlFactorS[toPrecision] < INT64_MAX / tsInt64) { return tsInt64 * smlFactorS[toPrecision]; } - return -1; + return TSDB_CODE_TSC_VALUE_OUT_OF_RANGE; } else { return convertTimePrecision(timeDouble, fromPrecision, toPrecision); } @@ -926,7 +927,7 @@ static int64_t smlParseTSFromJSON(SSmlHandle *info, cJSON *timestamp) { return smlParseTSFromJSONObj(info, timestamp, toPrecision); } else { smlBuildInvalidDataMsg(&info->msgBuf, "invalidate json", NULL); - return -1; + return TSDB_CODE_TSC_INVALID_JSON; } } diff --git a/source/client/src/clientSmlLine.c b/source/client/src/clientSmlLine.c index a565fb1a21..006475654a 100644 --- a/source/client/src/clientSmlLine.c +++ b/source/client/src/clientSmlLine.c @@ -70,7 +70,7 @@ static int64_t smlParseInfluxTime(SSmlHandle *info, const char *data, int32_t le int64_t ts = smlGetTimeValue(data, len, fromPrecision, toPrecision); if (unlikely(ts == -1)) { smlBuildInvalidDataMsg(&info->msgBuf, "invalid timestamp", data); - return -1; + return TSDB_CODE_SML_INVALID_DATA; } return ts; } diff --git a/source/client/src/clientTmq.c b/source/client/src/clientTmq.c index 6ee5508048..781b362674 100644 --- a/source/client/src/clientTmq.c +++ b/source/client/src/clientTmq.c @@ -26,8 +26,7 @@ #define EMPTY_BLOCK_POLL_IDLE_DURATION 10 #define 
DEFAULT_AUTO_COMMIT_INTERVAL 5000 - -#define OFFSET_IS_RESET_OFFSET(_of) ((_of) < 0) +#define DEFAULT_HEARTBEAT_INTERVAL 3000 struct SMqMgmt { int8_t inited; @@ -63,8 +62,7 @@ struct tmq_conf_t { int8_t resetOffset; int8_t withTbName; int8_t snapEnable; - int32_t snapBatchSize; - bool hbBgEnable; +// int32_t snapBatchSize; uint16_t port; int32_t autoCommitInterval; char* ip; @@ -84,7 +82,6 @@ struct tmq_t { int32_t autoCommitInterval; int8_t resetOffsetCfg; uint64_t consumerId; - bool hbBgEnable; tmq_commit_cb* commitCb; void* commitCbUserParam; @@ -269,8 +266,7 @@ tmq_conf_t* tmq_conf_new() { conf->withTbName = false; conf->autoCommit = true; conf->autoCommitInterval = DEFAULT_AUTO_COMMIT_INTERVAL; - conf->resetOffset = TMQ_OFFSET__RESET_EARLIEST; - conf->hbBgEnable = true; + conf->resetOffset = TMQ_OFFSET__RESET_LATEST; return conf; } @@ -360,10 +356,10 @@ tmq_conf_res_t tmq_conf_set(tmq_conf_t* conf, const char* key, const char* value } } - if (strcasecmp(key, "experimental.snapshot.batch.size") == 0) { - conf->snapBatchSize = taosStr2int64(value); - return TMQ_CONF_OK; - } +// if (strcasecmp(key, "experimental.snapshot.batch.size") == 0) { +// conf->snapBatchSize = taosStr2int64(value); +// return TMQ_CONF_OK; +// } // if (strcasecmp(key, "enable.heartbeat.background") == 0) { // if (strcasecmp(value, "true") == 0) { @@ -822,7 +818,7 @@ void tmqSendHbReq(void* param, void* tmrId) { OVER: tDeatroySMqHbReq(&req); - taosTmrReset(tmqSendHbReq, 1000, param, tmqMgmt.timer, &tmq->hbLiveTimer); + taosTmrReset(tmqSendHbReq, DEFAULT_HEARTBEAT_INTERVAL, param, tmqMgmt.timer, &tmq->hbLiveTimer); taosReleaseRef(tmqMgmt.rsetId, refId); } @@ -1077,8 +1073,6 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) { pTmq->resetOffsetCfg = conf->resetOffset; taosInitRWLatch(&pTmq->lock); - pTmq->hbBgEnable = conf->hbBgEnable; - // assign consumerId pTmq->consumerId = tGenIdPI64(); @@ -1102,19 +1096,16 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) { goto _failed; } - if (pTmq->hbBgEnable) { - int64_t* pRefId = taosMemoryMalloc(sizeof(int64_t)); - *pRefId = pTmq->refId; - pTmq->hbLiveTimer = taosTmrStart(tmqSendHbReq, 1000, pRefId, tmqMgmt.timer); - } + int64_t* pRefId = taosMemoryMalloc(sizeof(int64_t)); + *pRefId = pTmq->refId; + pTmq->hbLiveTimer = taosTmrStart(tmqSendHbReq, DEFAULT_HEARTBEAT_INTERVAL, pRefId, tmqMgmt.timer); char buf[TSDB_OFFSET_LEN] = {0}; STqOffsetVal offset = {.type = pTmq->resetOffsetCfg}; tFormatOffset(buf, tListLen(buf), &offset); tscInfo("consumer:0x%" PRIx64 " is setup, refId:%" PRId64 - ", groupId:%s, snapshot:%d, autoCommit:%d, commitInterval:%dms, offset:%s, backgroudHB:%d", - pTmq->consumerId, pTmq->refId, pTmq->groupId, pTmq->useSnapshot, pTmq->autoCommit, pTmq->autoCommitInterval, - buf, pTmq->hbBgEnable); + ", groupId:%s, snapshot:%d, autoCommit:%d, commitInterval:%dms, offset:%s", + pTmq->consumerId, pTmq->refId, pTmq->groupId, pTmq->useSnapshot, pTmq->autoCommit, pTmq->autoCommitInterval, buf); return pTmq; diff --git a/source/client/test/clientTests.cpp b/source/client/test/clientTests.cpp index 6481dfe5bf..addf0aa629 100644 --- a/source/client/test/clientTests.cpp +++ b/source/client/test/clientTests.cpp @@ -47,7 +47,8 @@ void printSubResults(void* pRes, int32_t* totalRows) { int32_t precision = taos_result_precision(pRes); taos_print_row(buf, row, fields, numOfFields); *totalRows += 1; - printf("vgId: %d, offset: %lld, precision: %d, row content: %s\n", vgId, offset, precision, buf); + std::cout << "vgId:" << 
vgId << ", offset:" << offset << ", precision:" << precision << ", row content:" << buf + << std::endl; } // taos_free_result(pRes); @@ -832,7 +833,7 @@ TEST(clientCase, projection_query_tables) { for(int32_t i = 0; i < 1000000; ++i) { char t[512] = {0}; - sprintf(t, "insert into t1 values(now, %ld)", i); + sprintf(t, "insert into t1 values(now, %d)", i); while(1) { void* p = taos_query(pConn, t); code = taos_errno(p); @@ -1167,16 +1168,19 @@ TEST(clientCase, tmq_commit) { } for(int i = 0; i < numOfAssign; i++){ - printf("assign i:%d, vgId:%d, offset:%lld, start:%lld, end:%lld\n", i, pAssign[i].vgId, pAssign[i].currentOffset, pAssign[i].begin, pAssign[i].end); + tmq_topic_assignment* pa = &pAssign[i]; + std::cout << "assign i:" << i << ", vgId:" << pa->vgId << ", offset:" << pa->currentOffset << ", start:" + << pa->begin << ", end:" << pa->end << std::endl; - int64_t committed = tmq_committed(tmq, topicName, pAssign[i].vgId); - printf("committed vgId:%d, committed:%lld\n", pAssign[i].vgId, committed); + int64_t committed = tmq_committed(tmq, topicName, pa->vgId); + std::cout << "committed vgId:" << pa->vgId << ", committed:" << committed << std::endl; - int64_t position = tmq_position(tmq, topicName, pAssign[i].vgId); - printf("position vgId:%d, position:%lld\n", pAssign[i].vgId, position); - tmq_offset_seek(tmq, topicName, pAssign[i].vgId, 1); - position = tmq_position(tmq, topicName, pAssign[i].vgId); - printf("after seek 1, position vgId:%d, position:%lld\n", pAssign[i].vgId, position); + int64_t position = tmq_position(tmq, topicName, pa->vgId); + std::cout << "position vgId:" << pa->vgId << ", position:" << position << std::endl; + + tmq_offset_seek(tmq, topicName, pa->vgId, 1); + position = tmq_position(tmq, topicName, pa->vgId); + std::cout << "after seek 1, position vgId:" << pa->vgId << ", position:" << position << std::endl; } while (1) { @@ -1191,12 +1195,14 @@ tmq_commit_sync(tmq, pRes); for(int i = 0; i < numOfAssign; i++) { int64_t committed = tmq_committed(tmq, topicName, pAssign[i].vgId); - printf("committed vgId:%d, committed:%lld\n", pAssign[i].vgId, committed); + std::cout << "committed vgId:" << pAssign[i].vgId << ", committed:" << committed << std::endl; if(committed > 0){ int32_t code = tmq_commit_offset_sync(tmq, topicName, pAssign[i].vgId, 4); printf("tmq_commit_offset_sync vgId:%d, offset:4, code:%d\n", pAssign[i].vgId, code); int64_t committed = tmq_committed(tmq, topicName, pAssign[i].vgId); - printf("after tmq_commit_offset_sync, committed vgId:%d, committed:%lld\n", pAssign[i].vgId, committed); + + std::cout << "after tmq_commit_offset_sync, committed vgId:" << pAssign[i].vgId << ", committed:" << committed + << std::endl; } } if (pRes != NULL) { @@ -1212,7 +1218,12 @@ taos_close(pConn); fprintf(stderr, "%d msg consumed, include %d rows\n", msgCnt, totalRows); } - +namespace { +void doPrintInfo(tmq_topic_assignment* pa, int32_t index) { + std::cout << "assign i:" << index << ", vgId:" << pa->vgId << ", offset:" << pa->currentOffset << ", start:" + << pa->begin << ", end:" << pa->end << std::endl; +} +} TEST(clientCase, td_25129) { // taos_options(TSDB_OPTION_CONFIGDIR, "~/first/cfg"); @@ -1264,7 +1275,7 @@ } for(int i = 0; i < numOfAssign; i++){ - printf("assign i:%d, vgId:%d, offset:%lld, start:%lld, end:%lld\n", i, pAssign[i].vgId, pAssign[i].currentOffset, pAssign[i].begin, pAssign[i].end); + doPrintInfo(&pAssign[i], i); } // tmq_offset_seek(tmq, "tp", 
pAssign[0].vgId, 4); @@ -1281,7 +1292,7 @@ TEST(clientCase, td_25129) { } for(int i = 0; i < numOfAssign; i++){ - printf("assign i:%d, vgId:%d, offset:%lld, start:%lld, end:%lld\n", i, pAssign[i].vgId, pAssign[i].currentOffset, pAssign[i].begin, pAssign[i].end); + doPrintInfo(&pAssign[i], i); } tmq_free_assignment(pAssign); @@ -1298,7 +1309,7 @@ TEST(clientCase, td_25129) { for(int i = 0; i < numOfAssign; i++){ int64_t committed = tmq_committed(tmq, topicName, pAssign[i].vgId); - printf("assign i:%d, vgId:%d, committed:%lld, offset:%lld, start:%lld, end:%lld\n", i, pAssign[i].vgId, committed, pAssign[i].currentOffset, pAssign[i].begin, pAssign[i].end); + doPrintInfo(&pAssign[i], i); } while (1) { @@ -1328,7 +1339,7 @@ TEST(clientCase, td_25129) { } for(int i = 0; i < numOfAssign; i++){ - printf("assign i:%d, vgId:%d, offset:%lld, start:%lld, end:%lld\n", i, pAssign[i].vgId, pAssign[i].currentOffset, pAssign[i].begin, pAssign[i].end); + doPrintInfo(&pAssign[i], i); } } else { for(int i = 0; i < numOfAssign; i++) { @@ -1364,7 +1375,7 @@ TEST(clientCase, td_25129) { } for(int i = 0; i < numOfAssign; i++){ - printf("assign i:%d, vgId:%d, offset:%lld, start:%lld, end:%lld\n", i, pAssign[i].vgId, pAssign[i].currentOffset, pAssign[i].begin, pAssign[i].end); + doPrintInfo(&pAssign[i], i); } tmq_free_assignment(pAssign); diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 330eb4ae30..bf21b2eda0 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -2360,27 +2360,26 @@ void trimDataBlock(SSDataBlock* pBlock, int32_t totalRows, const bool* pBoolList int32_t maxRows = 0; size_t numOfCols = taosArrayGetSize(pBlock->pDataBlock); - for (int32_t i = 0; i < numOfCols; ++i) { - SColumnInfoData* pDst = taosArrayGet(pBlock->pDataBlock, i); - // it is a reserved column for scalar function, and no data in this column yet. - if (pDst->pData == NULL) { - continue; - } + if (!pBoolList) { + for (int32_t i = 0; i < numOfCols; ++i) { + SColumnInfoData* pDst = taosArrayGet(pBlock->pDataBlock, i); + // it is a reserved column for scalar function, and no data in this column yet. + if (pDst->pData == NULL) { + continue; + } - int32_t numOfRows = 0; - if (IS_VAR_DATA_TYPE(pDst->info.type)) { - pDst->varmeta.length = 0; + int32_t numOfRows = 0; + if (IS_VAR_DATA_TYPE(pDst->info.type)) { + pDst->varmeta.length = 0; + } } - } - - if (NULL == pBoolList) { return; } - + for (int32_t i = 0; i < numOfCols; ++i) { SColumnInfoData* pDst = taosArrayGet(pBlock->pDataBlock, i); // it is a reserved column for scalar function, and no data in this column yet. 
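+      // var-type columns whose varmeta.length is still 0 carry no payload yet and can be skipped as well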
- if (pDst->pData == NULL) { + if (pDst->pData == NULL || (IS_VAR_DATA_TYPE(pDst->info.type) && pDst->varmeta.length == 0)) { continue; } diff --git a/source/common/src/tdataformat.c b/source/common/src/tdataformat.c index e04ba1515f..d220da0d84 100644 --- a/source/common/src/tdataformat.c +++ b/source/common/src/tdataformat.c @@ -610,9 +610,13 @@ _exit: return code; } -void tRowSort(SArray *aRowP) { - if (TARRAY_SIZE(aRowP) <= 1) return; - taosArraySort(aRowP, tRowPCmprFn); +int32_t tRowSort(SArray *aRowP) { + if (TARRAY_SIZE(aRowP) <= 1) return 0; + int32_t code = taosArrayMSort(aRowP, tRowPCmprFn); + if (code != TSDB_CODE_SUCCESS) { + uError("taosArrayMSort failed caused by %d", code); + } + return code; } int32_t tRowMerge(SArray *aRowP, STSchema *pTSchema, int8_t flag) { @@ -3590,5 +3594,5 @@ void (*tColDataCalcSMA[])(SColData *pColData, int64_t *sum, int64_t *max, int64_ NULL, // TSDB_DATA_TYPE_DECIMAL NULL, // TSDB_DATA_TYPE_BLOB NULL, // TSDB_DATA_TYPE_MEDIUMBLOB - NULL // TSDB_DATA_TYPE_GEOMETRY + tColDataCalcSMAVarType // TSDB_DATA_TYPE_GEOMETRY }; diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 3f86ff0347..3d7b38161a 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -1711,6 +1711,13 @@ void taosCfgDynamicOptions(const char *option, const char *value) { return; } + if (strcasecmp(option, "asynclog") == 0) { + int32_t newAsynclog = atoi(value); + uInfo("asynclog set from %d to %d", tsAsyncLog, newAsynclog); + tsAsyncLog = newAsynclog; + return; + } + const char *options[] = { "dDebugFlag", "vDebugFlag", "mDebugFlag", "wDebugFlag", "sDebugFlag", "tsdbDebugFlag", "tqDebugFlag", "fsDebugFlag", "udfDebugFlag", "smaDebugFlag", "idxDebugFlag", "tdbDebugFlag", "tmrDebugFlag", "uDebugFlag", diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c index 0ba9539124..9b66bd1fb3 100644 --- a/source/common/src/tmsg.c +++ b/source/common/src/tmsg.c @@ -30,6 +30,32 @@ #include "tlog.h" +#define DECODESQL() \ + do { \ + if(!tDecodeIsEnd(&decoder)){ \ + if(tDecodeI32(&decoder, &pReq->sqlLen) < 0) return -1; \ + if(pReq->sqlLen > 0){ \ + if (tDecodeBinaryAlloc(&decoder, (void **)&pReq->sql, NULL) < 0) return -1; \ + } \ + } \ + } while (0) + +#define ENCODESQL() \ + do { \ + if (pReq->sqlLen > 0 && pReq->sql != NULL){ \ + if (tEncodeI32(&encoder, pReq->sqlLen) < 0) return -1; \ + if (tEncodeBinary(&encoder, pReq->sql, pReq->sqlLen) < 0) return -1; \ + } \ + } while (0) + +#define FREESQL() \ + do { \ + if(pReq->sql != NULL){ \ + taosMemoryFree(pReq->sql); \ + } \ + pReq->sql = NULL; \ + } while (0) + static int32_t tDecodeSVAlterTbReqCommon(SDecoder *pDecoder, SVAlterTbReq *pReq); static int32_t tDecodeSBatchDeleteReqCommon(SDecoder *pDecoder, SBatchDeleteReq *pReq); @@ -561,6 +587,8 @@ int32_t tSerializeSMCreateStbReq(void *buf, int32_t bufLen, SMCreateStbReq *pReq if (tEncodeI64(&encoder, pReq->deleteMark1) < 0) return -1; if (tEncodeI64(&encoder, pReq->deleteMark2) < 0) return -1; + ENCODESQL(); + tEndEncode(&encoder); int32_t tlen = encoder.pos; @@ -656,6 +684,8 @@ int32_t tDeserializeSMCreateStbReq(void *buf, int32_t bufLen, SMCreateStbReq *pR if (tDecodeI64(&decoder, &pReq->deleteMark1) < 0) return -1; if (tDecodeI64(&decoder, &pReq->deleteMark2) < 0) return -1; + DECODESQL(); + tEndDecode(&decoder); tDecoderClear(&decoder); return 0; @@ -668,6 +698,7 @@ void tFreeSMCreateStbReq(SMCreateStbReq *pReq) { taosMemoryFreeClear(pReq->pComment); taosMemoryFreeClear(pReq->pAst1); taosMemoryFreeClear(pReq->pAst2); + FREESQL(); } int32_t 
tSerializeSMDropStbReq(void *buf, int32_t bufLen, SMDropStbReq *pReq) { @@ -682,6 +713,7 @@ int32_t tSerializeSMDropStbReq(void *buf, int32_t bufLen, SMDropStbReq *pReq) { if (tEncodeI8(&encoder, pReq->reserved[i]) < 0) return -1; } if (tEncodeI64(&encoder, pReq->suid) < 0) return -1; + ENCODESQL(); tEndEncode(&encoder); int32_t tlen = encoder.pos; @@ -702,12 +734,18 @@ int32_t tDeserializeSMDropStbReq(void *buf, int32_t bufLen, SMDropStbReq *pReq) } if (tDecodeI64(&decoder, &pReq->suid) < 0) return -1; + DECODESQL(); + tEndDecode(&decoder); tDecoderClear(&decoder); return 0; } +void tFreeSMDropStbReq(SMDropStbReq *pReq) { + FREESQL(); +} + int32_t tSerializeSMAlterStbReq(void *buf, int32_t bufLen, SMAlterStbReq *pReq) { SEncoder encoder = {0}; tEncoderInit(&encoder, buf, bufLen); @@ -727,6 +765,7 @@ int32_t tSerializeSMAlterStbReq(void *buf, int32_t bufLen, SMAlterStbReq *pReq) if (pReq->commentLen > 0) { if (tEncodeCStr(&encoder, pReq->comment) < 0) return -1; } + ENCODESQL(); tEndEncode(&encoder); int32_t tlen = encoder.pos; @@ -767,6 +806,8 @@ int32_t tDeserializeSMAlterStbReq(void *buf, int32_t bufLen, SMAlterStbReq *pReq if (tDecodeCStrTo(&decoder, pReq->comment) < 0) return -1; } + DECODESQL(); + tEndDecode(&decoder); tDecoderClear(&decoder); return 0; @@ -776,6 +817,7 @@ void tFreeSMAltertbReq(SMAlterStbReq *pReq) { taosArrayDestroy(pReq->pFields); pReq->pFields = NULL; taosMemoryFreeClear(pReq->comment); + FREESQL(); } int32_t tSerializeSEpSet(void *buf, int32_t bufLen, const SEpSet *pEpset) { @@ -1084,7 +1126,7 @@ int32_t tDeserializeSNotifyReq(void *buf, int32_t bufLen, SNotifyReq *pReq) { } code = 0; - + _exit: tEndDecode(&decoder); tDecoderClear(&decoder); @@ -1426,6 +1468,7 @@ int32_t tSerializeSDropUserReq(void *buf, int32_t bufLen, SDropUserReq *pReq) { if (tStartEncode(&encoder) < 0) return -1; if (tEncodeCStr(&encoder, pReq->user) < 0) return -1; + ENCODESQL(); tEndEncode(&encoder); int32_t tlen = encoder.pos; @@ -1439,12 +1482,17 @@ int32_t tDeserializeSDropUserReq(void *buf, int32_t bufLen, SDropUserReq *pReq) if (tStartDecode(&decoder) < 0) return -1; if (tDecodeCStrTo(&decoder, pReq->user) < 0) return -1; + DECODESQL(); tEndDecode(&decoder); tDecoderClear(&decoder); return 0; } +void tFreeSDropUserReq(SDropUserReq *pReq) { + FREESQL(); +} + SIpWhiteList *cloneIpWhiteList(SIpWhiteList *pIpWhiteList) { if (pIpWhiteList == NULL) return NULL; @@ -1470,6 +1518,8 @@ int32_t tSerializeSCreateUserReq(void *buf, int32_t bufLen, SCreateUserReq *pReq if (tEncodeU32(&encoder, pReq->pIpRanges[i].ip) < 0) return -1; if (tEncodeU32(&encoder, pReq->pIpRanges[i].mask) < 0) return -1; } + + ENCODESQL(); tEndEncode(&encoder); int32_t tlen = encoder.pos; @@ -1495,15 +1545,13 @@ int32_t tDeserializeSCreateUserReq(void *buf, int32_t bufLen, SCreateUserReq *pR if (tDecodeU32(&decoder, &(pReq->pIpRanges[i].ip)) < 0) return -1; if (tDecodeU32(&decoder, &(pReq->pIpRanges[i].mask)) < 0) return -1; } + + DECODESQL(); tEndDecode(&decoder); - - tDecoderClear(&decoder); return 0; } -void tFreeSCreateUserReq(SCreateUserReq *pReq) { taosMemoryFree(pReq->pIpRanges); } - int32_t tSerializeSUpdateIpWhite(void *buf, int32_t bufLen, SUpdateIpWhite *pReq) { // impl later SEncoder encoder = {0}; @@ -1602,6 +1650,7 @@ int32_t tSerializeRetrieveIpWhite(void *buf, int32_t bufLen, SRetrieveIpWhiteReq tEncoderClear(&encoder); return tlen; } + int32_t tDeserializeRetrieveIpWhite(void *buf, int32_t bufLen, SRetrieveIpWhiteReq *pReq) { SDecoder decoder = {0}; tDecoderInit(&decoder, buf, bufLen); @@ -1614,6 +1663,11 
@@ int32_t tDeserializeRetrieveIpWhite(void *buf, int32_t bufLen, SRetrieveIpWhiteR return 0; } +void tFreeSCreateUserReq(SCreateUserReq *pReq) { + FREESQL(); + taosMemoryFreeClear(pReq->pIpRanges); +} + int32_t tSerializeSAlterUserReq(void *buf, int32_t bufLen, SAlterUserReq *pReq) { SEncoder encoder = {0}; tEncoderInit(&encoder, buf, bufLen); @@ -1637,6 +1691,7 @@ int32_t tSerializeSAlterUserReq(void *buf, int32_t bufLen, SAlterUserReq *pReq) if (tEncodeU32(&encoder, pReq->pIpRanges[i].ip) < 0) return -1; if (tEncodeU32(&encoder, pReq->pIpRanges[i].mask) < 0) return -1; } + ENCODESQL(); tEndEncode(&encoder); int32_t tlen = encoder.pos; @@ -1673,6 +1728,7 @@ int32_t tDeserializeSAlterUserReq(void *buf, int32_t bufLen, SAlterUserReq *pReq if (tDecodeU32(&decoder, &(pReq->pIpRanges[i].ip)) < 0) return -1; if (tDecodeU32(&decoder, &(pReq->pIpRanges[i].mask)) < 0) return -1; } + DECODESQL(); tEndDecode(&decoder); tDecoderClear(&decoder); @@ -1682,6 +1738,7 @@ int32_t tDeserializeSAlterUserReq(void *buf, int32_t bufLen, SAlterUserReq *pReq void tFreeSAlterUserReq(SAlterUserReq *pReq) { taosMemoryFreeClear(pReq->tagCond); taosMemoryFree(pReq->pIpRanges); + FREESQL(); } int32_t tSerializeSGetUserAuthReq(void *buf, int32_t bufLen, SGetUserAuthReq *pReq) { @@ -2041,6 +2098,7 @@ int32_t tSerializeSCreateDropMQSNodeReq(void *buf, int32_t bufLen, SMCreateQnode if (tStartEncode(&encoder) < 0) return -1; if (tEncodeI32(&encoder, pReq->dnodeId) < 0) return -1; + ENCODESQL(); tEndEncode(&encoder); int32_t tlen = encoder.pos; @@ -2054,12 +2112,21 @@ int32_t tDeserializeSCreateDropMQSNodeReq(void *buf, int32_t bufLen, SMCreateQno if (tStartDecode(&decoder) < 0) return -1; if (tDecodeI32(&decoder, &pReq->dnodeId) < 0) return -1; + DECODESQL(); tEndDecode(&decoder); tDecoderClear(&decoder); return 0; } +void tFreeSMCreateQnodeReq(SMCreateQnodeReq *pReq){ + FREESQL(); +} + +void tFreeSDDropQnodeReq(SDDropQnodeReq* pReq) { + FREESQL(); +} + int32_t tSerializeSDropDnodeReq(void *buf, int32_t bufLen, SDropDnodeReq *pReq) { SEncoder encoder = {0}; tEncoderInit(&encoder, buf, bufLen); @@ -2070,6 +2137,7 @@ int32_t tSerializeSDropDnodeReq(void *buf, int32_t bufLen, SDropDnodeReq *pReq) if (tEncodeI32(&encoder, pReq->port) < 0) return -1; if (tEncodeI8(&encoder, pReq->force) < 0) return -1; if (tEncodeI8(&encoder, pReq->unsafe) < 0) return -1; + ENCODESQL(); tEndEncode(&encoder); int32_t tlen = encoder.pos; @@ -2092,12 +2160,17 @@ int32_t tDeserializeSDropDnodeReq(void *buf, int32_t bufLen, SDropDnodeReq *pReq pReq->unsafe = false; } + DECODESQL(); tEndDecode(&decoder); tDecoderClear(&decoder); return 0; } +void tFreeSDropDnodeReq(SDropDnodeReq *pReq) { + FREESQL(); +} + int32_t tSerializeSRestoreDnodeReq(void *buf, int32_t bufLen, SRestoreDnodeReq *pReq) { SEncoder encoder = {0}; tEncoderInit(&encoder, buf, bufLen); @@ -2105,6 +2178,7 @@ int32_t tSerializeSRestoreDnodeReq(void *buf, int32_t bufLen, SRestoreDnodeReq * if (tStartEncode(&encoder) < 0) return -1; if (tEncodeI32(&encoder, pReq->dnodeId) < 0) return -1; if (tEncodeI8(&encoder, pReq->restoreType) < 0) return -1; + ENCODESQL(); tEndEncode(&encoder); int32_t tlen = encoder.pos; @@ -2119,12 +2193,17 @@ int32_t tDeserializeSRestoreDnodeReq(void *buf, int32_t bufLen, SRestoreDnodeReq if (tStartDecode(&decoder) < 0) return -1; if (tDecodeI32(&decoder, &pReq->dnodeId) < 0) return -1; if (tDecodeI8(&decoder, &pReq->restoreType) < 0) return -1; + DECODESQL(); tEndDecode(&decoder); tDecoderClear(&decoder); return 0; } +void tFreeSRestoreDnodeReq(SRestoreDnodeReq 
*pReq) { + FREESQL(); +} + int32_t tSerializeSMCfgDnodeReq(void *buf, int32_t bufLen, SMCfgDnodeReq *pReq) { SEncoder encoder = {0}; tEncoderInit(&encoder, buf, bufLen); @@ -2133,6 +2212,7 @@ int32_t tSerializeSMCfgDnodeReq(void *buf, int32_t bufLen, SMCfgDnodeReq *pReq) if (tEncodeI32(&encoder, pReq->dnodeId) < 0) return -1; if (tEncodeCStr(&encoder, pReq->config) < 0) return -1; if (tEncodeCStr(&encoder, pReq->value) < 0) return -1; + ENCODESQL(); tEndEncode(&encoder); int32_t tlen = encoder.pos; @@ -2148,12 +2228,17 @@ int32_t tDeserializeSMCfgDnodeReq(void *buf, int32_t bufLen, SMCfgDnodeReq *pReq if (tDecodeI32(&decoder, &pReq->dnodeId) < 0) return -1; if (tDecodeCStrTo(&decoder, pReq->config) < 0) return -1; if (tDecodeCStrTo(&decoder, pReq->value) < 0) return -1; + DECODESQL(); tEndDecode(&decoder); tDecoderClear(&decoder); return 0; } +void tFreeSMCfgDnodeReq(SMCfgDnodeReq *pReq) { + FREESQL(); +} + int32_t tSerializeSDCfgDnodeReq(void *buf, int32_t bufLen, SDCfgDnodeReq *pReq) { SEncoder encoder = {0}; tEncoderInit(&encoder, buf, bufLen); @@ -2188,6 +2273,7 @@ int32_t tSerializeSCreateDnodeReq(void *buf, int32_t bufLen, SCreateDnodeReq *pR if (tStartEncode(&encoder) < 0) return -1; if (tEncodeCStr(&encoder, pReq->fqdn) < 0) return -1; if (tEncodeI32(&encoder, pReq->port) < 0) return -1; + ENCODESQL(); tEndEncode(&encoder); int32_t tlen = encoder.pos; @@ -2202,12 +2288,17 @@ int32_t tDeserializeSCreateDnodeReq(void *buf, int32_t bufLen, SCreateDnodeReq * if (tStartDecode(&decoder) < 0) return -1; if (tDecodeCStrTo(&decoder, pReq->fqdn) < 0) return -1; if (tDecodeI32(&decoder, &pReq->port) < 0) return -1; + DECODESQL(); tEndDecode(&decoder); tDecoderClear(&decoder); return 0; } +void tFreeSCreateDnodeReq(SCreateDnodeReq *pReq) { + FREESQL(); +} + int32_t tSerializeSCreateFuncReq(void *buf, int32_t bufLen, SCreateFuncReq *pReq) { SEncoder encoder = {0}; tEncoderInit(&encoder, buf, bufLen); @@ -2695,6 +2786,8 @@ int32_t tSerializeSCreateDbReq(void *buf, int32_t bufLen, SCreateDbReq *pReq) { } if (tEncodeI32(&encoder, pReq->tsdbPageSize) < 0) return -1; if (tEncodeI32(&encoder, pReq->keepTimeOffset) < 0) return -1; + + ENCODESQL(); tEndEncode(&encoder); int32_t tlen = encoder.pos; @@ -2762,6 +2855,8 @@ int32_t tDeserializeSCreateDbReq(void *buf, int32_t bufLen, SCreateDbReq *pReq) if (tDecodeI32(&decoder, &pReq->keepTimeOffset) < 0) return -1; } + DECODESQL(); + tEndDecode(&decoder); tDecoderClear(&decoder); @@ -2771,6 +2866,7 @@ int32_t tDeserializeSCreateDbReq(void *buf, int32_t bufLen, SCreateDbReq *pReq) void tFreeSCreateDbReq(SCreateDbReq *pReq) { taosArrayDestroy(pReq->pRetensions); pReq->pRetensions = NULL; + FREESQL(); } int32_t tSerializeSAlterDbReq(void *buf, int32_t bufLen, SAlterDbReq *pReq) { @@ -2800,6 +2896,7 @@ int32_t tSerializeSAlterDbReq(void *buf, int32_t bufLen, SAlterDbReq *pReq) { if (tEncodeI32(&encoder, pReq->walRetentionPeriod) < 0) return -1; if (tEncodeI32(&encoder, pReq->walRetentionSize) < 0) return -1; if (tEncodeI32(&encoder, pReq->keepTimeOffset) < 0) return -1; + ENCODESQL(); tEndEncode(&encoder); int32_t tlen = encoder.pos; @@ -2847,12 +2944,18 @@ int32_t tDeserializeSAlterDbReq(void *buf, int32_t bufLen, SAlterDbReq *pReq) { if (!tDecodeIsEnd(&decoder)) { if (tDecodeI32(&decoder, &pReq->keepTimeOffset) < 0) return -1; } + + DECODESQL(); tEndDecode(&decoder); tDecoderClear(&decoder); return 0; } +void tFreeSAlterDbReq(SAlterDbReq *pReq) { + FREESQL(); +} + int32_t tSerializeSDropDbReq(void *buf, int32_t bufLen, SDropDbReq *pReq) { SEncoder encoder = 
{0}; tEncoderInit(&encoder, buf, bufLen); @@ -2860,6 +2963,7 @@ int32_t tSerializeSDropDbReq(void *buf, int32_t bufLen, SDropDbReq *pReq) { if (tStartEncode(&encoder) < 0) return -1; if (tEncodeCStr(&encoder, pReq->db) < 0) return -1; if (tEncodeI8(&encoder, pReq->ignoreNotExists) < 0) return -1; + ENCODESQL(); tEndEncode(&encoder); int32_t tlen = encoder.pos; @@ -2874,12 +2978,17 @@ int32_t tDeserializeSDropDbReq(void *buf, int32_t bufLen, SDropDbReq *pReq) { if (tStartDecode(&decoder) < 0) return -1; if (tDecodeCStrTo(&decoder, pReq->db) < 0) return -1; if (tDecodeI8(&decoder, &pReq->ignoreNotExists) < 0) return -1; + DECODESQL(); tEndDecode(&decoder); tDecoderClear(&decoder); return 0; } +void tFreeSDropDbReq(SDropDbReq *pReq) { + FREESQL(); +} + int32_t tSerializeSDropDbRsp(void *buf, int32_t bufLen, SDropDbRsp *pRsp) { SEncoder encoder = {0}; tEncoderInit(&encoder, buf, bufLen); @@ -3134,6 +3243,7 @@ int32_t tSerializeSCompactDbReq(void *buf, int32_t bufLen, SCompactDbReq *pReq) if (tEncodeCStr(&encoder, pReq->db) < 0) return -1; if (tEncodeI64(&encoder, pReq->timeRange.skey) < 0) return -1; if (tEncodeI64(&encoder, pReq->timeRange.ekey) < 0) return -1; + ENCODESQL(); tEndEncode(&encoder); int32_t tlen = encoder.pos; @@ -3149,12 +3259,17 @@ int32_t tDeserializeSCompactDbReq(void *buf, int32_t bufLen, SCompactDbReq *pReq if (tDecodeCStrTo(&decoder, pReq->db) < 0) return -1; if (tDecodeI64(&decoder, &pReq->timeRange.skey) < 0) return -1; if (tDecodeI64(&decoder, &pReq->timeRange.ekey) < 0) return -1; + DECODESQL(); tEndDecode(&decoder); tDecoderClear(&decoder); return 0; } +void tFreeSCompactDbReq(SCompactDbReq *pReq) { + FREESQL(); +} + int32_t tSerializeSUseDbRspImp(SEncoder *pEncoder, const SUseDbRsp *pRsp) { if (tEncodeCStr(pEncoder, pRsp->db) < 0) return -1; if (tEncodeI64(pEncoder, pRsp->uid) < 0) return -1; @@ -4305,6 +4420,7 @@ int32_t tSerializeSMDropTopicReq(void *buf, int32_t bufLen, SMDropTopicReq *pReq if (tStartEncode(&encoder) < 0) return -1; if (tEncodeCStr(&encoder, pReq->name) < 0) return -1; if (tEncodeI8(&encoder, pReq->igNotExists) < 0) return -1; + ENCODESQL(); tEndEncode(&encoder); int32_t tlen = encoder.pos; @@ -4319,12 +4435,17 @@ int32_t tDeserializeSMDropTopicReq(void *buf, int32_t bufLen, SMDropTopicReq *pR if (tStartDecode(&decoder) < 0) return -1; if (tDecodeCStrTo(&decoder, pReq->name) < 0) return -1; if (tDecodeI8(&decoder, &pReq->igNotExists) < 0) return -1; + DECODESQL(); tEndDecode(&decoder); tDecoderClear(&decoder); return 0; } +void tFreeSMDropTopicReq(SMDropTopicReq *pReq) { + FREESQL(); +} + int32_t tSerializeSMDropCgroupReq(void *buf, int32_t bufLen, SMDropCgroupReq *pReq) { SEncoder encoder = {0}; tEncoderInit(&encoder, buf, bufLen); @@ -5215,6 +5336,7 @@ int32_t tSerializeSBalanceVgroupReq(void *buf, int32_t bufLen, SBalanceVgroupReq if (tStartEncode(&encoder) < 0) return -1; if (tEncodeI32(&encoder, pReq->useless) < 0) return -1; + ENCODESQL(); tEndEncode(&encoder); int32_t tlen = encoder.pos; @@ -5228,12 +5350,17 @@ int32_t tDeserializeSBalanceVgroupReq(void *buf, int32_t bufLen, SBalanceVgroupR if (tStartDecode(&decoder) < 0) return -1; if (tDecodeI32(&decoder, &pReq->useless) < 0) return -1; + DECODESQL(); tEndDecode(&decoder); tDecoderClear(&decoder); return 0; } +void tFreeSBalanceVgroupReq(SBalanceVgroupReq *pReq) { + FREESQL(); +} + int32_t tSerializeSBalanceVgroupLeaderReq(void *buf, int32_t bufLen, SBalanceVgroupLeaderReq *pReq) { SEncoder encoder = {0}; tEncoderInit(&encoder, buf, bufLen); @@ -5241,6 +5368,7 @@ int32_t 
tSerializeSBalanceVgroupLeaderReq(void *buf, int32_t bufLen, SBalanceVgr if (tStartEncode(&encoder) < 0) return -1; if (tEncodeI32(&encoder, pReq->useless) < 0) return -1; if (tEncodeI32(&encoder, pReq->vgId) < 0) return -1; + ENCODESQL(); tEndEncode(&encoder); int32_t tlen = encoder.pos; @@ -5257,12 +5385,18 @@ int32_t tDeserializeSBalanceVgroupLeaderReq(void *buf, int32_t bufLen, SBalanceV if(!tDecodeIsEnd(&decoder)){ if (tDecodeI32(&decoder, &pReq->vgId) < 0) return -1; } + + DECODESQL(); tEndDecode(&decoder); tDecoderClear(&decoder); return 0; } +void tFreeSBalanceVgroupLeaderReq(SBalanceVgroupLeaderReq *pReq) { + FREESQL(); +} + int32_t tSerializeSMergeVgroupReq(void *buf, int32_t bufLen, SMergeVgroupReq *pReq) { SEncoder encoder = {0}; tEncoderInit(&encoder, buf, bufLen); @@ -5299,6 +5433,7 @@ int32_t tSerializeSRedistributeVgroupReq(void *buf, int32_t bufLen, SRedistribut if (tEncodeI32(&encoder, pReq->dnodeId1) < 0) return -1; if (tEncodeI32(&encoder, pReq->dnodeId2) < 0) return -1; if (tEncodeI32(&encoder, pReq->dnodeId3) < 0) return -1; + ENCODESQL(); tEndEncode(&encoder); int32_t tlen = encoder.pos; @@ -5315,12 +5450,17 @@ int32_t tDeserializeSRedistributeVgroupReq(void *buf, int32_t bufLen, SRedistrib if (tDecodeI32(&decoder, &pReq->dnodeId1) < 0) return -1; if (tDecodeI32(&decoder, &pReq->dnodeId2) < 0) return -1; if (tDecodeI32(&decoder, &pReq->dnodeId3) < 0) return -1; + DECODESQL(); tEndDecode(&decoder); tDecoderClear(&decoder); return 0; } +void tFreeSRedistributeVgroupReq(SRedistributeVgroupReq *pReq) { + FREESQL(); +} + int32_t tSerializeSSplitVgroupReq(void *buf, int32_t bufLen, SSplitVgroupReq *pReq) { SEncoder encoder = {0}; tEncoderInit(&encoder, buf, bufLen); @@ -5943,6 +6083,7 @@ int32_t tDeserializeSOperatorParam(SDecoder *pDecoder, SOperatorParam *pOpParam) if (uidNum > 0) { pScan->pUidList = taosArrayInit(uidNum, sizeof(int64_t)); if (NULL == pScan->pUidList) return -1; + for (int32_t m = 0; m < uidNum; ++m) { if (tDecodeI64(pDecoder, &uid) < 0) return -1; taosArrayPush(pScan->pUidList, &uid); @@ -5959,6 +6100,7 @@ int32_t tDeserializeSOperatorParam(SDecoder *pDecoder, SOperatorParam *pOpParam) int32_t childrenNum = 0; if (tDecodeI32(pDecoder, &childrenNum) < 0) return -1; + if (childrenNum > 0) { pOpParam->pChildren = taosArrayInit(childrenNum, POINTER_BYTES); if (NULL == pOpParam->pChildren) return -1; @@ -6836,6 +6978,8 @@ int32_t tSerializeSMDropStreamReq(void *buf, int32_t bufLen, const SMDropStreamR if (tEncodeCStr(&encoder, pReq->name) < 0) return -1; if (tEncodeI8(&encoder, pReq->igNotExists) < 0) return -1; + ENCODESQL(); + tEndEncode(&encoder); int32_t tlen = encoder.pos; @@ -6851,12 +6995,18 @@ int32_t tDeserializeSMDropStreamReq(void *buf, int32_t bufLen, SMDropStreamReq * if (tDecodeCStrTo(&decoder, pReq->name) < 0) return -1; if (tDecodeI8(&decoder, &pReq->igNotExists) < 0) return -1; + DECODESQL(); + tEndDecode(&decoder); tDecoderClear(&decoder); return 0; } +void tFreeSMDropStreamReq(SMDropStreamReq *pReq) { + FREESQL(); +} + int32_t tSerializeSMRecoverStreamReq(void *buf, int32_t bufLen, const SMRecoverStreamReq *pReq) { SEncoder encoder = {0}; tEncoderInit(&encoder, buf, bufLen); @@ -6998,6 +7148,11 @@ int tEncodeSVCreateTbReq(SEncoder *pCoder, const SVCreateTbReq *pReq) { } else { ASSERT(0); } + //ENCODESQL + if(pReq->sqlLen > 0 && pReq->sql != NULL) { + if (tEncodeI32(pCoder, pReq->sqlLen) < 0) return -1; + if (tEncodeBinary(pCoder, pReq->sql, pReq->sqlLen) < 0) return -1; + } tEndEncode(pCoder); return 0; @@ -7041,6 +7196,14 @@ int 
tDecodeSVCreateTbReq(SDecoder *pCoder, SVCreateTbReq *pReq) { ASSERT(0); } + //DECODESQL + if(!tDecodeIsEnd(pCoder)){ + if(tDecodeI32(pCoder, &pReq->sqlLen) < 0) return -1; + if(pReq->sqlLen > 0){ + if (tDecodeBinaryAlloc(pCoder, (void**)&pReq->sql, NULL) < 0) return -1; + } + } + tEndDecode(pCoder); return 0; } @@ -7062,6 +7225,11 @@ void tDestroySVCreateTbReq(SVCreateTbReq *pReq, int32_t flags) { if (pReq->ntb.schemaRow.pSchema) taosMemoryFree(pReq->ntb.schemaRow.pSchema); } } + + if(pReq->sql != NULL){ + taosMemoryFree(pReq->sql); + } + pReq->sql = NULL; } int tEncodeSVCreateTbBatchReq(SEncoder *pCoder, const SVCreateTbBatchReq *pReq) { diff --git a/source/common/src/ttime.c b/source/common/src/ttime.c index 7d65ac424f..425218f0e1 100644 --- a/source/common/src/ttime.c +++ b/source/common/src/ttime.c @@ -756,7 +756,8 @@ int32_t taosTimeCountIntervalForFill(int64_t skey, int64_t ekey, int64_t interva } int64_t taosTimeTruncate(int64_t ts, const SInterval* pInterval) { - if (pInterval->sliding == 0 && pInterval->interval == 0) { + if (pInterval->sliding == 0) { + ASSERT(pInterval->interval == 0); return ts; } diff --git a/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c b/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c index 40753fffcf..24b5b2566c 100644 --- a/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c +++ b/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c @@ -80,15 +80,18 @@ int32_t mmProcessDropReq(const SMgmtInputOpt *pInput, SRpcMsg *pMsg) { if (pInput->pData->dnodeId != 0 && dropReq.dnodeId != pInput->pData->dnodeId) { terrno = TSDB_CODE_INVALID_OPTION; dGError("failed to drop mnode since %s", terrstr()); + tFreeSMCreateQnodeReq(&dropReq); return -1; } SMnodeOpt option = {.deploy = false}; if (mmWriteFile(pInput->path, &option) != 0) { dGError("failed to write mnode file since %s", terrstr()); + tFreeSMCreateQnodeReq(&dropReq); return -1; } + tFreeSMCreateQnodeReq(&dropReq); return 0; } diff --git a/source/dnode/mgmt/mgmt_qnode/src/qmHandle.c b/source/dnode/mgmt/mgmt_qnode/src/qmHandle.c index 86bc11c616..82876d6886 100644 --- a/source/dnode/mgmt/mgmt_qnode/src/qmHandle.c +++ b/source/dnode/mgmt/mgmt_qnode/src/qmHandle.c @@ -39,15 +39,18 @@ int32_t qmProcessCreateReq(const SMgmtInputOpt *pInput, SRpcMsg *pMsg) { if (pInput->pData->dnodeId != 0 && createReq.dnodeId != pInput->pData->dnodeId) { terrno = TSDB_CODE_INVALID_OPTION; dError("failed to create qnode since %s", terrstr()); + tFreeSMCreateQnodeReq(&createReq); return -1; } bool deployed = true; if (dmWriteFile(pInput->path, pInput->name, deployed) != 0) { dError("failed to write qnode file since %s", terrstr()); + tFreeSMCreateQnodeReq(&createReq); return -1; } + tFreeSMCreateQnodeReq(&createReq); return 0; } @@ -61,15 +64,18 @@ int32_t qmProcessDropReq(const SMgmtInputOpt *pInput, SRpcMsg *pMsg) { if (pInput->pData->dnodeId != 0 && dropReq.dnodeId != pInput->pData->dnodeId) { terrno = TSDB_CODE_INVALID_OPTION; dError("failed to drop qnode since %s", terrstr()); + tFreeSMCreateQnodeReq(&dropReq); return -1; } bool deployed = false; if (dmWriteFile(pInput->path, pInput->name, deployed) != 0) { dError("failed to write qnode file since %s", terrstr()); + tFreeSMCreateQnodeReq(&dropReq); return -1; } + tFreeSMCreateQnodeReq(&dropReq); return 0; } diff --git a/source/dnode/mgmt/mgmt_snode/src/smHandle.c b/source/dnode/mgmt/mgmt_snode/src/smHandle.c index 13b81231d4..b29c5c1eb4 100644 --- a/source/dnode/mgmt/mgmt_snode/src/smHandle.c +++ b/source/dnode/mgmt/mgmt_snode/src/smHandle.c @@ -28,15 +28,18 @@ int32_t smProcessCreateReq(const 
SMgmtInputOpt *pInput, SRpcMsg *pMsg) { if (pInput->pData->dnodeId != 0 && createReq.dnodeId != pInput->pData->dnodeId) { terrno = TSDB_CODE_INVALID_OPTION; dError("failed to create snode since %s", terrstr()); + tFreeSMCreateQnodeReq(&createReq); return -1; } bool deployed = true; if (dmWriteFile(pInput->path, pInput->name, deployed) != 0) { dError("failed to write snode file since %s", terrstr()); + tFreeSMCreateQnodeReq(&createReq); return -1; } + tFreeSMCreateQnodeReq(&createReq); return 0; } @@ -50,15 +53,18 @@ int32_t smProcessDropReq(const SMgmtInputOpt *pInput, SRpcMsg *pMsg) { if (pInput->pData->dnodeId != 0 && dropReq.dnodeId != pInput->pData->dnodeId) { terrno = TSDB_CODE_INVALID_OPTION; dError("failed to drop snode since %s", terrstr()); + tFreeSMCreateQnodeReq(&dropReq); return -1; } bool deployed = false; if (dmWriteFile(pInput->path, pInput->name, deployed) != 0) { dError("failed to write snode file since %s", terrstr()); + tFreeSMCreateQnodeReq(&dropReq); return -1; } + tFreeSMCreateQnodeReq(&dropReq); return 0; } diff --git a/source/dnode/mnode/impl/inc/mndDef.h b/source/dnode/mnode/impl/inc/mndDef.h index e31237fc67..f4236964ca 100644 --- a/source/dnode/mnode/impl/inc/mndDef.h +++ b/source/dnode/mnode/impl/inc/mndDef.h @@ -672,6 +672,7 @@ typedef struct { char name[TSDB_STREAM_FNAME_LEN]; // ctl SRWLatch lock; + // create info int64_t createTime; int64_t updateTime; diff --git a/source/dnode/mnode/impl/inc/mndUser.h b/source/dnode/mnode/impl/inc/mndUser.h index 1aa01fd59d..c01741b350 100644 --- a/source/dnode/mnode/impl/inc/mndUser.h +++ b/source/dnode/mnode/impl/inc/mndUser.h @@ -40,6 +40,7 @@ SHashObj *mndDupTopicHash(SHashObj *pOld); int32_t mndValidateUserAuthInfo(SMnode *pMnode, SUserAuthVersion *pUsers, int32_t numOfUses, void **ppRsp, int32_t *pRspLen); int32_t mndUserRemoveDb(SMnode *pMnode, STrans *pTrans, char *db); +int32_t mndUserRemoveStb(SMnode *pMnode, STrans *pTrans, char *stb); int32_t mndUserRemoveTopic(SMnode *pMnode, STrans *pTrans, char *topic); int32_t mndUserDupObj(SUserObj *pUser, SUserObj *pNew); diff --git a/source/dnode/mnode/impl/src/mndDb.c b/source/dnode/mnode/impl/src/mndDb.c index a27de37daf..58c8a271bf 100644 --- a/source/dnode/mnode/impl/src/mndDb.c +++ b/source/dnode/mnode/impl/src/mndDb.c @@ -759,45 +759,10 @@ static int32_t mndProcessCreateDbReq(SRpcMsg *pReq) { code = mndCreateDb(pMnode, pReq, &createReq, pUser); if (code == 0) code = TSDB_CODE_ACTION_IN_PROGRESS; - char detail[3000] = {0}; - char tmp[100] = {0}; - - mndBuildAuditDetailInt32(detail, tmp, "buffer:%d", createReq.buffer); - mndBuildAuditDetailInt32(detail, tmp, "cacheLast:%d", createReq.cacheLast); - mndBuildAuditDetailInt32(detail, tmp, "cacheLastSize:%d", createReq.cacheLastSize); - mndBuildAuditDetailInt32(detail, tmp, "compression:%d", createReq.compression); - mndBuildAuditDetailInt32(detail, tmp, "daysPerFile:%d", createReq.daysPerFile); - mndBuildAuditDetailInt32(detail, tmp, "daysToKeep0:%d", createReq.daysToKeep0); - mndBuildAuditDetailInt32(detail, tmp, "daysToKeep1:%d", createReq.daysToKeep1); - mndBuildAuditDetailInt32(detail, tmp, "daysToKeep2:%d", createReq.daysToKeep2); - mndBuildAuditDetailInt32(detail, tmp, "keepTimeOffset:%d", createReq.keepTimeOffset); - mndBuildAuditDetailInt32(detail, tmp, "hashPrefix:%d", createReq.hashPrefix); - mndBuildAuditDetailInt32(detail, tmp, "hashSuffix:%d", createReq.hashSuffix); - mndBuildAuditDetailInt32(detail, tmp, "ignoreExist:%d", createReq.ignoreExist); - mndBuildAuditDetailInt32(detail, tmp, "maxRows:%d", 
createReq.maxRows); - mndBuildAuditDetailInt32(detail, tmp, "minRows:%d", createReq.minRows); - mndBuildAuditDetailInt32(detail, tmp, "numOfRetensions:%d", createReq.numOfRetensions); - mndBuildAuditDetailInt32(detail, tmp, "numOfStables:%d", createReq.numOfStables); - mndBuildAuditDetailInt32(detail, tmp, "numOfVgroups:%d", createReq.numOfVgroups); - mndBuildAuditDetailInt32(detail, tmp, "pages:%d", createReq.pages); - mndBuildAuditDetailInt32(detail, tmp, "pageSize:%d", createReq.pageSize); - mndBuildAuditDetailInt32(detail, tmp, "precision:%d", createReq.precision); - mndBuildAuditDetailInt32(detail, tmp, "replications:%d", createReq.replications); - mndBuildAuditDetailInt32(detail, tmp, "schemaless:%d", createReq.schemaless); - mndBuildAuditDetailInt32(detail, tmp, "sstTrigger:%d", createReq.sstTrigger); - mndBuildAuditDetailInt32(detail, tmp, "strict:%d", createReq.strict); - mndBuildAuditDetailInt32(detail, tmp, "tsdbPageSize:%d", createReq.tsdbPageSize); - mndBuildAuditDetailInt32(detail, tmp, "walFsyncPeriod:%d", createReq.walFsyncPeriod); - mndBuildAuditDetailInt32(detail, tmp, "walLevel:%d", createReq.walLevel); - mndBuildAuditDetailInt32(detail, tmp, "walRetentionPeriod:%d", createReq.walRetentionPeriod); - mndBuildAuditDetailInt32(detail, tmp, "walRetentionSize:%" PRId64, createReq.walRetentionSize); - mndBuildAuditDetailInt32(detail, tmp, "walRollPeriod:%d", createReq.walRollPeriod); - mndBuildAuditDetailInt32(detail, tmp, "walSegmentSize:%" PRId64, createReq.walSegmentSize); - SName name = {0}; tNameFromString(&name, createReq.db, T_NAME_ACCT | T_NAME_DB); - auditRecord(pReq, pMnode->clusterId, "createDB", name.dbname, "", detail); + auditRecord(pReq, pMnode->clusterId, "createDB", name.dbname, "", createReq.sql, createReq.sqlLen); _OVER: if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) { @@ -1049,30 +1014,10 @@ static int32_t mndProcessAlterDbReq(SRpcMsg *pReq) { if (code == 0) code = TSDB_CODE_ACTION_IN_PROGRESS; } - char detail[3000] = {0}; - char tmp[100] = {0}; - - mndBuildAuditDetailInt32(detail, tmp, "buffer:%d", alterReq.buffer); - mndBuildAuditDetailInt32(detail, tmp, "cacheLast:%d", alterReq.cacheLast); - mndBuildAuditDetailInt32(detail, tmp, "cacheLastSize:%d", alterReq.cacheLastSize); - mndBuildAuditDetailInt32(detail, tmp, "daysPerFile:%d", alterReq.daysPerFile); - mndBuildAuditDetailInt32(detail, tmp, "daysToKeep0:%d", alterReq.daysToKeep0); - mndBuildAuditDetailInt32(detail, tmp, "daysToKeep1:%d", alterReq.daysToKeep1); - mndBuildAuditDetailInt32(detail, tmp, "daysToKeep2:%d", alterReq.daysToKeep2); - mndBuildAuditDetailInt32(detail, tmp, "keepTimeOffset:%d", alterReq.keepTimeOffset); - mndBuildAuditDetailInt32(detail, tmp, "minRows:%d", alterReq.minRows); - mndBuildAuditDetailInt32(detail, tmp, "pages:%d", alterReq.pages); - mndBuildAuditDetailInt32(detail, tmp, "pageSize:%d", alterReq.pageSize); - mndBuildAuditDetailInt32(detail, tmp, "replications:%d", alterReq.replications); - mndBuildAuditDetailInt32(detail, tmp, "sstTrigger:%d", alterReq.sstTrigger); - mndBuildAuditDetailInt32(detail, tmp, "strict:%d", alterReq.strict); - mndBuildAuditDetailInt32(detail, tmp, "walFsyncPeriod:%d", alterReq.walFsyncPeriod); - mndBuildAuditDetailInt32(detail, tmp, "walRetentionSize:%d", alterReq.walRetentionSize); - SName name = {0}; tNameFromString(&name, alterReq.db, T_NAME_ACCT | T_NAME_DB); - auditRecord(pReq, pMnode->clusterId, "alterDB", name.dbname, "", detail); + auditRecord(pReq, pMnode->clusterId, "alterDB", name.dbname, "", alterReq.sql, alterReq.sqlLen); 
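+  // the audit record now carries the user's original SQL text instead of a reconstructed key:value detail string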
_OVER: if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) { @@ -1082,6 +1027,7 @@ _OVER: mndReleaseDb(pMnode, pDb); taosArrayDestroy(dbObj.cfg.pRetensions); + tFreeSAlterDbReq(&alterReq); terrno = code; return code; @@ -1364,13 +1310,10 @@ static int32_t mndProcessDropDbReq(SRpcMsg *pReq) { code = TSDB_CODE_ACTION_IN_PROGRESS; } - char detail[1000] = {0}; - sprintf(detail, "ignoreNotExists:%d", dropReq.ignoreNotExists); - SName name = {0}; tNameFromString(&name, dropReq.db, T_NAME_ACCT | T_NAME_DB); - auditRecord(pReq, pMnode->clusterId, "dropDB", name.dbname, "", detail); + auditRecord(pReq, pMnode->clusterId, "dropDB", name.dbname, "", dropReq.sql, dropReq.sqlLen); _OVER: if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_ACTION_IN_PROGRESS) { @@ -1378,6 +1321,7 @@ _OVER: } mndReleaseDb(pMnode, pDb); + tFreeSDropDbReq(&dropReq); return code; } diff --git a/source/dnode/mnode/impl/src/mndDnode.c b/source/dnode/mnode/impl/src/mndDnode.c index 5cebd7957b..b53dee7bff 100644 --- a/source/dnode/mnode/impl/src/mndDnode.c +++ b/source/dnode/mnode/impl/src/mndDnode.c @@ -1025,7 +1025,7 @@ static int32_t mndProcessCreateDnodeReq(SRpcMsg *pReq) { char obj[200] = {0}; sprintf(obj, "%s:%d", createReq.fqdn, createReq.port); - auditRecord(pReq, pMnode->clusterId, "createDnode", obj, "", ""); + auditRecord(pReq, pMnode->clusterId, "createDnode", obj, "", createReq.sql, createReq.sqlLen); _OVER: if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) { @@ -1033,6 +1033,7 @@ _OVER: } mndReleaseDnode(pMnode, pDnode); + tFreeSCreateDnodeReq(&createReq); return code; } @@ -1173,13 +1174,7 @@ static int32_t mndProcessDropDnodeReq(SRpcMsg *pReq) { char obj1[30] = {0}; sprintf(obj1, "%d", dropReq.dnodeId); - // char obj2[150] = {0}; - // sprintf(obj2, "%s:%d", dropReq.fqdn, dropReq.port); - - char detail[100] = {0}; - sprintf(detail, "force:%d, unsafe:%d", dropReq.force, dropReq.unsafe); - - auditRecord(pReq, pMnode->clusterId, "dropDnode", obj1, "", detail); + auditRecord(pReq, pMnode->clusterId, "dropDnode", obj1, "", dropReq.sql, dropReq.sqlLen); _OVER: if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) { @@ -1190,6 +1185,7 @@ _OVER: mndReleaseMnode(pMnode, pMObj); mndReleaseQnode(pMnode, pQObj); mndReleaseSnode(pMnode, pSObj); + tFreeSDropDnodeReq(&dropReq); return code; } @@ -1210,6 +1206,7 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) { mInfo("dnode:%d, start to config, option:%s, value:%s", cfgReq.dnodeId, cfgReq.config, cfgReq.value); if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_CONFIG_DNODE) != 0) { + tFreeSMCfgDnodeReq(&cfgReq); return -1; } @@ -1220,6 +1217,7 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) { if (' ' != cfgReq.config[7] && 0 != cfgReq.config[7]) { mError("dnode:%d, failed to config monitor since invalid conf:%s", cfgReq.dnodeId, cfgReq.config); terrno = TSDB_CODE_INVALID_CFG; + tFreeSMCfgDnodeReq(&cfgReq); return -1; } @@ -1231,6 +1229,7 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) { if (flag < 0 || flag > 2) { mError("dnode:%d, failed to config monitor since value:%d", cfgReq.dnodeId, flag); terrno = TSDB_CODE_INVALID_CFG; + tFreeSMCfgDnodeReq(&cfgReq); return -1; } @@ -1246,6 +1245,7 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) { mError("dnode:%d, failed to config ttlPushInterval since value:%d. 
Valid range: [0, 100000]", cfgReq.dnodeId, flag); terrno = TSDB_CODE_INVALID_CFG; + tFreeSMCfgDnodeReq(&cfgReq); return -1; } @@ -1261,11 +1261,27 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) { mError("dnode:%d, failed to config ttlBatchDropNum since value:%d. Valid range: [0, %d]", cfgReq.dnodeId, flag, INT32_MAX); terrno = TSDB_CODE_INVALID_CFG; + tFreeSMCfgDnodeReq(&cfgReq); return -1; } strcpy(dcfgReq.config, "ttlbatchdropnum"); snprintf(dcfgReq.value, TSDB_DNODE_VALUE_LEN, "%d", flag); + } else if (strncasecmp(cfgReq.config, "asynclog", 8) == 0) { + int32_t optLen = strlen("asynclog"); + int32_t flag = -1; + int32_t code = mndMCfgGetValInt32(&cfgReq, optLen, &flag); + if (code < 0) return code; + + if (flag < 0 || flag > 1) { + mError("dnode:%d, failed to config asynclog since value:%d. Valid range: [0, 1]", cfgReq.dnodeId, flag); + terrno = TSDB_CODE_INVALID_CFG; + tFreeSMCfgDnodeReq(&cfgReq); + return -1; + } + + strcpy(dcfgReq.config, "asynclog"); + snprintf(dcfgReq.value, TSDB_DNODE_VALUE_LEN, "%d", flag); #ifdef TD_ENTERPRISE } else if (strncasecmp(cfgReq.config, "supportvnodes", 13) == 0) { int32_t optLen = strlen("supportvnodes"); @@ -1276,6 +1292,7 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) { if (flag < 0 || flag > 4096) { mError("dnode:%d, failed to config supportVnodes since value:%d. Valid range: [0, 4096]", cfgReq.dnodeId, flag); terrno = TSDB_CODE_INVALID_CFG; + tFreeSMCfgDnodeReq(&cfgReq); return -1; } if (flag == 0) { @@ -1291,6 +1308,7 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) { if (' ' != cfgReq.config[index] && 0 != cfgReq.config[index]) { mError("dnode:%d, failed to config activeCode since invalid conf:%s", cfgReq.dnodeId, cfgReq.config); terrno = TSDB_CODE_INVALID_CFG; + tFreeSMCfgDnodeReq(&cfgReq); return -1; } int32_t vlen = strlen(cfgReq.value); @@ -1300,6 +1318,7 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) { mError("dnode:%d, failed to config activeCode since invalid vlen:%d. conf:%s, val:%s", cfgReq.dnodeId, vlen, cfgReq.config, cfgReq.value); terrno = TSDB_CODE_INVALID_OPTION; + tFreeSMCfgDnodeReq(&cfgReq); return -1; } @@ -1307,10 +1326,11 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) { snprintf(dcfgReq.value, TSDB_DNODE_VALUE_LEN, "%s", cfgReq.value); if (mndConfigDnode(pMnode, pReq, &cfgReq, opt) != 0) { - mError("dnode:%d, failed to config activeCode since %s. 
conf:%s, val:%s", cfgReq.dnodeId, terrstr(), - cfgReq.config, cfgReq.value); + mError("dnode:%d, failed to config activeCode since %s", cfgReq.dnodeId, terrstr()); + tFreeSMCfgDnodeReq(&cfgReq); return -1; } + tFreeSMCfgDnodeReq(&cfgReq); return 0; #endif } else { @@ -1323,6 +1343,7 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) { if (' ' != cfgReq.config[optLen] && 0 != cfgReq.config[optLen]) { mError("dnode:%d, failed to config since invalid conf:%s", cfgReq.dnodeId, cfgReq.config); terrno = TSDB_CODE_INVALID_CFG; + tFreeSMCfgDnodeReq(&cfgReq); return -1; } @@ -1334,6 +1355,7 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) { if (flag < 0 || flag > 255) { mError("dnode:%d, failed to config %s since value:%d", cfgReq.dnodeId, optName, flag); terrno = TSDB_CODE_INVALID_CFG; + tFreeSMCfgDnodeReq(&cfgReq); return -1; } @@ -1345,6 +1367,7 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) { if (!findOpt) { terrno = TSDB_CODE_INVALID_CFG; mError("dnode:%d, failed to config since %s", cfgReq.dnodeId, terrstr()); + tFreeSMCfgDnodeReq(&cfgReq); return -1; } } @@ -1352,10 +1375,9 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) { char obj[50] = {0}; sprintf(obj, "%d", cfgReq.dnodeId); - char detail[500] = {0}; - sprintf(detail, "config:%s, value:%s", cfgReq.config, cfgReq.value); + auditRecord(pReq, pMnode->clusterId, "alterDnode", obj, "", cfgReq.sql, cfgReq.sqlLen); - auditRecord(pReq, pMnode->clusterId, "alterDnode", obj, "", detail); + tFreeSMCfgDnodeReq(&cfgReq); int32_t code = -1; SSdb *pSdb = pMnode->pSdb; diff --git a/source/dnode/mnode/impl/src/mndIndex.c b/source/dnode/mnode/impl/src/mndIndex.c index 2e78116a86..041cc664e5 100644 --- a/source/dnode/mnode/impl/src/mndIndex.c +++ b/source/dnode/mnode/impl/src/mndIndex.c @@ -439,7 +439,7 @@ static int32_t mndProcessCreateIdxReq(SRpcMsg *pReq) { pDb = mndAcquireDbByStb(pMnode, createReq.stbName); if (pDb == NULL) { - terrno = TSDB_CODE_MND_INVALID_DB; + terrno = TSDB_CODE_MND_DB_NOT_EXIST; goto _OVER; } diff --git a/source/dnode/mnode/impl/src/mndMnode.c b/source/dnode/mnode/impl/src/mndMnode.c index 5827a30b43..22b2fec857 100644 --- a/source/dnode/mnode/impl/src/mndMnode.c +++ b/source/dnode/mnode/impl/src/mndMnode.c @@ -656,7 +656,7 @@ static int32_t mndProcessCreateMnodeReq(SRpcMsg *pReq) { char obj[40] = {0}; sprintf(obj, "%d", createReq.dnodeId); - auditRecord(pReq, pMnode->clusterId, "createMnode", obj, "", ""); + auditRecord(pReq, pMnode->clusterId, "createMnode", obj, "", createReq.sql, createReq.sqlLen); _OVER: if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) { @@ -665,6 +665,7 @@ _OVER: mndReleaseMnode(pMnode, pObj); mndReleaseDnode(pMnode, pDnode); + tFreeSMCreateQnodeReq(&createReq); return code; } @@ -797,7 +798,7 @@ static int32_t mndProcessDropMnodeReq(SRpcMsg *pReq) { char obj[40] = {0}; sprintf(obj, "%d", dropReq.dnodeId); - auditRecord(pReq, pMnode->clusterId, "dropMnode", obj, "", ""); + auditRecord(pReq, pMnode->clusterId, "dropMnode", obj, "", dropReq.sql, dropReq.sqlLen); _OVER: if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) { @@ -805,6 +806,7 @@ _OVER: } mndReleaseMnode(pMnode, pObj); + tFreeSMCreateQnodeReq(&dropReq); return code; } diff --git a/source/dnode/mnode/impl/src/mndProfile.c b/source/dnode/mnode/impl/src/mndProfile.c index 6f67778615..1f8c3b161b 100644 --- a/source/dnode/mnode/impl/src/mndProfile.c +++ b/source/dnode/mnode/impl/src/mndProfile.c @@ -259,7 +259,7 @@ static int32_t mndProcessConnectReq(SRpcMsg *pReq) { if (pDb == NULL) { if (0 != 
strcmp(connReq.db, TSDB_INFORMATION_SCHEMA_DB) && (0 != strcmp(connReq.db, TSDB_PERFORMANCE_SCHEMA_DB))) { - terrno = TSDB_CODE_MND_INVALID_DB; + terrno = TSDB_CODE_MND_DB_NOT_EXIST; mGError("user:%s, failed to login from %s while use db:%s since %s", pReq->info.conn.user, ip, connReq.db, terrstr()); goto _OVER; @@ -314,10 +314,10 @@ _CONNECT: sprintf(obj, "%s:%d", ip, pConn->port); char detail[1000] = {0}; - sprintf(detail, "connType:%d, db:%s, pid:%d, startTime:%" PRId64 ", sVer:%s, app:%s", + sprintf(detail, "connType:%d, db:%s, pid:%d, startTime:%" PRId64 ", sVer:%s, app:%s", connReq.connType, connReq.db, connReq.pid, connReq.startTime, connReq.sVer, connReq.app); - auditRecord(pReq, pMnode->clusterId, "login", connReq.user, obj, detail); + auditRecord(pReq, pMnode->clusterId, "login", connReq.user, obj, detail, strlen(detail)); _OVER: diff --git a/source/dnode/mnode/impl/src/mndQnode.c b/source/dnode/mnode/impl/src/mndQnode.c index 767e06a8d4..af11476d64 100644 --- a/source/dnode/mnode/impl/src/mndQnode.c +++ b/source/dnode/mnode/impl/src/mndQnode.c @@ -310,7 +310,7 @@ static int32_t mndProcessCreateQnodeReq(SRpcMsg *pReq) { char obj[33] = {0}; sprintf(obj, "%d", createReq.dnodeId); - auditRecord(pReq, pMnode->clusterId, "createQnode", obj, "", ""); + auditRecord(pReq, pMnode->clusterId, "createQnode", obj, "", createReq.sql, createReq.sqlLen); _OVER: if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) { mError("qnode:%d, failed to create since %s", createReq.dnodeId, terrstr()); @@ -318,6 +318,7 @@ _OVER: mndReleaseQnode(pMnode, pObj); mndReleaseDnode(pMnode, pDnode); + tFreeSMCreateQnodeReq(&createReq); return code; } @@ -423,7 +424,7 @@ static int32_t mndProcessDropQnodeReq(SRpcMsg *pReq) { char obj[33] = {0}; sprintf(obj, "%d", dropReq.dnodeId); - auditRecord(pReq, pMnode->clusterId, "dropQnode", obj, "", ""); + auditRecord(pReq, pMnode->clusterId, "dropQnode", obj, "", dropReq.sql, dropReq.sqlLen); _OVER: if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) { @@ -431,6 +432,7 @@ _OVER: } mndReleaseQnode(pMnode, pObj); + tFreeSMCreateQnodeReq(&dropReq); return code; } diff --git a/source/dnode/mnode/impl/src/mndSnode.c b/source/dnode/mnode/impl/src/mndSnode.c index 5e98380a08..f4f9cbb535 100644 --- a/source/dnode/mnode/impl/src/mndSnode.c +++ b/source/dnode/mnode/impl/src/mndSnode.c @@ -316,6 +316,7 @@ _OVER: mndReleaseSnode(pMnode, pObj); mndReleaseDnode(pMnode, pDnode); + tFreeSMCreateQnodeReq(&createReq); return code; } @@ -425,6 +426,7 @@ _OVER: } mndReleaseSnode(pMnode, pObj); + tFreeSMCreateQnodeReq(&dropReq); return code; } diff --git a/source/dnode/mnode/impl/src/mndStb.c b/source/dnode/mnode/impl/src/mndStb.c index c47c4994b7..eaf74a96cb 100644 --- a/source/dnode/mnode/impl/src/mndStb.c +++ b/source/dnode/mnode/impl/src/mndStb.c @@ -859,18 +859,23 @@ int32_t mndBuildStbFromReq(SMnode *pMnode, SStbObj *pDst, SMCreateStbReq *pCreat return 0; } static int32_t mndGenIdxNameForFirstTag(char *fullname, char *dbname, char *tagname) { - char randStr[24] = {0}; + char randStr[TSDB_COL_NAME_LEN] = {0}; + int32_t left = TSDB_COL_NAME_LEN - strlen(tagname) - 1; + if (left <= 1) { + sprintf(fullname, "%s.%s", dbname, tagname); + } else { + int8_t start = left < 8 ? 0 : 8; + int8_t end = left >= 24 ? 
24 : left - 1; + // gen rand str len [base:end] + // note: ignore rand performance issues + int64_t len = taosRand() % (end - start + 1) + start; + taosRandStr2(randStr, len); + sprintf(fullname, "%s.%s_%s", dbname, tagname, randStr); + } - int8_t start = 8; - int8_t end = sizeof(randStr) - 1; - // gen rand str len [base:end] - // note: ignore rand performance issues - int64_t len = taosRand() % (end - start + 1) + start; - - taosRandStr2(randStr, len); - sprintf(fullname, "%s.%s_%s", dbname, tagname, randStr); return 0; } + static int32_t mndCreateStb(SMnode *pMnode, SRpcMsg *pReq, SMCreateStbReq *pCreate, SDbObj *pDb) { SStbObj stbObj = {0}; int32_t code = -1; @@ -1075,80 +1080,6 @@ static int32_t mndBuildStbFromAlter(SStbObj *pStb, SStbObj *pDst, SMCreateStbReq return TSDB_CODE_SUCCESS; } -static char *mndAuditFieldTypeStr(int32_t type) { - switch (type) { - case TSDB_DATA_TYPE_NULL: - return "null"; - case TSDB_DATA_TYPE_BOOL: - return "bool"; - case TSDB_DATA_TYPE_TINYINT: - return "tinyint"; - case TSDB_DATA_TYPE_SMALLINT: - return "smallint"; - case TSDB_DATA_TYPE_INT: - return "int"; - case TSDB_DATA_TYPE_BIGINT: - return "bigint"; - case TSDB_DATA_TYPE_FLOAT: - return "float"; - case TSDB_DATA_TYPE_DOUBLE: - return "double"; - case TSDB_DATA_TYPE_VARCHAR: - return "varchar"; - case TSDB_DATA_TYPE_TIMESTAMP: - return "timestamp"; - case TSDB_DATA_TYPE_NCHAR: - return "nchar"; - case TSDB_DATA_TYPE_UTINYINT: - return "utinyint"; - case TSDB_DATA_TYPE_USMALLINT: - return "usmallint"; - case TSDB_DATA_TYPE_UINT: - return "uint"; - case TSDB_DATA_TYPE_UBIGINT: - return "ubigint"; - case TSDB_DATA_TYPE_JSON: - return "json"; - case TSDB_DATA_TYPE_VARBINARY: - return "varbinary"; - case TSDB_DATA_TYPE_DECIMAL: - return "decimal"; - case TSDB_DATA_TYPE_BLOB: - return "blob"; - case TSDB_DATA_TYPE_MEDIUMBLOB: - return "mediumblob"; - case TSDB_DATA_TYPE_GEOMETRY: - return "geometry"; - - default: - return "error"; - } -} - -static void mndAuditFieldStr(char *detail, SArray *arr, int32_t len, int32_t max) { - int32_t detialLen = strlen(detail); - int32_t fieldLen = 0; - for (int32_t i = 0; i < len; ++i) { - SField *pField = taosArrayGet(arr, i); - char field[TSDB_COL_NAME_LEN + 20] = {0}; - fieldLen = strlen(", "); - if (detialLen > 0 && detialLen < max - fieldLen - 1) { - strcat(detail, ", "); - detialLen += fieldLen; - } else { - break; - } - sprintf(field, "%s:%s", pField->name, mndAuditFieldTypeStr(pField->type)); - fieldLen = strlen(field); - if (detialLen < max - fieldLen - 1) { - strcat(detail, field); - detialLen += fieldLen; - } else { - break; - } - } -} - static int32_t mndProcessCreateStbReq(SRpcMsg *pReq) { SMnode *pMnode = pReq->info.node; int32_t code = -1; @@ -1257,26 +1188,10 @@ static int32_t mndProcessCreateStbReq(SRpcMsg *pReq) { } if (code == 0) code = TSDB_CODE_ACTION_IN_PROGRESS; - char detail[AUDIT_DETAIL_MAX] = {0}; - sprintf(detail, - "colVer:%d, delay1:%" PRId64 ", delay2:%" PRId64 ", deleteMark1:%" PRId64 - ", " - "deleteMark2:%" PRId64 - ", igExists:%d, numOfColumns:%d, numOfFuncs:%d, numOfTags:%d, " - "source:%d, suid:%" PRId64 - ", tagVer:%d, ttl:%d, " - "watermark1:%" PRId64 ", watermark2:%" PRId64, - createReq.colVer, createReq.delay1, createReq.delay2, createReq.deleteMark1, createReq.deleteMark2, - createReq.igExists, createReq.numOfColumns, createReq.numOfFuncs, createReq.numOfTags, createReq.source, - createReq.suid, createReq.tagVer, createReq.ttl, createReq.watermark1, createReq.watermark2); - - mndAuditFieldStr(detail, createReq.pColumns, 
createReq.numOfColumns, AUDIT_DETAIL_MAX); - mndAuditFieldStr(detail, createReq.pTags, createReq.numOfTags, AUDIT_DETAIL_MAX); - SName name = {0}; tNameFromString(&name, createReq.name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE); - auditRecord(pReq, pMnode->clusterId, "createStb", name.dbname, name.tname, detail); + auditRecord(pReq, pMnode->clusterId, "createStb", name.dbname, name.tname, createReq.sql, createReq.sqlLen); _OVER: if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) { @@ -2333,7 +2248,7 @@ static int32_t mndProcessAlterStbReq(SRpcMsg *pReq) { pDb = mndAcquireDbByStb(pMnode, alterReq.name); if (pDb == NULL) { - terrno = TSDB_CODE_MND_INVALID_DB; + terrno = TSDB_CODE_MND_DB_NOT_EXIST; goto _OVER; } @@ -2350,13 +2265,10 @@ static int32_t mndProcessAlterStbReq(SRpcMsg *pReq) { code = mndAlterStb(pMnode, pReq, &alterReq, pDb, pStb); if (code == 0) code = TSDB_CODE_ACTION_IN_PROGRESS; - char detail[2000] = {0}; - sprintf(detail, "alterType:%d, numOfFields:%d, ttl:%d", alterReq.alterType, alterReq.numOfFields, alterReq.ttl); - SName name = {0}; tNameFromString(&name, alterReq.name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE); - auditRecord(pReq, pMnode->clusterId, "alterStb", name.dbname, name.tname, detail); + auditRecord(pReq, pMnode->clusterId, "alterStb", name.dbname, name.tname, alterReq.sql, alterReq.sqlLen); _OVER: if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) { @@ -2448,6 +2360,7 @@ static int32_t mndDropStb(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SStbObj *p if (mndSetDropStbRedoActions(pMnode, pTrans, pDb, pStb) != 0) goto _OVER; if (mndDropIdxsByStb(pMnode, pTrans, pDb, pStb) != 0) goto _OVER; if (mndDropSmasByStb(pMnode, pTrans, pDb, pStb) != 0) goto _OVER; + if (mndUserRemoveStb(pMnode, pTrans, pStb->name) != 0) goto _OVER; if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER; code = 0; @@ -2619,13 +2532,10 @@ static int32_t mndProcessDropStbReq(SRpcMsg *pReq) { code = mndDropStb(pMnode, pReq, pDb, pStb); if (code == 0) code = TSDB_CODE_ACTION_IN_PROGRESS; - char detail[2000] = {0}; - sprintf(detail, "igNotExists:%d, source:%d", dropReq.igNotExists, dropReq.source); - SName name = {0}; tNameFromString(&name, dropReq.name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE); - auditRecord(pReq, pMnode->clusterId, "dropStb", name.dbname, name.tname, detail); + auditRecord(pReq, pMnode->clusterId, "dropStb", name.dbname, name.tname, dropReq.sql, dropReq.sqlLen); _OVER: if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) { @@ -2634,6 +2544,7 @@ _OVER: mndReleaseDb(pMnode, pDb); mndReleaseStb(pMnode, pStb); + tFreeSMDropStbReq(&dropReq); return code; } @@ -3627,7 +3538,7 @@ static int32_t mndProcessCreateIndexReq(SRpcMsg *pReq) { pDb = mndAcquireDbByStb(pMnode, tagIdxReq.dbFName); if (pDb == NULL) { - terrno = TSDB_CODE_MND_INVALID_DB; + terrno = TSDB_CODE_MND_DB_NOT_EXIST; goto _OVER; } diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index 8ba8f613b1..06c0e96c02 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -854,22 +854,15 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) { code = TSDB_CODE_ACTION_IN_PROGRESS; - char detail[2000] = {0}; - sprintf(detail, - "checkpointFreq:%" PRId64 ", createStb:%d, deleteMark:%" PRId64 - ", fillHistory:%d, igExists:%d, igExpired:%d, igUpdate:%d, lastTs:%" PRId64 ", maxDelay:%" PRId64 - ", numOfTags:%d, sourceDB:%s, targetStbFullName:%s, triggerType:%d, watermark:%" PRId64, - createStreamReq.checkpointFreq, 
createStreamReq.createStb, createStreamReq.deleteMark, - createStreamReq.fillHistory, createStreamReq.igExists, createStreamReq.igExpired, createStreamReq.igUpdate, - createStreamReq.lastTs, createStreamReq.maxDelay, createStreamReq.numOfTags, createStreamReq.sourceDB, - createStreamReq.targetStbFullName, createStreamReq.triggerType, createStreamReq.watermark); - SName name = {0}; tNameFromString(&name, createStreamReq.name, T_NAME_ACCT | T_NAME_DB); //reuse this function for stream - auditRecord(pReq, pMnode->clusterId, "createStream", name.dbname, "", detail); - + //TODO + if (createStreamReq.sql != NULL) { + auditRecord(pReq, pMnode->clusterId, "createStream", name.dbname, "", + createStreamReq.sql, strlen(createStreamReq.sql)); + } _OVER: if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) { mError("stream:%s, failed to create since %s", createStreamReq.name, terrstr()); @@ -1090,7 +1083,7 @@ static int32_t mndAddStreamCheckpointToTrans(STrans *pTrans, SStreamObj *pStream pStream->checkpointId = chkptId; pStream->checkpointFreq = taosGetTimestampMs(); - atomic_store_64(&pStream->currentTick, 0); + pStream->currentTick = 0; // 3. commit log: stream checkpoint info pStream->version = pStream->version + 1; @@ -1259,15 +1252,18 @@ static int32_t mndProcessDropStreamReq(SRpcMsg *pReq) { if (dropReq.igNotExists) { mInfo("stream:%s, not exist, ignore not exist is set", dropReq.name); sdbRelease(pMnode->pSdb, pStream); + tFreeSMDropStreamReq(&dropReq); return 0; } else { terrno = TSDB_CODE_MND_STREAM_NOT_EXIST; + tFreeSMDropStreamReq(&dropReq); return -1; } } if (mndCheckDbPrivilegeByName(pMnode, pReq->info.conn.user, MND_OPER_WRITE_DB, pStream->targetDb) != 0) { sdbRelease(pMnode->pSdb, pStream); + tFreeSMDropStreamReq(&dropReq); return -1; } @@ -1275,6 +1271,7 @@ static int32_t mndProcessDropStreamReq(SRpcMsg *pReq) { if (pTrans == NULL) { mError("stream:%s, failed to drop since %s", dropReq.name, terrstr()); sdbRelease(pMnode->pSdb, pStream); + tFreeSMDropStreamReq(&dropReq); return -1; } @@ -1284,6 +1281,7 @@ static int32_t mndProcessDropStreamReq(SRpcMsg *pReq) { if (mndTransCheckConflict(pMnode, pTrans) != 0) { sdbRelease(pMnode->pSdb, pStream); mndTransDrop(pTrans); + tFreeSMDropStreamReq(&dropReq); return -1; } @@ -1292,6 +1290,7 @@ static int32_t mndProcessDropStreamReq(SRpcMsg *pReq) { mError("stream:%s, failed to drop task since %s", dropReq.name, terrstr()); sdbRelease(pMnode->pSdb, pStream); mndTransDrop(pTrans); + tFreeSMDropStreamReq(&dropReq); return -1; } @@ -1299,6 +1298,7 @@ static int32_t mndProcessDropStreamReq(SRpcMsg *pReq) { if (mndPersistDropStreamLog(pMnode, pTrans, pStream) < 0) { sdbRelease(pMnode->pSdb, pStream); mndTransDrop(pTrans); + tFreeSMDropStreamReq(&dropReq); return -1; } @@ -1306,20 +1306,21 @@ static int32_t mndProcessDropStreamReq(SRpcMsg *pReq) { mError("trans:%d, failed to prepare drop stream trans since %s", pTrans->id, terrstr()); sdbRelease(pMnode->pSdb, pStream); mndTransDrop(pTrans); + tFreeSMDropStreamReq(&dropReq); return -1; } removeStreamTasksInBuf(pStream, &execNodeList); - char detail[100] = {0}; - sprintf(detail, "igNotExists:%d", dropReq.igNotExists); - SName name = {0}; tNameFromString(&name, dropReq.name, T_NAME_ACCT | T_NAME_DB); - auditRecord(pReq, pMnode->clusterId, "dropStream", name.dbname, "", detail); + //reuse this function for stream + + auditRecord(pReq, pMnode->clusterId, "dropStream", name.dbname, "", dropReq.sql, dropReq.sqlLen); sdbRelease(pMnode->pSdb, pStream); mndTransDrop(pTrans); + tFreeSMDropStreamReq(&dropReq); 
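/* dropReq owns the heap-allocated sql buffer decoded from the request, so every exit path above releases it with tFreeSMDropStreamReq() before returning; dropReq is zero-initialized, which keeps the free safe even on the early error paths */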
return TSDB_CODE_ACTION_IN_PROGRESS; } diff --git a/source/dnode/mnode/impl/src/mndTopic.c b/source/dnode/mnode/impl/src/mndTopic.c index 94fd6027c0..e96acfef86 100644 --- a/source/dnode/mnode/impl/src/mndTopic.c +++ b/source/dnode/mnode/impl/src/mndTopic.c @@ -629,16 +629,6 @@ static int32_t mndProcessCreateTopicReq(SRpcMsg *pReq) { code = TSDB_CODE_ACTION_IN_PROGRESS; } - char detail[4000] = {0}; - char sql[3000] = {0}; - strncpy(sql, createTopicReq.sql, 2999); - - SName tableName = {0}; - tNameFromString(&tableName, createTopicReq.subStbName, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE); - - sprintf(detail, "igExists:%d, subStbName:%s, subType:%d, withMeta:%d, sql:%s", - createTopicReq.igExists, tableName.tname, createTopicReq.subType, createTopicReq.withMeta, sql); - SName dbname = {0}; tNameFromString(&dbname, createTopicReq.subDbName, T_NAME_ACCT | T_NAME_DB); @@ -646,7 +636,8 @@ static int32_t mndProcessCreateTopicReq(SRpcMsg *pReq) { tNameFromString(&topicName, createTopicReq.name, T_NAME_ACCT | T_NAME_DB); //reuse this function for topic - auditRecord(pReq, pMnode->clusterId, "createTopic", topicName.dbname, dbname.dbname, detail); + auditRecord(pReq, pMnode->clusterId, "createTopic", topicName.dbname, dbname.dbname, + createTopicReq.sql, strlen(createTopicReq.sql)); _OVER: if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) { @@ -697,10 +688,12 @@ static int32_t mndProcessDropTopicReq(SRpcMsg *pReq) { if (pTopic == NULL) { if (dropReq.igNotExists) { mInfo("topic:%s, not exist, ignore not exist is set", dropReq.name); + tFreeSMDropTopicReq(&dropReq); return 0; } else { terrno = TSDB_CODE_MND_TOPIC_NOT_EXIST; mError("topic:%s, failed to drop since %s", dropReq.name, terrstr()); + tFreeSMDropTopicReq(&dropReq); return -1; } } @@ -841,17 +834,17 @@ end: mndTransDrop(pTrans); if (code != 0) { mError("topic:%s, failed to drop since %s", dropReq.name, terrstr()); + tFreeSMDropTopicReq(&dropReq); return code; } - char detail[100] = {0}; - sprintf(detail, "igNotExists:%d", dropReq.igNotExists); - SName name = {0}; tNameFromString(&name, dropReq.name, T_NAME_ACCT | T_NAME_DB); //reuse this function for topic - auditRecord(pReq, pMnode->clusterId, "dropTopic", name.dbname, "", detail); + auditRecord(pReq, pMnode->clusterId, "dropTopic", name.dbname, "", dropReq.sql, dropReq.sqlLen); + + tFreeSMDropTopicReq(&dropReq); return TSDB_CODE_ACTION_IN_PROGRESS; } diff --git a/source/dnode/mnode/impl/src/mndTrans.c b/source/dnode/mnode/impl/src/mndTrans.c index 02af5e8ede..1d8dd5e345 100644 --- a/source/dnode/mnode/impl/src/mndTrans.c +++ b/source/dnode/mnode/impl/src/mndTrans.c @@ -544,7 +544,9 @@ STrans *mndAcquireTrans(SMnode *pMnode, int32_t transId) { if (pTrans == NULL) { terrno = TSDB_CODE_MND_TRANS_NOT_EXIST; } else { + #ifdef WINDOWS taosThreadMutexInit(&pTrans->mutex, NULL); + #endif } return pTrans; } diff --git a/source/dnode/mnode/impl/src/mndUser.c b/source/dnode/mnode/impl/src/mndUser.c index f38f825302..2789f5a9d4 100644 --- a/source/dnode/mnode/impl/src/mndUser.c +++ b/source/dnode/mnode/impl/src/mndUser.c @@ -1275,11 +1275,7 @@ static int32_t mndProcessCreateUserReq(SRpcMsg *pReq) { code = mndCreateUser(pMnode, pOperUser->acct, &createReq, pReq); if (code == 0) code = TSDB_CODE_ACTION_IN_PROGRESS; - char detail[1000] = {0}; - sprintf(detail, "createType:%d, enable:%d, superUser:%d, sysInfo:%d", createReq.createType, createReq.enable, - createReq.superUser, createReq.sysInfo); - - auditRecord(pReq, pMnode->clusterId, "createUser", createReq.user, "", detail); + auditRecord(pReq, 
pMnode->clusterId, "createUser", createReq.user, "", createReq.sql, createReq.sqlLen); _OVER: if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) { @@ -1289,6 +1285,7 @@ _OVER: mndReleaseUser(pMnode, pUser); mndReleaseUser(pMnode, pOperUser); tFreeSCreateUserReq(&createReq); + return code; } @@ -1818,41 +1815,51 @@ static int32_t mndProcessAlterUserReq(SRpcMsg *pReq) { code = mndAlterUser(pMnode, pUser, &newUser, pReq); if (code == 0) code = TSDB_CODE_ACTION_IN_PROGRESS; - char detail[1000] = {0}; - sprintf(detail, "alterType:%s, enable:%d, superUser:%d, sysInfo:%d, tabName:%s, password:", - mndUserAuditTypeStr(alterReq.alterType), alterReq.enable, alterReq.superUser, alterReq.sysInfo, - alterReq.tabName); - - if (alterReq.alterType == TSDB_ALTER_USER_PASSWD) { + if(alterReq.alterType == TSDB_ALTER_USER_PASSWD){ + char detail[1000] = {0}; sprintf(detail, "alterType:%s, enable:%d, superUser:%d, sysInfo:%d, tabName:%s, password:xxx", mndUserAuditTypeStr(alterReq.alterType), alterReq.enable, alterReq.superUser, alterReq.sysInfo, alterReq.tabName); - auditRecord(pReq, pMnode->clusterId, "alterUser", alterReq.user, "", detail); - } else if (alterReq.alterType == TSDB_ALTER_USER_SUPERUSER || alterReq.alterType == TSDB_ALTER_USER_ENABLE || - alterReq.alterType == TSDB_ALTER_USER_SYSINFO) { - auditRecord(pReq, pMnode->clusterId, "alterUser", alterReq.user, "", detail); - } else if (alterReq.alterType == TSDB_ALTER_USER_ADD_READ_DB || alterReq.alterType == TSDB_ALTER_USER_ADD_WRITE_DB || - alterReq.alterType == TSDB_ALTER_USER_ADD_ALL_DB || alterReq.alterType == TSDB_ALTER_USER_ADD_READ_TABLE || - alterReq.alterType == TSDB_ALTER_USER_ADD_WRITE_TABLE || - alterReq.alterType == TSDB_ALTER_USER_ADD_ALL_TABLE) { - if (strcmp(alterReq.objname, "1.*") != 0) { + auditRecord(pReq, pMnode->clusterId, "alterUser", alterReq.user, "", detail, strlen(detail)); + } + else if(alterReq.alterType == TSDB_ALTER_USER_SUPERUSER || + alterReq.alterType == TSDB_ALTER_USER_ENABLE || + alterReq.alterType == TSDB_ALTER_USER_SYSINFO){ + auditRecord(pReq, pMnode->clusterId, "alterUser", alterReq.user, "", alterReq.sql, alterReq.sqlLen); + } + else if(alterReq.alterType == TSDB_ALTER_USER_ADD_READ_DB|| + alterReq.alterType == TSDB_ALTER_USER_ADD_WRITE_DB|| + alterReq.alterType == TSDB_ALTER_USER_ADD_ALL_DB|| + alterReq.alterType == TSDB_ALTER_USER_ADD_READ_TABLE|| + alterReq.alterType == TSDB_ALTER_USER_ADD_WRITE_TABLE|| + alterReq.alterType == TSDB_ALTER_USER_ADD_ALL_TABLE){ + if (strcmp(alterReq.objname, "1.*") != 0){ SName name = {0}; tNameFromString(&name, alterReq.objname, T_NAME_ACCT | T_NAME_DB); - auditRecord(pReq, pMnode->clusterId, "GrantPrivileges", alterReq.user, name.dbname, detail); - } else { - auditRecord(pReq, pMnode->clusterId, "GrantPrivileges", alterReq.user, "*", detail); + auditRecord(pReq, pMnode->clusterId, "GrantPrivileges", alterReq.user, name.dbname, + alterReq.sql, alterReq.sqlLen); + }else{ + auditRecord(pReq, pMnode->clusterId, "GrantPrivileges", alterReq.user, "*", + alterReq.sql, alterReq.sqlLen); } - } else if (alterReq.alterType == TSDB_ALTER_USER_ADD_SUBSCRIBE_TOPIC) { - auditRecord(pReq, pMnode->clusterId, "GrantPrivileges", alterReq.user, alterReq.objname, detail); - } else if (alterReq.alterType == TSDB_ALTER_USER_REMOVE_SUBSCRIBE_TOPIC) { - auditRecord(pReq, pMnode->clusterId, "RevokePrivileges", alterReq.user, alterReq.objname, detail); - } else { - if (strcmp(alterReq.objname, "1.*") != 0) { + } + else if(alterReq.alterType == TSDB_ALTER_USER_ADD_SUBSCRIBE_TOPIC){ + 
auditRecord(pReq, pMnode->clusterId, "GrantPrivileges", alterReq.user, alterReq.objname, + alterReq.sql, alterReq.sqlLen); + } + else if(alterReq.alterType == TSDB_ALTER_USER_REMOVE_SUBSCRIBE_TOPIC){ + auditRecord(pReq, pMnode->clusterId, "RevokePrivileges", alterReq.user, alterReq.objname, + alterReq.sql, alterReq.sqlLen); + } + else{ + if (strcmp(alterReq.objname, "1.*") != 0){ SName name = {0}; tNameFromString(&name, alterReq.objname, T_NAME_ACCT | T_NAME_DB); - auditRecord(pReq, pMnode->clusterId, "RevokePrivileges", alterReq.user, name.dbname, detail); - } else { - auditRecord(pReq, pMnode->clusterId, "RevokePrivileges", alterReq.user, "*", detail); + auditRecord(pReq, pMnode->clusterId, "RevokePrivileges", alterReq.user, name.dbname, + alterReq.sql, alterReq.sqlLen); + }else{ + auditRecord(pReq, pMnode->clusterId, "RevokePrivileges", alterReq.user, "*", + alterReq.sql, alterReq.sqlLen); } } @@ -1926,7 +1933,7 @@ static int32_t mndProcessDropUserReq(SRpcMsg *pReq) { code = mndDropUser(pMnode, pReq, pUser); if (code == 0) code = TSDB_CODE_ACTION_IN_PROGRESS; - auditRecord(pReq, pMnode->clusterId, "dropUser", dropReq.user, "", ""); + auditRecord(pReq, pMnode->clusterId, "dropUser", dropReq.user, "", dropReq.sql, dropReq.sqlLen); _OVER: if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) { @@ -1934,6 +1941,7 @@ _OVER: } mndReleaseUser(pMnode, pUser); + tFreeSDropUserReq(&dropReq); return code; } @@ -2423,6 +2431,47 @@ int32_t mndUserRemoveDb(SMnode *pMnode, STrans *pTrans, char *db) { return code; } +int32_t mndUserRemoveStb(SMnode *pMnode, STrans *pTrans, char *stb) { + int32_t code = 0; + SSdb *pSdb = pMnode->pSdb; + int32_t len = strlen(stb) + 1; + void *pIter = NULL; + SUserObj *pUser = NULL; + SUserObj newUser = {0}; + + while (1) { + pIter = sdbFetch(pSdb, SDB_USER, pIter, (void **)&pUser); + if (pIter == NULL) break; + + code = -1; + if (mndUserDupObj(pUser, &newUser) != 0) { + break; + } + + bool inRead = (taosHashGet(newUser.readTbs, stb, len) != NULL); + bool inWrite = (taosHashGet(newUser.writeTbs, stb, len) != NULL); + if (inRead || inWrite) { + (void)taosHashRemove(newUser.readTbs, stb, len); + (void)taosHashRemove(newUser.writeTbs, stb, len); + + SSdbRaw *pCommitRaw = mndUserActionEncode(&newUser); + if (pCommitRaw == NULL || mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) { + break; + } + (void)sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY); + } + + mndUserFreeObj(&newUser); + sdbRelease(pSdb, pUser); + code = 0; + } + + if (pUser != NULL) sdbRelease(pSdb, pUser); + if (pIter != NULL) sdbCancelFetch(pSdb, pIter); + mndUserFreeObj(&newUser); + return code; +} + int32_t mndUserRemoveTopic(SMnode *pMnode, STrans *pTrans, char *topic) { int32_t code = 0; SSdb *pSdb = pMnode->pSdb; diff --git a/source/dnode/mnode/impl/src/mndVgroup.c b/source/dnode/mnode/impl/src/mndVgroup.c index 9e9397a927..417dcb0ce0 100644 --- a/source/dnode/mnode/impl/src/mndVgroup.c +++ b/source/dnode/mnode/impl/src/mndVgroup.c @@ -2177,11 +2177,7 @@ static int32_t mndProcessRedistributeVgroupMsg(SRpcMsg *pReq) { char obj[33] = {0}; sprintf(obj, "%d", req.vgId); - char detail[1000] = {0}; - sprintf(detail, "dnodeId1:%d, dnodeId2:%d, dnodeId3:%d", - req.dnodeId1, req.dnodeId2, req.dnodeId3); - - auditRecord(pReq, pMnode->clusterId, "RedistributeVgroup", obj, "", detail); + auditRecord(pReq, pMnode->clusterId, "RedistributeVgroup", obj, "", req.sql, req.sqlLen); _OVER: if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) { @@ -2197,6 +2193,7 @@ _OVER: mndReleaseDnode(pMnode, pOld3); 
mndReleaseVgroup(pMnode, pVgroup); mndReleaseDb(pMnode, pDb); + tFreeSRedistributeVgroupReq(&req); return code; } @@ -2993,7 +2990,7 @@ static int32_t mndProcessBalanceVgroupMsg(SRpcMsg *pReq) { code = mndBalanceVgroup(pMnode, pReq, pArray); } - auditRecord(pReq, pMnode->clusterId, "balanceVgroup", "", "", ""); + auditRecord(pReq, pMnode->clusterId, "balanceVgroup", "", "", req.sql, req.sqlLen); _OVER: if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) { @@ -3001,6 +2998,7 @@ _OVER: } taosArrayDestroy(pArray); + tFreeSBalanceVgroupReq(&req); return code; } diff --git a/source/dnode/mnode/impl/test/profile/profile.cpp b/source/dnode/mnode/impl/test/profile/profile.cpp index 6ab6d364cb..b1b94c65fb 100644 --- a/source/dnode/mnode/impl/test/profile/profile.cpp +++ b/source/dnode/mnode/impl/test/profile/profile.cpp @@ -65,7 +65,7 @@ TEST_F(MndTestProfile, 01_ConnectMsg) { connId = connectRsp.connId; } -TEST_F(MndTestProfile, 02_ConnectMsg_InvalidDB) { +TEST_F(MndTestProfile, 02_ConnectMsg_NotExistDB) { char passwd[] = "taosdata"; char secretEncrypt[TSDB_PASSWORD_LEN + 1] = {0}; taosEncryptPass_c((uint8_t*)passwd, strlen(passwd), secretEncrypt); @@ -73,7 +73,7 @@ TEST_F(MndTestProfile, 02_ConnectMsg_InvalidDB) { SConnectReq connectReq = {0}; connectReq.pid = 1234; strcpy(connectReq.app, "mnode_test_profile"); - strcpy(connectReq.db, "invalid_db"); + strcpy(connectReq.db, "not_exist_db"); strcpy(connectReq.user, "root"); strcpy(connectReq.passwd, secretEncrypt); strcpy(connectReq.sVer, version); @@ -84,7 +84,7 @@ TEST_F(MndTestProfile, 02_ConnectMsg_InvalidDB) { SRpcMsg* pRsp = test.SendReq(TDMT_MND_CONNECT, pReq, contLen); ASSERT_NE(pRsp, nullptr); - ASSERT_EQ(pRsp->code, TSDB_CODE_MND_INVALID_DB); + ASSERT_EQ(pRsp->code, TSDB_CODE_MND_DB_NOT_EXIST); ASSERT_EQ(pRsp->contLen, 0); } diff --git a/source/dnode/mnode/impl/test/stb/stb.cpp b/source/dnode/mnode/impl/test/stb/stb.cpp index dd03917fc2..1adbb87e19 100644 --- a/source/dnode/mnode/impl/test/stb/stb.cpp +++ b/source/dnode/mnode/impl/test/stb/stb.cpp @@ -448,7 +448,7 @@ TEST_F(MndTestStb, 02_Alter_Stb_AddTag) { { void* pReq = BuildAlterStbAddTagReq("1.d3.stb", "tag4", &contLen); SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen); - ASSERT_EQ(pRsp->code, TSDB_CODE_MND_INVALID_DB); + ASSERT_EQ(pRsp->code, TSDB_CODE_MND_DB_NOT_EXIST); } { @@ -665,7 +665,7 @@ TEST_F(MndTestStb, 06_Alter_Stb_AddColumn) { { void* pReq = BuildAlterStbAddColumnReq("1.d7.stb", "tag4", &contLen); SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen); - ASSERT_EQ(pRsp->code, TSDB_CODE_MND_INVALID_DB); + ASSERT_EQ(pRsp->code, TSDB_CODE_MND_DB_NOT_EXIST); } { diff --git a/source/dnode/vnode/src/meta/metaCommit.c b/source/dnode/vnode/src/meta/metaCommit.c index 1494325657..f5572e68dd 100644 --- a/source/dnode/vnode/src/meta/metaCommit.c +++ b/source/dnode/vnode/src/meta/metaCommit.c @@ -52,7 +52,9 @@ int metaFinishCommit(SMeta *pMeta, TXN *txn) { return tdbPostCommit(pMeta->pEnv int metaPrepareAsyncCommit(SMeta *pMeta) { // return tdbPrepareAsyncCommit(pMeta->pEnv, pMeta->txn); int code = 0; + metaWLock(pMeta); code = ttlMgrFlush(pMeta->pTtlMgr, pMeta->txn); + metaULock(pMeta); code = tdbCommit(pMeta->pEnv, pMeta->txn); return code; diff --git a/source/dnode/vnode/src/meta/metaTable.c b/source/dnode/vnode/src/meta/metaTable.c index 442a739076..f600925a0b 100644 --- a/source/dnode/vnode/src/meta/metaTable.c +++ b/source/dnode/vnode/src/meta/metaTable.c @@ -1016,21 +1016,16 @@ end: } int metaTtlFindExpired(SMeta *pMeta, int64_t timePointMs, SArray 
*tbUids, int32_t ttlDropMaxCount) { - metaWLock(pMeta); - int ret = ttlMgrFlush(pMeta->pTtlMgr, pMeta->txn); - if (ret != 0) { - metaError("ttl failed to flush, ret:%d", ret); - goto _err; - } + metaRLock(pMeta); + + int ret = ttlMgrFindExpired(pMeta->pTtlMgr, timePointMs, tbUids, ttlDropMaxCount); + + metaULock(pMeta); - ret = ttlMgrFindExpired(pMeta->pTtlMgr, timePointMs, tbUids, ttlDropMaxCount); if (ret != 0) { metaError("ttl failed to find expired table, ret:%d", ret); - goto _err; } -_err: - metaULock(pMeta); return ret; } diff --git a/source/dnode/vnode/src/meta/metaTtl.c b/source/dnode/vnode/src/meta/metaTtl.c index f920296b4a..58ecf54512 100644 --- a/source/dnode/vnode/src/meta/metaTtl.c +++ b/source/dnode/vnode/src/meta/metaTtl.c @@ -299,7 +299,7 @@ int ttlMgrInsertTtl(STtlManger *pTtlMgr, const STtlUpdTtlCtx *updCtx) { ret = 0; _out: - metaDebug("%s, ttl mgr insert ttl, uid: %" PRId64 ", ctime: %" PRId64 ", ttlDays: %" PRId64, pTtlMgr->logPrefix, + metaTrace("%s, ttl mgr insert ttl, uid: %" PRId64 ", ctime: %" PRId64 ", ttlDays: %" PRId64, pTtlMgr->logPrefix, updCtx->uid, updCtx->changeTimeMs, updCtx->ttlDays); return ret; @@ -323,7 +323,7 @@ int ttlMgrDeleteTtl(STtlManger *pTtlMgr, const STtlDelTtlCtx *delCtx) { ret = 0; _out: - metaDebug("%s, ttl mgr delete ttl, uid: %" PRId64, pTtlMgr->logPrefix, delCtx->uid); + metaTrace("%s, ttl mgr delete ttl, uid: %" PRId64, pTtlMgr->logPrefix, delCtx->uid); return ret; } @@ -363,17 +363,37 @@ int ttlMgrUpdateChangeTime(STtlManger *pTtlMgr, const STtlUpdCtimeCtx *pUpdCtime ret = 0; _out: - metaDebug("%s, ttl mgr update ctime, uid: %" PRId64 ", ctime: %" PRId64, pTtlMgr->logPrefix, pUpdCtimeCtx->uid, + metaTrace("%s, ttl mgr update ctime, uid: %" PRId64 ", ctime: %" PRId64, pTtlMgr->logPrefix, pUpdCtimeCtx->uid, pUpdCtimeCtx->changeTimeMs); return ret; } int ttlMgrFindExpired(STtlManger *pTtlMgr, int64_t timePointMs, SArray *pTbUids, int32_t ttlDropMaxCount) { + int ret = -1; + STtlIdxKeyV1 ttlKey = {.deleteTimeMs = timePointMs, .uid = INT64_MAX}; STtlExpiredCtx expiredCtx = { .ttlDropMaxCount = ttlDropMaxCount, .count = 0, .expiredKey = ttlKey, .pTbUids = pTbUids}; - return tdbTbTraversal(pTtlMgr->pTtlIdx, &expiredCtx, ttlMgrFindExpiredOneEntry); + ret = tdbTbTraversal(pTtlMgr->pTtlIdx, &expiredCtx, ttlMgrFindExpiredOneEntry); + if (ret) { + goto _out; + } + + size_t vIdx = 0; + for (size_t i = 0; i < pTbUids->size; i++) { + tb_uid_t *pUid = taosArrayGet(pTbUids, i); + if (taosHashGet(pTtlMgr->pDirtyUids, pUid, sizeof(tb_uid_t)) == NULL) { + // not in dirty && expired in tdb => must be expired + taosArraySet(pTbUids, vIdx, pUid); + vIdx++; + } + } + + taosArrayPopTailBatch(pTbUids, pTbUids->size - vIdx); + +_out: + return ret; } static bool ttlMgrNeedFlush(STtlManger *pTtlMgr) { diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index b9d412b89f..8f3661dffa 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -712,7 +712,7 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg continue; } if (pHandle->consumerId == req.newConsumerId) { // do nothing - tqInfo("vgId:%d no switch consumer:0x%" PRIx64 " remains", req.vgId, req.newConsumerId); + tqInfo("vgId:%d no switch consumer:0x%" PRIx64 " remains, because the WAL log is being redone", req.vgId, req.newConsumerId); } else { tqInfo("vgId:%d switch consumer from Id:0x%" PRIx64 " to Id:0x%" PRIx64, req.vgId, pHandle->consumerId, req.newConsumerId); diff --git a/source/dnode/vnode/src/tq/tqRead.c 
b/source/dnode/vnode/src/tq/tqRead.c index 855b874ef0..2a56cd3847 100644 --- a/source/dnode/vnode/src/tq/tqRead.c +++ b/source/dnode/vnode/src/tq/tqRead.c @@ -102,6 +102,7 @@ bool isValValidForTable(STqHandle* pHandle, SWalCont* pHead) { for (int32_t iReq = 0; iReq < req.nReqs; iReq++) { pCreateReq = req.pReqs + iReq; taosMemoryFreeClear(pCreateReq->comment); + taosMemoryFreeClear(pCreateReq->sql); if (pCreateReq->type == TSDB_CHILD_TABLE) { taosArrayDestroy(pCreateReq->ctb.tagName); } @@ -268,6 +269,8 @@ STqReader* tqReaderOpen(SVnode* pVnode) { } void tqReaderClose(STqReader* pReader) { + if (pReader == NULL) return; + // close wal reader if (pReader->pWalReader) { walCloseReader(pReader->pWalReader); @@ -1118,6 +1121,7 @@ int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd) { taosArrayDestroy(list); taosHashCancelIterate(pTq->pHandle, pIter); taosWUnLockLatch(&pTq->lock); + return ret; } tqReaderSetTbUidList(pTqHandle->execHandle.pTqReader, list, NULL); @@ -1128,6 +1132,7 @@ int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd) { } } taosWUnLockLatch(&pTq->lock); + // update the table list handle for each stream scanner/wal reader taosWLockLatch(&pTq->pStreamMeta->lock); while (1) { diff --git a/source/dnode/vnode/src/tq/tqStreamStateSnap.c b/source/dnode/vnode/src/tq/tqStreamStateSnap.c index 1f143eaf44..41392ba27b 100644 --- a/source/dnode/vnode/src/tq/tqStreamStateSnap.c +++ b/source/dnode/vnode/src/tq/tqStreamStateSnap.c @@ -91,7 +91,7 @@ int32_t streamStateSnapRead(SStreamStateReader* pReader, uint8_t** ppData) { uint8_t* rowData = NULL; int64_t len; code = streamSnapRead(pReader->pReaderImpl, &rowData, &len); - if (rowData == NULL || len == 0) { + if (code != 0 || rowData == NULL || len == 0) { return code; } *ppData = taosMemoryMalloc(sizeof(SSnapDataHdr) + len); @@ -104,8 +104,8 @@ int32_t streamStateSnapRead(SStreamStateReader* pReader, uint8_t** ppData) { pHdr->type = SNAP_DATA_STREAM_STATE_BACKEND; pHdr->size = len; memcpy(pHdr->data, rowData, len); - taosMemoryFree(rowData); tqDebug("vgId:%d, vnode stream-state snapshot read data success", TD_VID(pReader->pTq->pVnode)); + taosMemoryFree(rowData); return code; _err: diff --git a/source/dnode/vnode/src/tq/tqStreamTask.c b/source/dnode/vnode/src/tq/tqStreamTask.c index d8577453ba..b9cb22e7a4 100644 --- a/source/dnode/vnode/src/tq/tqStreamTask.c +++ b/source/dnode/vnode/src/tq/tqStreamTask.c @@ -354,7 +354,7 @@ static bool taskReadyForDataFromWal(SStreamTask* pTask) { tqDebug("s-task:%s inputQ is blocked, do nothing", pTask->id.idStr); return false; } - + return true; } diff --git a/source/dnode/vnode/src/tq/tqStreamTaskSnap.c b/source/dnode/vnode/src/tq/tqStreamTaskSnap.c index 9dc918dcc7..09fffa1f74 100644 --- a/source/dnode/vnode/src/tq/tqStreamTaskSnap.c +++ b/source/dnode/vnode/src/tq/tqStreamTaskSnap.c @@ -198,6 +198,8 @@ int32_t streamTaskSnapWriterClose(SStreamTaskWriter* pWriter, int8_t rollback) { taosWLockLatch(&pTq->pStreamMeta->lock); tqDebug("vgId:%d, vnode stream-task snapshot writer closed", TD_VID(pTq->pVnode)); + + taosWLockLatch(&pTq->pStreamMeta->lock); if (rollback) { tdbAbort(pTq->pStreamMeta->db, pTq->pStreamMeta->txn); } else { @@ -206,6 +208,12 @@ int32_t streamTaskSnapWriterClose(SStreamTaskWriter* pWriter, int8_t rollback) { code = tdbPostCommit(pTq->pStreamMeta->db, pTq->pStreamMeta->txn); if (code) goto _err; } + if (tdbBegin(pTq->pStreamMeta->db, &pTq->pStreamMeta->txn, tdbDefaultMalloc, tdbDefaultFree, NULL, 0) < 0) { + code = -1; + goto _err; + } + + 
taosWUnLockLatch(&pTq->pStreamMeta->lock); if (tdbBegin(pTq->pStreamMeta->db, &pTq->pStreamMeta->txn, tdbDefaultMalloc, tdbDefaultFree, NULL, 0) < 0) { code = -1; @@ -241,6 +249,7 @@ int32_t streamTaskSnapWrite(SStreamTaskWriter* pWriter, uint8_t* pData, uint32_t } tDecoderClear(&decoder); // tdbTbInsert(TTB *pTb, const void *pKey, int keyLen, const void *pVal, int valLen, TXN *pTxn) + int64_t key[2] = {taskId.streamId, taskId.taskId}; taosWLockLatch(&pTq->pStreamMeta->lock); if (tdbTbUpsert(pTq->pStreamMeta->pTaskDb, key, sizeof(int64_t) << 1, (uint8_t*)pData + sizeof(SSnapDataHdr), diff --git a/source/dnode/vnode/src/tq/tqUtil.c b/source/dnode/vnode/src/tq/tqUtil.c index 46228a46a2..897e3f1e2e 100644 --- a/source/dnode/vnode/src/tq/tqUtil.c +++ b/source/dnode/vnode/src/tq/tqUtil.c @@ -108,7 +108,6 @@ static int32_t extractResetOffsetVal(STqOffsetVal* pOffsetVal, STQ* pTq, STqHand if (pRequest->useSnapshot) { tqDebug("tmq poll: consumer:0x%" PRIx64 ", subkey:%s, vgId:%d, (earliest) set offset to be snapshot", consumerId, pHandle->subKey, vgId); - if (pHandle->fetchMeta) { tqOffsetResetToMeta(pOffsetVal, 0); } else { diff --git a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c index caf88f55fc..e9e848f1b0 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c @@ -415,6 +415,9 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32 _end: tsdbUntakeReadSnap2((STsdbReader*)pr, pr->pReadSnap, true); + if (pr->pCurFileSet) { + pr->pCurFileSet = NULL; + } taosThreadMutexUnlock(&pr->readerMutex); diff --git a/source/dnode/vnode/src/tsdb/tsdbCommit2.c b/source/dnode/vnode/src/tsdb/tsdbCommit2.c index d4cb63fb7b..79964c5636 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCommit2.c +++ b/source/dnode/vnode/src/tsdb/tsdbCommit2.c @@ -185,29 +185,22 @@ static int32_t tsdbCommitTombData(SCommitter2 *committer) { } if (record->ekey < committer->ctx->minKey) { - goto _next; + // do nothing } else if (record->skey > committer->ctx->maxKey) { - committer->ctx->maxKey = TMIN(record->skey, committer->ctx->maxKey); - goto _next; + committer->ctx->nextKey = TMIN(record->skey, committer->ctx->nextKey); + } else { + if (record->ekey > committer->ctx->maxKey) { + committer->ctx->nextKey = TMIN(committer->ctx->nextKey, committer->ctx->maxKey + 1); + } + + record->skey = TMAX(record->skey, committer->ctx->minKey); + record->ekey = TMIN(record->ekey, committer->ctx->maxKey); + + numRecord++; + code = tsdbFSetWriteTombRecord(committer->writer, record); + TSDB_CHECK_CODE(code, lino, _exit); } - TSKEY maxKey = committer->ctx->maxKey; - if (record->ekey > committer->ctx->maxKey) { - maxKey = committer->ctx->maxKey + 1; - } - - if (record->ekey > committer->ctx->maxKey && committer->ctx->nextKey > maxKey) { - committer->ctx->nextKey = maxKey; - } - - record->skey = TMAX(record->skey, committer->ctx->minKey); - record->ekey = TMIN(record->ekey, maxKey); - - numRecord++; - code = tsdbFSetWriteTombRecord(committer->writer, record); - TSDB_CHECK_CODE(code, lino, _exit); - - _next: code = tsdbIterMergerNext(committer->tombIterMerger); TSDB_CHECK_CODE(code, lino, _exit); } @@ -569,6 +562,8 @@ int32_t tsdbCommitBegin(STsdb *tsdb, SCommitInfo *info) { } else { SCommitter2 committer[1]; + tsdbFSCheckCommit(tsdb->pFS); + code = tsdbOpenCommitter(tsdb, info, committer); TSDB_CHECK_CODE(code, lino, _exit); diff --git a/source/dnode/vnode/src/tsdb/tsdbFS2.c b/source/dnode/vnode/src/tsdb/tsdbFS2.c index 
f43bb52d05..afe6ef6e1a 100644 --- a/source/dnode/vnode/src/tsdb/tsdbFS2.c +++ b/source/dnode/vnode/src/tsdb/tsdbFS2.c @@ -18,6 +18,8 @@ #include "vnd.h" #include "vndCos.h" +#define BLOCK_COMMIT_FACTOR 3 + extern int vnodeScheduleTask(int (*execute)(void *), void *arg); extern int vnodeScheduleTaskEx(int tpid, int (*execute)(void *), void *arg); extern void remove_file(const char *fname); @@ -65,11 +67,17 @@ static int32_t create_fs(STsdb *pTsdb, STFileSystem **fs) { fs[0]->bgTaskQueue->next = fs[0]->bgTaskQueue; fs[0]->bgTaskQueue->prev = fs[0]->bgTaskQueue; + taosThreadMutexInit(&fs[0]->commitMutex, NULL); + taosThreadCondInit(&fs[0]->canCommit, NULL); + fs[0]->blockCommit = false; + return 0; } static int32_t destroy_fs(STFileSystem **fs) { if (fs[0] == NULL) return 0; + taosThreadMutexDestroy(&fs[0]->commitMutex); + taosThreadCondDestroy(&fs[0]->canCommit); taosThreadMutexDestroy(fs[0]->mutex); ASSERT(fs[0]->bgTaskNum == 0); @@ -236,6 +244,7 @@ static int32_t load_fs(STsdb *pTsdb, const char *fname, TFileSetArray *arr) { code = TARRAY2_APPEND(arr, fset); TSDB_CHECK_CODE(code, lino, _exit); } + TARRAY2_SORT(arr, tsdbTFileSetCmprFn); } else { code = TSDB_CODE_FILE_CORRUPTED; TSDB_CHECK_CODE(code, lino, _exit); @@ -828,6 +837,27 @@ _exit: return code; } +static int32_t tsdbFSSetBlockCommit(STFileSystem *fs, bool block) { + taosThreadMutexLock(&fs->commitMutex); + if (block) { + fs->blockCommit = true; + } else { + fs->blockCommit = false; + taosThreadCondSignal(&fs->canCommit); + } + taosThreadMutexUnlock(&fs->commitMutex); + return 0; +} + +int32_t tsdbFSCheckCommit(STFileSystem *fs) { + taosThreadMutexLock(&fs->commitMutex); + while (fs->blockCommit) { + taosThreadCondWait(&fs->canCommit, &fs->commitMutex); + } + taosThreadMutexUnlock(&fs->commitMutex); + return 0; +} + int32_t tsdbFSEditCommit(STFileSystem *fs) { int32_t code = 0; int32_t lino = 0; @@ -837,19 +867,36 @@ int32_t tsdbFSEditCommit(STFileSystem *fs) { TSDB_CHECK_CODE(code, lino, _exit); // schedule merge - if (fs->tsdb->pVnode->config.sttTrigger != 1) { + if (fs->tsdb->pVnode->config.sttTrigger > 1) { STFileSet *fset; + int32_t sttTrigger = fs->tsdb->pVnode->config.sttTrigger; + bool schedMerge = false; + bool blockCommit = false; + TARRAY2_FOREACH_REVERSE(fs->fSetArr, fset) { if (TARRAY2_SIZE(fset->lvlArr) == 0) continue; SSttLvl *lvl = TARRAY2_FIRST(fset->lvlArr); - if (lvl->level != 0 || TARRAY2_SIZE(lvl->fobjArr) < fs->tsdb->pVnode->config.sttTrigger) continue; + if (lvl->level != 0) continue; + int32_t numFile = TARRAY2_SIZE(lvl->fobjArr); + if (numFile >= sttTrigger) { + schedMerge = true; + } + + if (numFile >= sttTrigger * BLOCK_COMMIT_FACTOR) { + blockCommit = true; + } + + if (schedMerge && blockCommit) break; + } + + if (schedMerge) { code = tsdbFSScheduleBgTask(fs, TSDB_BG_TASK_MERGER, tsdbMerge, NULL, fs->tsdb, NULL); TSDB_CHECK_CODE(code, lino, _exit); - - break; } + + tsdbFSSetBlockCommit(fs, blockCommit); } _exit: @@ -920,7 +967,6 @@ int32_t tsdbFSCreateRefSnapshot(STFileSystem *fs, TFileSetArray **fsetArr) { fsetArr[0] = taosMemoryCalloc(1, sizeof(*fsetArr[0])); if (fsetArr[0] == NULL) return TSDB_CODE_OUT_OF_MEMORY; - taosThreadRwlockRdlock(&fs->tsdb->rwLock); TARRAY2_FOREACH(fs->fSetArr, fset) { code = tsdbTFileSetInitRef(fs->tsdb, fset, &fset1); if (code) break; @@ -928,7 +974,6 @@ int32_t tsdbFSCreateRefSnapshot(STFileSystem *fs, TFileSetArray **fsetArr) { code = TARRAY2_APPEND(fsetArr[0], fset1); if (code) break; } - taosThreadRwlockUnlock(&fs->tsdb->rwLock); if (code) { TARRAY2_DESTROY(fsetArr[0], 
tsdbTFileSetClear); @@ -1103,4 +1148,4 @@ int32_t tsdbFSEnableBgTask(STFileSystem *fs) { fs->stop = false; taosThreadMutexUnlock(fs->mutex); return 0; -} +} \ No newline at end of file diff --git a/source/dnode/vnode/src/tsdb/tsdbFS2.h b/source/dnode/vnode/src/tsdb/tsdbFS2.h index e814ab2fff..b0f42a0c48 100644 --- a/source/dnode/vnode/src/tsdb/tsdbFS2.h +++ b/source/dnode/vnode/src/tsdb/tsdbFS2.h @@ -67,6 +67,7 @@ int32_t tsdbFSDisableBgTask(STFileSystem *fs); int32_t tsdbFSEnableBgTask(STFileSystem *fs); // other int32_t tsdbFSGetFSet(STFileSystem *fs, int32_t fid, STFileSet **fset); +int32_t tsdbFSCheckCommit(STFileSystem *fs); struct STFSBgTask { EFSBgTaskT type; @@ -103,6 +104,11 @@ struct STFileSystem { int32_t bgTaskNum; STFSBgTask bgTaskQueue[1]; STFSBgTask *bgTaskRunning; + + // block commit variables + TdThreadMutex commitMutex; + TdThreadCond canCommit; + bool blockCommit; }; #ifdef __cplusplus diff --git a/source/dnode/vnode/src/tsdb/tsdbFSet2.c b/source/dnode/vnode/src/tsdb/tsdbFSet2.c index 37c7e2ffc1..cd47a54973 100644 --- a/source/dnode/vnode/src/tsdb/tsdbFSet2.c +++ b/source/dnode/vnode/src/tsdb/tsdbFSet2.c @@ -189,6 +189,7 @@ static int32_t tsdbJsonToSttLvl(STsdb *pTsdb, const cJSON *json, SSttLvl **lvl) code = TARRAY2_APPEND(lvl[0]->fobjArr, fobj); if (code) return code; } + TARRAY2_SORT(lvl[0]->fobjArr, tsdbTFileObjCmpr); return 0; } @@ -268,6 +269,7 @@ int32_t tsdbJsonToTFileSet(STsdb *pTsdb, const cJSON *json, STFileSet **fset) { code = TARRAY2_APPEND((*fset)->lvlArr, lvl); if (code) return code; } + TARRAY2_SORT((*fset)->lvlArr, tsdbSttLvlCmprFn); } else { return TSDB_CODE_FILE_CORRUPTED; } diff --git a/source/dnode/vnode/src/tsdb/tsdbMerge.c b/source/dnode/vnode/src/tsdb/tsdbMerge.c index ec0ea3c60f..42a8b5bb3f 100644 --- a/source/dnode/vnode/src/tsdb/tsdbMerge.c +++ b/source/dnode/vnode/src/tsdb/tsdbMerge.c @@ -15,6 +15,8 @@ #include "tsdbMerge.h" +#define TSDB_MAX_LEVEL 6 // means max level is 7 + typedef struct { STsdb *tsdb; TFileSetArray *fsetArr; @@ -34,7 +36,6 @@ typedef struct { STFileSet *fset; bool toData; int32_t level; - SSttLvl *lvl; TABLEID tbid[1]; } ctx[1]; @@ -68,18 +69,6 @@ static int32_t tsdbMergerClose(SMerger *merger) { int32_t lino = 0; SVnode *pVnode = merger->tsdb->pVnode; - // edit file system - code = tsdbFSEditBegin(merger->tsdb->pFS, merger->fopArr, TSDB_FEDIT_MERGE); - TSDB_CHECK_CODE(code, lino, _exit); - - taosThreadRwlockWrlock(&merger->tsdb->rwLock); - code = tsdbFSEditCommit(merger->tsdb->pFS); - if (code) { - taosThreadRwlockUnlock(&merger->tsdb->rwLock); - TSDB_CHECK_CODE(code, lino, _exit); - } - taosThreadRwlockUnlock(&merger->tsdb->rwLock); - ASSERT(merger->writer == NULL); ASSERT(merger->dataIterMerger == NULL); ASSERT(merger->tombIterMerger == NULL); @@ -101,58 +90,142 @@ _exit: } static int32_t tsdbMergeFileSetBeginOpenReader(SMerger *merger) { - int32_t code = 0; - int32_t lino = 0; + int32_t code = 0; + int32_t lino = 0; + SSttLvl *lvl; - merger->ctx->toData = true; - merger->ctx->level = 0; - - // TODO: optimize merge strategy - for (int32_t i = 0;; ++i) { - if (i >= TARRAY2_SIZE(merger->ctx->fset->lvlArr)) { - merger->ctx->lvl = NULL; + bool hasLevelLargerThanMax = false; + TARRAY2_FOREACH_REVERSE(merger->ctx->fset->lvlArr, lvl) { + if (lvl->level <= TSDB_MAX_LEVEL) { + break; + } else if (TARRAY2_SIZE(lvl->fobjArr) > 0) { + hasLevelLargerThanMax = true; break; } + } - merger->ctx->lvl = TARRAY2_GET(merger->ctx->fset->lvlArr, i); - if (merger->ctx->lvl->level != merger->ctx->level || - 
TARRAY2_SIZE(merger->ctx->lvl->fobjArr) + 1 < merger->sttTrigger) { - merger->ctx->toData = false; - merger->ctx->lvl = NULL; - break; + if (hasLevelLargerThanMax) { + // merge all stt files + merger->ctx->toData = true; + merger->ctx->level = TSDB_MAX_LEVEL; + + TARRAY2_FOREACH(merger->ctx->fset->lvlArr, lvl) { + int32_t numMergeFile = TARRAY2_SIZE(lvl->fobjArr); + + for (int32_t i = 0; i < numMergeFile; ++i) { + STFileObj *fobj = TARRAY2_GET(lvl->fobjArr, i); + + STFileOp op = { + .optype = TSDB_FOP_REMOVE, + .fid = merger->ctx->fset->fid, + .of = fobj->f[0], + }; + code = TARRAY2_APPEND(merger->fopArr, op); + TSDB_CHECK_CODE(code, lino, _exit); + + SSttFileReader *reader; + SSttFileReaderConfig config = { + .tsdb = merger->tsdb, + .szPage = merger->szPage, + .file[0] = fobj->f[0], + }; + + code = tsdbSttFileReaderOpen(fobj->fname, &config, &reader); + TSDB_CHECK_CODE(code, lino, _exit); + + code = TARRAY2_APPEND(merger->sttReaderArr, reader); + TSDB_CHECK_CODE(code, lino, _exit); + } + } + } else { + // do regular merge + merger->ctx->toData = true; + merger->ctx->level = 0; + + // find the highest level that can be merged to + for (int32_t i = 0, numCarry = 0;;) { + int32_t numFile = numCarry; + if (i < TARRAY2_SIZE(merger->ctx->fset->lvlArr) && + merger->ctx->level == TARRAY2_GET(merger->ctx->fset->lvlArr, i)->level) { + numFile += TARRAY2_SIZE(TARRAY2_GET(merger->ctx->fset->lvlArr, i)->fobjArr); + i++; + } + + numCarry = numFile / merger->sttTrigger; + if (numCarry == 0) { + break; + } else { + merger->ctx->level++; + } } - merger->ctx->level++; + ASSERT(merger->ctx->level > 0); - STFileObj *fobj; - int32_t numFile = 0; - TARRAY2_FOREACH(merger->ctx->lvl->fobjArr, fobj) { - if (numFile == merger->sttTrigger) { + if (merger->ctx->level <= TSDB_MAX_LEVEL) { + TARRAY2_FOREACH_REVERSE(merger->ctx->fset->lvlArr, lvl) { + if (TARRAY2_SIZE(lvl->fobjArr) == 0) { + continue; + } + + if (lvl->level >= merger->ctx->level) { + merger->ctx->toData = false; + } + break; + } + } + + // get number of level-0 files to merge + int32_t numFile = pow(merger->sttTrigger, merger->ctx->level); + TARRAY2_FOREACH(merger->ctx->fset->lvlArr, lvl) { + if (lvl->level == 0) continue; + if (lvl->level >= merger->ctx->level) break; + + numFile = numFile - TARRAY2_SIZE(lvl->fobjArr) * pow(merger->sttTrigger, lvl->level); + } + + ASSERT(numFile >= 0); + + // get file system operations + TARRAY2_FOREACH(merger->ctx->fset->lvlArr, lvl) { + if (lvl->level >= merger->ctx->level) { break; } - STFileOp op = { - .optype = TSDB_FOP_REMOVE, - .fid = merger->ctx->fset->fid, - .of = fobj->f[0], - }; - code = TARRAY2_APPEND(merger->fopArr, op); - TSDB_CHECK_CODE(code, lino, _exit); + int32_t numMergeFile; + if (lvl->level == 0) { + numMergeFile = numFile; + } else { + numMergeFile = TARRAY2_SIZE(lvl->fobjArr); + } - SSttFileReader *reader; - SSttFileReaderConfig config = { - .tsdb = merger->tsdb, - .szPage = merger->szPage, - .file[0] = fobj->f[0], - }; + for (int32_t i = 0; i < numMergeFile; ++i) { + STFileObj *fobj = TARRAY2_GET(lvl->fobjArr, i); - code = tsdbSttFileReaderOpen(fobj->fname, &config, &reader); - TSDB_CHECK_CODE(code, lino, _exit); + STFileOp op = { + .optype = TSDB_FOP_REMOVE, + .fid = merger->ctx->fset->fid, + .of = fobj->f[0], + }; + code = TARRAY2_APPEND(merger->fopArr, op); + TSDB_CHECK_CODE(code, lino, _exit); - code = TARRAY2_APPEND(merger->sttReaderArr, reader); - TSDB_CHECK_CODE(code, lino, _exit); + SSttFileReader *reader; + SSttFileReaderConfig config = { + .tsdb = merger->tsdb, + .szPage = 
merger->szPage, + .file[0] = fobj->f[0], + }; - numFile++; + code = tsdbSttFileReaderOpen(fobj->fname, &config, &reader); + TSDB_CHECK_CODE(code, lino, _exit); + + code = TARRAY2_APPEND(merger->sttReaderArr, reader); + TSDB_CHECK_CODE(code, lino, _exit); + } + } + + if (merger->ctx->level > TSDB_MAX_LEVEL) { + merger->ctx->level = TSDB_MAX_LEVEL; } } @@ -265,6 +338,8 @@ static int32_t tsdbMergeFileSetBegin(SMerger *merger) { ASSERT(merger->dataIterMerger == NULL); ASSERT(merger->writer == NULL); + TARRAY2_CLEAR(merger->fopArr, NULL); + merger->ctx->tbid->suid = 0; merger->ctx->tbid->uid = 0; @@ -317,6 +392,18 @@ static int32_t tsdbMergeFileSetEnd(SMerger *merger) { code = tsdbMergeFileSetEndCloseReader(merger); TSDB_CHECK_CODE(code, lino, _exit); + // edit file system + code = tsdbFSEditBegin(merger->tsdb->pFS, merger->fopArr, TSDB_FEDIT_MERGE); + TSDB_CHECK_CODE(code, lino, _exit); + + taosThreadRwlockWrlock(&merger->tsdb->rwLock); + code = tsdbFSEditCommit(merger->tsdb->pFS); + if (code) { + taosThreadRwlockUnlock(&merger->tsdb->rwLock); + TSDB_CHECK_CODE(code, lino, _exit); + } + taosThreadRwlockUnlock(&merger->tsdb->rwLock); + _exit: if (code) { TSDB_ERROR_LOG(TD_VID(merger->tsdb->pVnode), lino, code); @@ -434,7 +521,9 @@ int32_t tsdbMerge(void *arg) { .sttTrigger = tsdb->pVnode->config.sttTrigger, }}; - ASSERT(merger->sttTrigger > 1); + if (merger->sttTrigger <= 1) { + return 0; + } code = tsdbFSCreateCopySnapshot(tsdb->pFS, &merger->fsetArr); TSDB_CHECK_CODE(code, lino, _exit); @@ -447,6 +536,9 @@ int32_t tsdbMerge(void *arg) { _exit: if (code) { TSDB_ERROR_LOG(TD_VID(tsdb->pVnode), lino, code); + tsdbFatal("vgId:%d, failed to merge stt files since %s. code:%d", TD_VID(tsdb->pVnode), terrstr(), code); + taosMsleep(100); + exit(EXIT_FAILURE); } else if (merger->ctx->opened) { tsdbDebug("vgId:%d %s done", TD_VID(tsdb->pVnode), __func__); } diff --git a/source/dnode/vnode/src/tsdb/tsdbMergeTree.c b/source/dnode/vnode/src/tsdb/tsdbMergeTree.c index 4705d95c0e..260f4d8b2d 100644 --- a/source/dnode/vnode/src/tsdb/tsdbMergeTree.c +++ b/source/dnode/vnode/src/tsdb/tsdbMergeTree.c @@ -710,6 +710,25 @@ static FORCE_INLINE int32_t tLDataIterDescCmprFn(const SRBTreeNode *p1, const SR return -1 * tLDataIterCmprFn(p1, p2); } +static void adjustValidLDataIters(SArray *pLDIterList, int32_t numOfFileObj) { + int32_t size = taosArrayGetSize(pLDIterList); + + if (size < numOfFileObj) { + int32_t inc = numOfFileObj - size; + for (int32_t k = 0; k < inc; ++k) { + SLDataIter *pIter = taosMemoryCalloc(1, sizeof(SLDataIter)); + taosArrayPush(pLDIterList, &pIter); + } + } else if (size > numOfFileObj) { // remove unused LDataIter + int32_t inc = size - numOfFileObj; + + for (int i = 0; i < inc; ++i) { + SLDataIter *pIter = taosArrayPop(pLDIterList); + destroyLDataIter(pIter); + } + } +} + int32_t tMergeTreeOpen2(SMergeTree *pMTree, SMergeTreeConf *pConf) { int32_t code = TSDB_CODE_SUCCESS; @@ -725,45 +744,33 @@ int32_t tMergeTreeOpen2(SMergeTree *pMTree, SMergeTreeConf *pConf) { pMTree->ignoreEarlierTs = false; - int32_t size = ((STFileSet *)pConf->pCurrentFileset)->lvlArr->size; - if (size == 0) { + // no data exists, go to end + int32_t numOfLevels = ((STFileSet *)pConf->pCurrentFileset)->lvlArr->size; + if (numOfLevels == 0) { goto _end; } // add the list/iter placeholder - while (taosArrayGetSize(pConf->pSttFileBlockIterArray) < size) { + while (taosArrayGetSize(pConf->pSttFileBlockIterArray) < numOfLevels) { SArray *pList = taosArrayInit(4, POINTER_BYTES); taosArrayPush(pConf->pSttFileBlockIterArray, 
&pList); } - for (int32_t j = 0; j < size; ++j) { + for (int32_t j = 0; j < numOfLevels; ++j) { SSttLvl *pSttLevel = ((STFileSet *)pConf->pCurrentFileset)->lvlArr->data[j]; - ASSERT(pSttLevel->level == j); + SArray *pList = taosArrayGetP(pConf->pSttFileBlockIterArray, j); - SArray *pList = taosArrayGetP(pConf->pSttFileBlockIterArray, j); - int32_t numOfIter = taosArrayGetSize(pList); + int32_t numOfFileObj = TARRAY2_SIZE(pSttLevel->fobjArr); + adjustValidLDataIters(pList, numOfFileObj); - if (numOfIter < TARRAY2_SIZE(pSttLevel->fobjArr)) { - int32_t inc = TARRAY2_SIZE(pSttLevel->fobjArr) - numOfIter; - for (int32_t k = 0; k < inc; ++k) { - SLDataIter *pIter = taosMemoryCalloc(1, sizeof(SLDataIter)); - taosArrayPush(pList, &pIter); - } - } else if (numOfIter > TARRAY2_SIZE(pSttLevel->fobjArr)){ - int32_t inc = numOfIter - TARRAY2_SIZE(pSttLevel->fobjArr); - for (int i = 0; i < inc; ++i) { - SLDataIter *pIter = taosArrayPop(pList); - destroyLDataIter(pIter); - } - } - - for (int32_t i = 0; i < TARRAY2_SIZE(pSttLevel->fobjArr); ++i) { // open all last file + for (int32_t i = 0; i < numOfFileObj; ++i) { // open all last file SLDataIter *pIter = taosArrayGetP(pList, i); SSttFileReader *pSttFileReader = pIter->pReader; SSttBlockLoadInfo *pLoadInfo = pIter->pBlockLoadInfo; - // open stt file reader if not + // open stt file reader if not opened yet + // if failed to open this stt file, ignore the error and try next one if (pSttFileReader == NULL) { SSttFileReaderConfig conf = {.tsdb = pConf->pTsdb, .szPage = pConf->pTsdb->pVnode->config.tsdbPageSize}; conf.file[0] = *pSttLevel->fobjArr->data[i]->f; diff --git a/source/dnode/vnode/src/tsdb/tsdbRead2.c b/source/dnode/vnode/src/tsdb/tsdbRead2.c index 03f72662cf..868529e4dd 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead2.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead2.c @@ -1120,8 +1120,8 @@ static bool getNeighborBlockOfSameTable(SDataBlockIter* pBlockIter, SFileDataBlo // *nextIndex = pBlockInfo->tbBlockIdx + step; // *pBlockIndex = *(SBlockIndex*)taosArrayGet(pTableBlockScanInfo->pBlockList, *nextIndex); STableDataBlockIdx* pTableDataBlockIdx = taosArrayGet(pTableBlockScanInfo->pBlockIdxList, pBlockInfo->tbBlockIdx + step); - SBrinRecord* p = taosArrayGet(pBlockIter->blockList, pTableDataBlockIdx->globalIndex); - memcpy(pRecord, p, sizeof(SBrinRecord)); + SFileDataBlockInfo* p = taosArrayGet(pBlockIter->blockList, pTableDataBlockIdx->globalIndex); + memcpy(pRecord, &p->record, sizeof(SBrinRecord)); *nextIndex = pBlockInfo->tbBlockIdx + step; @@ -1652,8 +1652,8 @@ static int32_t doMergeFileBlockAndLastBlock(SLastBlockReader* pLastBlockReader, // create local variable to hold the row value TSDBROW fRow = {.iRow = pRow->iRow, .type = TSDBROW_COL_FMT, .pBlockData = pRow->pBlockData}; - tsdbTrace("fRow ptr:%p, %d, uid:%" PRIu64 ", %s", pRow->pBlockData, pRow->iRow, pLastBlockReader->uid, - pReader->idStr); + tsdbTrace("fRow ptr:%p, %d, uid:%" PRIu64 ", ts:%" PRId64 " %s", pRow->pBlockData, pRow->iRow, pLastBlockReader->uid, + fRow.pBlockData->aTSKEY[fRow.iRow], pReader->idStr); // only last block exists if ((!mergeBlockData) || (tsLastBlock != pBlockData->aTSKEY[pDumpInfo->rowIndex])) { @@ -4935,11 +4935,12 @@ int32_t tsdbTakeReadSnap2(STsdbReader* pReader, _query_reseek_func_t reseek, STs tsdbRefMemTable(pTsdb->imem, pSnap->pINode); } + // fs + code = tsdbFSCreateRefSnapshot(pTsdb->pFS, &pSnap->pfSetArray); + // unlock taosThreadRwlockUnlock(&pTsdb->rwLock); - // fs - code = tsdbFSCreateRefSnapshot(pTsdb->pFS, &pSnap->pfSetArray); if (code == 
TSDB_CODE_SUCCESS) { tsdbTrace("vgId:%d, take read snapshot", TD_VID(pTsdb->pVnode)); } diff --git a/source/dnode/vnode/src/tsdb/tsdbSnapshot.c b/source/dnode/vnode/src/tsdb/tsdbSnapshot.c index ed4257b86d..e4011ca400 100644 --- a/source/dnode/vnode/src/tsdb/tsdbSnapshot.c +++ b/source/dnode/vnode/src/tsdb/tsdbSnapshot.c @@ -424,7 +424,10 @@ int32_t tsdbSnapReaderOpen(STsdb* tsdb, int64_t sver, int64_t ever, int8_t type, reader[0]->ever = ever; reader[0]->type = type; + taosThreadRwlockRdlock(&tsdb->rwLock); code = tsdbFSCreateRefSnapshot(tsdb->pFS, &reader[0]->fsetArr); + taosThreadRwlockUnlock(&tsdb->rwLock); + TSDB_CHECK_CODE(code, lino, _exit); _exit: @@ -1045,6 +1048,7 @@ int32_t tsdbSnapWriterOpen(STsdb* pTsdb, int64_t sver, int64_t ever, STsdbSnapWr writer[0]->precision = pTsdb->keepCfg.precision; writer[0]->minRow = pTsdb->pVnode->config.tsdbCfg.minRows; writer[0]->maxRow = pTsdb->pVnode->config.tsdbCfg.maxRows; + writer[0]->cmprAlg = pTsdb->pVnode->config.tsdbCfg.compression; writer[0]->commitID = tsdbFSAllocEid(pTsdb->pFS); writer[0]->szPage = pTsdb->pVnode->config.tsdbPageSize; writer[0]->compactVersion = INT64_MAX; diff --git a/source/dnode/vnode/src/tsdb/tsdbUtil.c b/source/dnode/vnode/src/tsdb/tsdbUtil.c index 6b1beef8e2..87917cd243 100644 --- a/source/dnode/vnode/src/tsdb/tsdbUtil.c +++ b/source/dnode/vnode/src/tsdb/tsdbUtil.c @@ -516,10 +516,13 @@ int32_t tGetDelData(uint8_t *p, void *ph) { } int32_t tsdbKeyFid(TSKEY key, int32_t minutes, int8_t precision) { + int64_t fid; if (key < 0) { - return (int)((key + 1) / tsTickPerMin[precision] / minutes - 1); + fid = ((key + 1) / tsTickPerMin[precision] / minutes - 1); + return (fid < INT32_MIN) ? INT32_MIN : (int32_t)fid; } else { - return (int)((key / tsTickPerMin[precision] / minutes)); + fid = ((key / tsTickPerMin[precision] / minutes)); + return (fid > INT32_MAX) ? 
INT32_MAX : (int32_t)fid; } } diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index 35b9d6fa3a..efa722d41a 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -160,7 +160,7 @@ static int32_t vnodePreProcessDropTtlMsg(SVnode *pVnode, SRpcMsg *pMsg) { } { // find expired uids - tbUids = taosArrayInit(8, sizeof(int64_t)); + tbUids = taosArrayInit(8, sizeof(tb_uid_t)); if (tbUids == NULL) { code = TSDB_CODE_OUT_OF_MEMORY; TSDB_CHECK_CODE(code, lino, _exit); @@ -947,16 +947,14 @@ static int32_t vnodeProcessCreateTbReq(SVnode *pVnode, int64_t ver, void *pReq, taosArrayPush(rsp.pArray, &cRsp); - int32_t clusterId = pVnode->config.syncCfg.nodeInfo[0].clusterId; + if(pCreateReq->sqlLen > 0){ //skip auto create table, not set sql when auto create table + int32_t clusterId = pVnode->config.syncCfg.nodeInfo[0].clusterId; - char detail[1000] = {0}; - sprintf(detail, "btime:%" PRId64 ", flags:%d, ttl:%d, type:%d", - pCreateReq->btime, pCreateReq->flags, pCreateReq->ttl, pCreateReq->type); + SName name = {0}; + tNameFromString(&name, pVnode->config.dbname, T_NAME_ACCT | T_NAME_DB); - SName name = {0}; - tNameFromString(&name, pVnode->config.dbname, T_NAME_ACCT | T_NAME_DB); - - auditRecord(pReq, clusterId, "createTable", name.dbname, pCreateReq->name, detail); + auditRecord(pReq, clusterId, "createTable", name.dbname, pCreateReq->name, pCreateReq->sql, pCreateReq->sqlLen); + } } vDebug("vgId:%d, add %d new created tables into query table list", TD_VID(pVnode), (int32_t)taosArrayGetSize(tbUids)); @@ -981,6 +979,7 @@ static int32_t vnodeProcessCreateTbReq(SVnode *pVnode, int64_t ver, void *pReq, _exit: for (int32_t iReq = 0; iReq < req.nReqs; iReq++) { pCreateReq = req.pReqs + iReq; + taosMemoryFree(pCreateReq->sql); taosMemoryFree(pCreateReq->comment); taosArrayDestroy(pCreateReq->ctb.tagName); } @@ -1880,7 +1879,6 @@ static int32_t vnodeProcessDeleteReq(SVnode *pVnode, int64_t ver, void *pReq, in tDecoderInit(pCoder, pReq, len); tDecodeDeleteRes(pCoder, pRes); - ASSERT(taosArrayGetSize(pRes->uidList) == 0 || (pRes->skey != 0 && pRes->ekey != 0)); for (int32_t iUid = 0; iUid < taosArrayGetSize(pRes->uidList); iUid++) { uint64_t uid = *(uint64_t *)taosArrayGet(pRes->uidList, iUid); diff --git a/source/libs/audit/src/auditMain.c b/source/libs/audit/src/auditMain.c index d4b6465ac7..c408f0d87b 100644 --- a/source/libs/audit/src/auditMain.c +++ b/source/libs/audit/src/auditMain.c @@ -30,14 +30,16 @@ int32_t auditInit(const SAuditCfg *pCfg) { return 0; } -extern void auditRecordImp(SRpcMsg *pReq, int64_t clusterId, char *operation, char *target1, char *target2, char *detail); +extern void auditRecordImp(SRpcMsg *pReq, int64_t clusterId, char *operation, char *target1, char *target2, + char *detail, int32_t len); -void auditRecord(SRpcMsg *pReq, int64_t clusterId, char *operation, char *target1, char *target2, char *detail) { - auditRecordImp(pReq, clusterId, operation, target1, target2, detail); +void auditRecord(SRpcMsg *pReq, int64_t clusterId, char *operation, char *target1, char *target2, + char *detail, int32_t len) { + auditRecordImp(pReq, clusterId, operation, target1, target2, detail, len); } #ifndef TD_ENTERPRISE -void auditRecordImp(SRpcMsg *pReq, int64_t clusterId, char *operation, char *target1, char *target2, char *detail) { +void auditRecordImp(SRpcMsg *pReq, int64_t clusterId, char *operation, char *target1, char *target2, + char *detail, int32_t len) { } #endif - diff --git 
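/* The vnodeProcessCreateTbReq and auditRecord hunks above stop building a
   sprintf'd detail string and instead forward the original statement pointer
   plus an explicit length; auto-created tables carry no SQL, hence the
   sqlLen > 0 guard, and the length presumably travels with the pointer because
   the buffer need not be NUL-terminated. Sketch of length-bounded logging;
   auditSketch is a hypothetical helper, not the real audit API:

   #include <stdint.h>
   #include <stdio.h>

   static void auditSketch(const char *op, const char *sql, int32_t len) {
     if (len <= 0) return;                                 // auto-create: skip
     printf("audit op=%s sql=%.*s\n", op, (int)len, sql);  // %.*s honors len
   }
*/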
a/source/libs/catalog/src/catalog.c b/source/libs/catalog/src/catalog.c index f975517669..44854d334b 100644 --- a/source/libs/catalog/src/catalog.c +++ b/source/libs/catalog/src/catalog.c @@ -165,7 +165,7 @@ int32_t ctgRefreshTbMeta(SCatalog* pCtg, SRequestConnInfo* pConn, SCtgTbMetaCtx* } if (CTG_IS_META_NULL(output->metaType)) { - ctgError("no tbmeta got, tbNmae:%s", tNameGetTableName(ctx->pName)); + ctgError("no tbmeta got, tbName:%s", tNameGetTableName(ctx->pName)); ctgRemoveTbMetaFromCache(pCtg, ctx->pName, false); CTG_ERR_JRET(CTG_ERR_CODE_TABLE_NOT_EXIST); } diff --git a/source/libs/catalog/src/ctgAsync.c b/source/libs/catalog/src/ctgAsync.c index fb5ecf7ad2..ba7106ea51 100644 --- a/source/libs/catalog/src/ctgAsync.c +++ b/source/libs/catalog/src/ctgAsync.c @@ -1171,7 +1171,7 @@ int32_t ctgHandleGetTbMetaRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf STableMetaOutput* pOut = (STableMetaOutput*)pMsgCtx->out; if (CTG_IS_META_NULL(pOut->metaType)) { - ctgError("no tbmeta got, tbNmae:%s", tNameGetTableName(pName)); + ctgError("no tbmeta got, tbName:%s", tNameGetTableName(pName)); ctgRemoveTbMetaFromCache(pCtg, pName, false); CTG_ERR_JRET(CTG_ERR_CODE_TABLE_NOT_EXIST); } @@ -1341,7 +1341,7 @@ int32_t ctgHandleGetTbMetasRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu STableMetaOutput* pOut = (STableMetaOutput*)pMsgCtx->out; if (CTG_IS_META_NULL(pOut->metaType)) { - ctgTaskError("no tbmeta got, tbNmae:%s", tNameGetTableName(pName)); + ctgTaskError("no tbmeta got, tbName:%s", tNameGetTableName(pName)); ctgRemoveTbMetaFromCache(pCtg, pName, false); CTG_ERR_JRET(CTG_ERR_CODE_TABLE_NOT_EXIST); } diff --git a/source/libs/catalog/src/ctgCache.c b/source/libs/catalog/src/ctgCache.c index b541cdd411..5c8a1f456d 100644 --- a/source/libs/catalog/src/ctgCache.c +++ b/source/libs/catalog/src/ctgCache.c @@ -760,12 +760,14 @@ int32_t ctgGetCachedStbNameFromSuid(SCatalog* pCtg, char* dbFName, uint64_t suid char *stb = taosHashAcquire(dbCache->stbCache, &suid, sizeof(suid)); if (NULL == stb) { ctgDebug("stb 0x%" PRIx64 " not in cache, dbFName:%s", suid, dbFName); + ctgReleaseDBCache(pCtg, dbCache); return TSDB_CODE_SUCCESS; } *stbName = taosStrdup(stb); taosHashRelease(dbCache->stbCache, stb); + ctgReleaseDBCache(pCtg, dbCache); return TSDB_CODE_SUCCESS; } diff --git a/source/libs/command/inc/commandInt.h b/source/libs/command/inc/commandInt.h index f74c61ea78..c704eb3951 100644 --- a/source/libs/command/inc/commandInt.h +++ b/source/libs/command/inc/commandInt.h @@ -112,6 +112,7 @@ extern "C" { #define COMMAND_CATALOG_DEBUG "catalogDebug" #define COMMAND_ENABLE_MEM_DEBUG "enableMemDebug" #define COMMAND_DISABLE_MEM_DEBUG "disableMemDebug" +#define COMMAND_ASYNCLOG "asynclog" typedef struct SExplainGroup { int32_t nodeNum; @@ -168,7 +169,7 @@ typedef struct SExplainCtx { } \ tlen += snprintf(tbuf + VARSTR_HEADER_SIZE + tlen, TSDB_EXPLAIN_RESULT_ROW_SIZE - VARSTR_HEADER_SIZE - tlen, __VA_ARGS__); \ } while (0) - + #define EXPLAIN_ROW_APPEND(...) 
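/* The ctgGetCachedStbNameFromSuid hunk above adds the ctgReleaseDBCache() that
   was missing on the early-return path: an acquire must be paired with a
   release on every exit, not only the tail one. Generic shape (Cache, acquire
   and release are stand-ins):

   typedef struct { int refs; } Cache;
   static void acquire(Cache *c) { c->refs++; }
   static void release(Cache *c) { c->refs--; }

   static int lookup(Cache *c, int found) {
     acquire(c);
     if (!found) { release(c); return 0; }  // early return releases too
     // ... use the cached entry ...
     release(c);
     return 1;
   }
*/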
tlen += snprintf(tbuf + VARSTR_HEADER_SIZE + tlen, TSDB_EXPLAIN_RESULT_ROW_SIZE - VARSTR_HEADER_SIZE - tlen, __VA_ARGS__) #define EXPLAIN_ROW_END() do { varDataSetLen(tbuf, tlen); tlen += VARSTR_HEADER_SIZE; isVerboseLine = true; } while (0) diff --git a/source/libs/command/src/command.c b/source/libs/command/src/command.c index bd20e96ac1..f204f239b4 100644 --- a/source/libs/command/src/command.c +++ b/source/libs/command/src/command.c @@ -760,6 +760,16 @@ static int32_t execAlterCmd(char* cmd, char* value, bool* processed) { return code; } qInfo("memory dbg disabled"); + } else if (0 == strcasecmp(cmd, COMMAND_ASYNCLOG)) { + int newAsyncLogValue = (strlen(value) == 0) ? 1 : atoi(value); + if (newAsyncLogValue != 0 && newAsyncLogValue != 1) { + code = TSDB_CODE_INVALID_CFG_VALUE; + qError("failed to alter asynclog, error:%s", tstrerror(code)); + goto _return; + } + + code = TSDB_CODE_SUCCESS; + tsAsyncLog = newAsyncLogValue; } else { goto _return; } diff --git a/source/libs/executor/src/aggregateoperator.c b/source/libs/executor/src/aggregateoperator.c index 5e649af47e..2d0a044559 100644 --- a/source/libs/executor/src/aggregateoperator.c +++ b/source/libs/executor/src/aggregateoperator.c @@ -335,6 +335,7 @@ static int32_t createDataBlockForEmptyInput(SOperatorInfo* pOperator, SSDataBloc colInfo.info.type = TSDB_DATA_TYPE_NULL; colInfo.info.bytes = 1; + SExprInfo* pOneExpr = &pOperator->exprSupp.pExprInfo[i]; for (int32_t j = 0; j < pOneExpr->base.numOfParams; ++j) { SFunctParam* pFuncParam = &pOneExpr->base.pParam[j]; @@ -354,6 +355,10 @@ static int32_t createDataBlockForEmptyInput(SOperatorInfo* pOperator, SSDataBloc } blockDataEnsureCapacity(pBlock, pBlock->info.rows); + for (int32_t i = 0; i < blockDataGetNumOfCols(pBlock); ++i) { + SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, i); + colDataSetNULL(pColInfoData, 0); + } *ppBlock = pBlock; return TSDB_CODE_SUCCESS; diff --git a/source/libs/executor/src/dataInserter.c b/source/libs/executor/src/dataInserter.c index e47cbb7eba..f301ddf4be 100644 --- a/source/libs/executor/src/dataInserter.c +++ b/source/libs/executor/src/dataInserter.c @@ -289,8 +289,8 @@ int32_t buildSubmitReqFromBlock(SDataInserterHandle* pInserter, SSubmitReq2** pp } if (disorderTs) { - tRowSort(tbData.aRowP); - if ((terrno = tRowMerge(tbData.aRowP, (STSchema*)pTSchema, 0)) != 0) { + if ((tRowSort(tbData.aRowP) != TSDB_CODE_SUCCESS) || + (terrno = tRowMerge(tbData.aRowP, (STSchema*)pTSchema, 0)) != 0) { goto _end; } } diff --git a/source/libs/executor/src/groupoperator.c b/source/libs/executor/src/groupoperator.c index f6b0a87f54..467a49b37a 100644 --- a/source/libs/executor/src/groupoperator.c +++ b/source/libs/executor/src/groupoperator.c @@ -1145,7 +1145,8 @@ static SSDataBlock* doStreamHashPartition(SOperatorInfo* pOperator) { } break; case STREAM_CREATE_CHILD_TABLE: case STREAM_RETRIEVE: - case STREAM_CHECKPOINT: { + case STREAM_CHECKPOINT: + case STREAM_GET_ALL: { return pBlock; } default: diff --git a/source/libs/executor/src/projectoperator.c b/source/libs/executor/src/projectoperator.c index 00b246afad..ab7a15eacd 100644 --- a/source/libs/executor/src/projectoperator.c +++ b/source/libs/executor/src/projectoperator.c @@ -293,7 +293,8 @@ SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) { // for stream interval if (pBlock->info.type == STREAM_RETRIEVE || pBlock->info.type == STREAM_DELETE_RESULT || - pBlock->info.type == STREAM_DELETE_DATA || pBlock->info.type == STREAM_CREATE_CHILD_TABLE) { + pBlock->info.type == STREAM_DELETE_DATA 
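/* execAlterCmd above gains an "asynclog" switch: an empty value means 1, and
   anything other than 0 or 1 is rejected with TSDB_CODE_INVALID_CFG_VALUE
   before tsAsyncLog is touched. Self-contained sketch of that
   validate-then-assign order (setAsyncLog and gAsyncLog are stand-ins):

   #include <stdlib.h>
   #include <string.h>

   static int gAsyncLog = 1;

   static int setAsyncLog(const char *value) {
     int v = (strlen(value) == 0) ? 1 : atoi(value);
     if (v != 0 && v != 1) return -1;  // invalid cfg value: reject first
     gAsyncLog = v;                    // assign only after validation
     return 0;
   }
*/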
|| pBlock->info.type == STREAM_CREATE_CHILD_TABLE || + pBlock->info.type == STREAM_CHECKPOINT) { return pBlock; } diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 0cd60d8288..8dbb8a979e 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -2218,7 +2218,9 @@ FETCH_NEXT_BLOCK: if (pSDB) { STableScanInfo* pTableScanInfo = pInfo->pTableScanOp->info; pSDB->info.type = pInfo->scanMode == STREAM_SCAN_FROM_DATAREADER_RANGE ? STREAM_NORMAL : STREAM_PULL_DATA; - checkUpdateData(pInfo, true, pSDB, false); + if (!pInfo->igCheckUpdate && pInfo->pUpdateInfo) { + checkUpdateData(pInfo, true, pSDB, false); + } printSpecDataBlock(pSDB, getStreamOpName(pOperator->operatorType), "update", GET_TASKID(pTaskInfo)); calBlockTbName(pInfo, pSDB); return pSDB; diff --git a/source/libs/executor/src/streamtimewindowoperator.c b/source/libs/executor/src/streamtimewindowoperator.c index c9da3c99e7..2eb6fb2d64 100644 --- a/source/libs/executor/src/streamtimewindowoperator.c +++ b/source/libs/executor/src/streamtimewindowoperator.c @@ -369,10 +369,25 @@ static void doBuildDeleteResult(SStreamIntervalOperatorInfo* pInfo, SArray* pWin } } +void clearGroupResInfo(SGroupResInfo* pGroupResInfo) { + if (pGroupResInfo->freeItem) { + int32_t size = taosArrayGetSize(pGroupResInfo->pRows); + for (int32_t i = pGroupResInfo->index; i < size; i++) { + void* pVal = taosArrayGetP(pGroupResInfo->pRows, i); + taosMemoryFree(pVal); + } + pGroupResInfo->freeItem = false; + } + pGroupResInfo->pRows = taosArrayDestroy(pGroupResInfo->pRows); + pGroupResInfo->index = 0; +} + void destroyStreamFinalIntervalOperatorInfo(void* param) { SStreamIntervalOperatorInfo* pInfo = (SStreamIntervalOperatorInfo*)param; cleanupBasicInfo(&pInfo->binfo); cleanupAggSup(&pInfo->aggSup); + clearGroupResInfo(&pInfo->groupResInfo); + // it should be empty. 
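/* clearGroupResInfo() above frees only the tail [index, size) of pRows, which
   by the groupResInfo convention is the part not yet consumed; rows before
   index have already been handed to the output and are owned elsewhere, while
   the tail would otherwise leak when the operator is destroyed mid-iteration.
   Plain-C sketch of that ownership rule:

   #include <stdlib.h>

   typedef struct { void **rows; int size; int index; int freeItem; } ResInfo;

   static void clearResInfo(ResInfo *p) {
     if (p->freeItem) {
       for (int i = p->index; i < p->size; i++) free(p->rows[i]);
       p->freeItem = 0;                // unconsumed tail released exactly once
     }
     free(p->rows);                    // then the container itself
     p->rows = NULL;
     p->size = p->index = 0;
   }
*/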
void* pIte = NULL; while ((pIte = taosHashIterate(pInfo->pPullDataMap, pIte)) != NULL) { @@ -389,7 +404,6 @@ void destroyStreamFinalIntervalOperatorInfo(void* param) { nodesDestroyNode((SNode*)pInfo->pPhyNode); colDataDestroy(&pInfo->twAggSup.timeWindowData); - pInfo->groupResInfo.pRows = taosArrayDestroy(pInfo->groupResInfo.pRows); cleanupExprSupp(&pInfo->scalarSupp); tSimpleHashCleanup(pInfo->pUpdatedMap); pInfo->pUpdatedMap = NULL; @@ -1023,7 +1037,7 @@ int32_t doStreamIntervalEncodeOpState(void** buf, int32_t len, SOperatorInfo* pO while ((pIte = taosHashIterate(pInfo->pPullDataMap, pIte)) != NULL) { void* key = taosHashGetKey(pIte, &keyLen); tlen += encodeSWinKey(buf, key); - SArray* pArray = (SArray*)pIte; + SArray* pArray = *(SArray**)pIte; int32_t chSize = taosArrayGetSize(pArray); tlen += taosEncodeFixedI32(buf, chSize); for (int32_t i = 0; i < chSize; i++) { @@ -1530,6 +1544,7 @@ void destroyStreamSessionAggOperatorInfo(void* param) { cleanupBasicInfo(&pInfo->binfo); destroyStreamAggSupporter(&pInfo->streamAggSup); cleanupExprSupp(&pInfo->scalarSupp); + clearGroupResInfo(&pInfo->groupResInfo); if (pInfo->pChildren != NULL) { int32_t size = taosArrayGetSize(pInfo->pChildren); @@ -3025,7 +3040,7 @@ void destroyStreamStateOperatorInfo(void* param) { SStreamStateAggOperatorInfo* pInfo = (SStreamStateAggOperatorInfo*)param; cleanupBasicInfo(&pInfo->binfo); destroyStreamAggSupporter(&pInfo->streamAggSup); - cleanupGroupResInfo(&pInfo->groupResInfo); + clearGroupResInfo(&pInfo->groupResInfo); cleanupExprSupp(&pInfo->scalarSupp); if (pInfo->pChildren != NULL) { int32_t size = taosArrayGetSize(pInfo->pChildren); diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index c62b5946dc..68a83fa662 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -651,7 +651,7 @@ static int32_t translateApercentileImpl(SFunctionNode* pFunc, char* pErrBuf, int (SDataType){.bytes = getApercentileMaxSize() + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY}; } else { // original percent param is reserved - if (2 != numOfParams) { + if (3 != numOfParams && 2 != numOfParams) { return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } uint8_t para1Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; @@ -660,6 +660,19 @@ static int32_t translateApercentileImpl(SFunctionNode* pFunc, char* pErrBuf, int return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } + if (3 == numOfParams) { + uint8_t para3Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type; + if (!IS_STR_DATA_TYPE(para3Type)) { + return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); + } + + SNode* pParamNode2 = nodesListGetNode(pFunc->pParameterList, 2); + if (QUERY_NODE_VALUE != nodeType(pParamNode2) || !validateApercentileAlgo((SValueNode*)pParamNode2)) { + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, + "Third parameter algorithm of apercentile must be 'default' or 't-digest'"); + } + } + pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE}; } @@ -744,7 +757,11 @@ int32_t topBotCreateMergeParam(SNodeList* pRawParameters, SNode* pPartialRes, SN } int32_t apercentileCreateMergeParam(SNodeList* pRawParameters, SNode* pPartialRes, SNodeList** pParameters) { - return reserveFirstMergeParam(pRawParameters, pPartialRes, pParameters); + int32_t code = reserveFirstMergeParam(pRawParameters, pPartialRes, 
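/* The doStreamIntervalEncodeOpState fix above, SArray* pArray = *(SArray**)pIte,
   is the classic hash-iterator pitfall: iteration yields a pointer to the
   stored value, and when the stored value is itself a pointer one more
   dereference is required. Minimal illustration:

   #include <assert.h>

   static void derefDemo(void) {
     int  x = 7;
     int *stored = &x;           // what the map stores
     void *it = &stored;         // what iteration yields: the value's address
     int *right = *(int **)it;   // fixed code: dereference once more
     assert(right == &x && *right == 7);  // (int *)it would point at 'stored'
   }
*/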
pParameters); + if (TSDB_CODE_SUCCESS == code && pRawParameters->length >= 3) { + code = nodesListStrictAppend(*pParameters, nodesCloneNode(nodesListGetNode(pRawParameters, 2))); + } + return code; } static int32_t translateSpread(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { @@ -1786,7 +1803,7 @@ static int32_t translateDiff(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { } uint8_t colType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; - if (!IS_SIGNED_NUMERIC_TYPE(colType) && !IS_FLOAT_TYPE(colType) && TSDB_DATA_TYPE_BOOL != colType && + if (!IS_INTEGER_TYPE(colType) && !IS_FLOAT_TYPE(colType) && TSDB_DATA_TYPE_BOOL != colType && !IS_TIMESTAMP_TYPE(colType)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -1815,6 +1832,8 @@ static int32_t translateDiff(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { uint8_t resType; if (IS_SIGNED_NUMERIC_TYPE(colType) || IS_TIMESTAMP_TYPE(colType) || TSDB_DATA_TYPE_BOOL == colType) { resType = TSDB_DATA_TYPE_BIGINT; + } else if (IS_UNSIGNED_NUMERIC_TYPE(colType)) { + resType = TSDB_DATA_TYPE_UBIGINT; } else { resType = TSDB_DATA_TYPE_DOUBLE; } diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index bcbb3af950..071afe0159 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -1904,7 +1904,7 @@ int32_t apercentileFunction(SqlFunctionCtx* pCtx) { return TSDB_CODE_SUCCESS; } -static void apercentileTransferInfo(SAPercentileInfo* pInput, SAPercentileInfo* pOutput) { +static void apercentileTransferInfo(SAPercentileInfo* pInput, SAPercentileInfo* pOutput, bool* hasRes) { pOutput->percent = pInput->percent; pOutput->algo = pInput->algo; if (pOutput->algo == APERCT_ALGO_TDIGEST) { @@ -1915,6 +1915,10 @@ static void apercentileTransferInfo(SAPercentileInfo* pInput, SAPercentileInfo* return; } + if (hasRes) { + *hasRes = true; + } + buildTDigestInfo(pOutput); TDigest* pTDigest = pOutput->pTDigest; tdigestAutoFill(pTDigest, COMPRESSION); @@ -1931,6 +1935,10 @@ static void apercentileTransferInfo(SAPercentileInfo* pInput, SAPercentileInfo* return; } + if (hasRes) { + *hasRes = true; + } + buildHistogramInfo(pOutput); SHistogramInfo* pHisto = pOutput->pHisto; @@ -1970,12 +1978,13 @@ int32_t apercentileFunctionMerge(SqlFunctionCtx* pCtx) { qDebug("%s total %" PRId64 " rows will merge, %p", __FUNCTION__, pInput->numOfRows, pInfo->pHisto); + bool hasRes = false; int32_t start = pInput->startRowIndex; for (int32_t i = start; i < start + pInput->numOfRows; ++i) { char* data = colDataGetData(pCol, i); SAPercentileInfo* pInputInfo = (SAPercentileInfo*)varDataVal(data); - apercentileTransferInfo(pInputInfo, pInfo); + apercentileTransferInfo(pInputInfo, pInfo, &hasRes); } if (pInfo->algo != APERCT_ALGO_TDIGEST) { @@ -1984,7 +1993,7 @@ int32_t apercentileFunctionMerge(SqlFunctionCtx* pCtx) { pInfo->pHisto->numOfEntries, pInfo->pHisto); } - SET_VAL(pResInfo, 1, 1); + SET_VAL(pResInfo, hasRes ? 
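/* The hasRes flag threaded through apercentileTransferInfo() above lets the
   merge stage distinguish "merged at least one non-empty partial state" from
   "all inputs empty", and SET_VAL(pResInfo, hasRes ? 1 : 0, 1) then reports
   zero result rows instead of a value computed from an empty digest. Sketch of
   the counting rule (Partial and mergeNumOfRes are stand-ins):

   typedef struct { int entries; } Partial;

   static int mergeNumOfRes(const Partial *in, int n) {
     int hasRes = 0;
     for (int i = 0; i < n; i++) {
       if (in[i].entries > 0) hasRes = 1;  // only non-empty inputs count
     }
     return hasRes ? 1 : 0;                // numOfRes for the result info
   }
*/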
1 : 0, 1); return TSDB_CODE_SUCCESS; } @@ -2056,7 +2065,7 @@ int32_t apercentileCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) qDebug("%s start to combine apercentile, %p", __FUNCTION__, pDBuf->pHisto); - apercentileTransferInfo(pSBuf, pDBuf); + apercentileTransferInfo(pSBuf, pDBuf, NULL); pDResInfo->numOfRes = TMAX(pDResInfo->numOfRes, pSResInfo->numOfRes); pDResInfo->isNullRes &= pSResInfo->isNullRes; return TSDB_CODE_SUCCESS; @@ -2714,16 +2723,20 @@ static int32_t doSetPrevVal(SDiffInfo* pDiffInfo, int32_t type, const char* pv, case TSDB_DATA_TYPE_BOOL: pDiffInfo->prev.i64 = *(bool*)pv ? 1 : 0; break; + case TSDB_DATA_TYPE_UTINYINT: case TSDB_DATA_TYPE_TINYINT: pDiffInfo->prev.i64 = *(int8_t*)pv; break; + case TSDB_DATA_TYPE_UINT: case TSDB_DATA_TYPE_INT: pDiffInfo->prev.i64 = *(int32_t*)pv; break; + case TSDB_DATA_TYPE_USMALLINT: case TSDB_DATA_TYPE_SMALLINT: pDiffInfo->prev.i64 = *(int16_t*)pv; break; case TSDB_DATA_TYPE_TIMESTAMP: + case TSDB_DATA_TYPE_UBIGINT: case TSDB_DATA_TYPE_BIGINT: pDiffInfo->prev.i64 = *(int64_t*)pv; break; @@ -2745,6 +2758,7 @@ static int32_t doHandleDiff(SDiffInfo* pDiffInfo, int32_t type, const char* pv, int64_t ts) { pDiffInfo->prevTs = ts; switch (type) { + case TSDB_DATA_TYPE_UINT: case TSDB_DATA_TYPE_INT: { int32_t v = *(int32_t*)pv; int64_t delta = v - pDiffInfo->prev.i64; // direct previous may be null @@ -2758,6 +2772,7 @@ static int32_t doHandleDiff(SDiffInfo* pDiffInfo, int32_t type, const char* pv, break; } case TSDB_DATA_TYPE_BOOL: + case TSDB_DATA_TYPE_UTINYINT: case TSDB_DATA_TYPE_TINYINT: { int8_t v = *(int8_t*)pv; int64_t delta = v - pDiffInfo->prev.i64; // direct previous may be null @@ -2769,6 +2784,7 @@ static int32_t doHandleDiff(SDiffInfo* pDiffInfo, int32_t type, const char* pv, pDiffInfo->prev.i64 = v; break; } + case TSDB_DATA_TYPE_USMALLINT: case TSDB_DATA_TYPE_SMALLINT: { int16_t v = *(int16_t*)pv; int64_t delta = v - pDiffInfo->prev.i64; // direct previous may be null @@ -2781,6 +2797,7 @@ static int32_t doHandleDiff(SDiffInfo* pDiffInfo, int32_t type, const char* pv, break; } case TSDB_DATA_TYPE_TIMESTAMP: + case TSDB_DATA_TYPE_UBIGINT: case TSDB_DATA_TYPE_BIGINT: { int64_t v = *(int64_t*)pv; int64_t delta = v - pDiffInfo->prev.i64; // direct previous may be null diff --git a/source/libs/parser/src/parInsertUtil.c b/source/libs/parser/src/parInsertUtil.c index 79e305989b..3efb5dafcb 100644 --- a/source/libs/parser/src/parInsertUtil.c +++ b/source/libs/parser/src/parInsertUtil.c @@ -495,9 +495,9 @@ int32_t insMergeTableDataCxt(SHashObj* pTableHash, SArray** pVgDataBlocks) { tColDataSortMerge(pTableCxt->pData->aCol); } else { if (!pTableCxt->ordered) { - tRowSort(pTableCxt->pData->aRowP); + code = tRowSort(pTableCxt->pData->aRowP); } - if (!pTableCxt->ordered || pTableCxt->duplicateTs) { + if (code == TSDB_CODE_SUCCESS && (!pTableCxt->ordered || pTableCxt->duplicateTs)) { code = tRowMerge(pTableCxt->pData->aRowP, pTableCxt->pSchema, 0); } } diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index a043be3790..1c31993a92 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -4667,9 +4667,133 @@ static int32_t checkCreateDatabase(STranslateContext* pCxt, SCreateDatabaseStmt* return checkDatabaseOptions(pCxt, pStmt->dbName, pStmt->pOptions); } +#define FILL_CMD_SQL(sql, sqlLen, pCmdReq, CMD_TYPE, genericCmd) \ + CMD_TYPE* pCmdReq = genericCmd; \ + char* cmdSql = taosMemoryMalloc(sqlLen); \ + if (cmdSql == NULL) { \ + return 
TSDB_CODE_OUT_OF_MEMORY; \ + } \ + memcpy(cmdSql, sql, sqlLen); \ + pCmdReq->sqlLen = sqlLen; \ + pCmdReq->sql = cmdSql; \ + +static int32_t fillCmdSql(STranslateContext* pCxt, int16_t msgType, void* pReq) { + const char* sql = pCxt->pParseCxt->pSql; + size_t sqlLen = pCxt->pParseCxt->sqlLen; + + switch (msgType) { + case TDMT_MND_CREATE_DB: { + FILL_CMD_SQL(sql, sqlLen, pCmdReq, SCreateDbReq, pReq); + break; + } + case TDMT_MND_ALTER_DB: { + FILL_CMD_SQL(sql, sqlLen, pCmdReq, SAlterDbReq, pReq); + break; + } + case TDMT_MND_DROP_DB: { + FILL_CMD_SQL(sql, sqlLen, pCmdReq, SDropDbReq, pReq); + break; + } + case TDMT_MND_COMPACT_DB: { + FILL_CMD_SQL(sql, sqlLen, pCmdReq, SCompactDbReq, pReq); + break; + } + + case TDMT_MND_TMQ_DROP_TOPIC: { + FILL_CMD_SQL(sql, sqlLen, pCmdReq, SMDropTopicReq, pReq); + break; + } + + case TDMT_MND_BALANCE_VGROUP_LEADER: { + FILL_CMD_SQL(sql, sqlLen, pCmdReq, SBalanceVgroupLeaderReq, pReq); + break; + } + case TDMT_MND_BALANCE_VGROUP: { + FILL_CMD_SQL(sql, sqlLen, pCmdReq, SBalanceVgroupReq, pReq); + break; + } + case TDMT_MND_REDISTRIBUTE_VGROUP: { + FILL_CMD_SQL(sql, sqlLen, pCmdReq, SRedistributeVgroupReq, pReq); + break; + } + case TDMT_MND_CREATE_STB: { + FILL_CMD_SQL(sql, sqlLen, pCmdReq, SMCreateStbReq, pReq); + break; + } + case TDMT_MND_DROP_STB: { + FILL_CMD_SQL(sql, sqlLen, pCmdReq, SMDropStbReq, pReq); + break; + } + case TDMT_MND_ALTER_STB: { + FILL_CMD_SQL(sql, sqlLen, pCmdReq, SMAlterStbReq, pReq); + break; + } + + case TDMT_MND_DROP_USER: { + FILL_CMD_SQL(sql, sqlLen, pCmdReq, SDropUserReq, pReq); + break; + } + case TDMT_MND_CREATE_USER: { + FILL_CMD_SQL(sql, sqlLen, pCmdReq, SCreateUserReq, pReq); + break; + } + case TDMT_MND_ALTER_USER: { + FILL_CMD_SQL(sql, sqlLen, pCmdReq, SAlterUserReq, pReq); + break; + } + + case TDMT_MND_CREATE_QNODE: { + FILL_CMD_SQL(sql, sqlLen, pCmdReq, SMCreateQnodeReq, pReq); + break; + } + case TDMT_MND_DROP_QNODE: { + FILL_CMD_SQL(sql, sqlLen, pCmdReq, SMDropQnodeReq, pReq); + break; + } + + case TDMT_MND_CREATE_MNODE: { + FILL_CMD_SQL(sql, sqlLen, pCmdReq, SMCreateMnodeReq, pReq); + break; + } + case TDMT_MND_DROP_MNODE: { + FILL_CMD_SQL(sql, sqlLen, pCmdReq, SMDropMnodeReq, pReq); + break; + } + + case TDMT_MND_CREATE_DNODE: { + FILL_CMD_SQL(sql, sqlLen, pCmdReq, SCreateDnodeReq, pReq); + break; + } + case TDMT_MND_DROP_DNODE: { + FILL_CMD_SQL(sql, sqlLen, pCmdReq, SDropDnodeReq, pReq); + break; + } + case TDMT_MND_RESTORE_DNODE: { + FILL_CMD_SQL(sql, sqlLen, pCmdReq, SRestoreDnodeReq, pReq); + break; + } + case TDMT_MND_CONFIG_DNODE: { + FILL_CMD_SQL(sql, sqlLen, pCmdReq, SMCfgDnodeReq, pReq); + break; + } + + case TDMT_MND_DROP_STREAM: { + FILL_CMD_SQL(sql, sqlLen, pCmdReq, SMDropStreamReq, pReq); + break; + } + default: { + break; + } + + } + + return TSDB_CODE_SUCCESS; +} + typedef int32_t (*FSerializeFunc)(void* pBuf, int32_t bufLen, void* pReq); static int32_t buildCmdMsg(STranslateContext* pCxt, int16_t msgType, FSerializeFunc func, void* pReq) { + fillCmdSql(pCxt, msgType, pReq); pCxt->pCmdMsg = taosMemoryMalloc(sizeof(SCmdMsgInfo)); if (NULL == pCxt->pCmdMsg) { return TSDB_CODE_OUT_OF_MEMORY; @@ -4706,7 +4830,9 @@ static int32_t translateDropDatabase(STranslateContext* pCxt, SDropDatabaseStmt* tNameGetFullDbName(&name, dropReq.db); dropReq.ignoreNotExists = pStmt->ignoreNotExists; - return buildCmdMsg(pCxt, TDMT_MND_DROP_DB, (FSerializeFunc)tSerializeSDropDbReq, &dropReq); + int32_t code = buildCmdMsg(pCxt, TDMT_MND_DROP_DB, (FSerializeFunc)tSerializeSDropDbReq, &dropReq); + tFreeSDropDbReq(&dropReq); 
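/* FILL_CMD_SQL above, written out as a plain function: duplicate the original
   statement into the outgoing request so the server side can audit it. The
   request then owns that copy, which is why these translate hunks add a
   tFreeS...Req() after every buildCmdMsg(). CmdReq and fillSql are
   illustrative stand-ins:

   #include <stdint.h>
   #include <stdlib.h>
   #include <string.h>

   typedef struct { char *sql; int32_t sqlLen; } CmdReq;

   static int32_t fillSql(CmdReq *req, const char *sql, size_t len) {
     char *copy = malloc(len);
     if (copy == NULL) return -1;  // TSDB_CODE_OUT_OF_MEMORY
     memcpy(copy, sql, len);       // length travels alongside the pointer
     req->sql = copy;
     req->sqlLen = (int32_t)len;
     return 0;
   }
*/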
+ return code; } static void buildAlterDbReq(STranslateContext* pCxt, SAlterDatabaseStmt* pStmt, SAlterDbReq* pReq) { @@ -4743,7 +4869,9 @@ static int32_t translateAlterDatabase(STranslateContext* pCxt, SAlterDatabaseStm SAlterDbReq alterReq = {0}; buildAlterDbReq(pCxt, pStmt, &alterReq); - return buildCmdMsg(pCxt, TDMT_MND_ALTER_DB, (FSerializeFunc)tSerializeSAlterDbReq, &alterReq); + code = buildCmdMsg(pCxt, TDMT_MND_ALTER_DB, (FSerializeFunc)tSerializeSAlterDbReq, &alterReq); + tFreeSAlterDbReq(&alterReq); + return code; } static int32_t translateTrimDatabase(STranslateContext* pCxt, STrimDatabaseStmt* pStmt) { @@ -5460,6 +5588,7 @@ static int32_t doTranslateDropSuperTable(STranslateContext* pCxt, const SName* p tNameExtractFullName(pTableName, dropReq.name); dropReq.igNotExists = ignoreNotExists; code = buildCmdMsg(pCxt, TDMT_MND_DROP_STB, (FSerializeFunc)tSerializeSMDropStbReq, &dropReq); + tFreeSMDropStbReq(&dropReq); } return code; } @@ -5775,7 +5904,9 @@ static int32_t translateDropUser(STranslateContext* pCxt, SDropUserStmt* pStmt) SDropUserReq dropReq = {0}; strcpy(dropReq.user, pStmt->userName); - return buildCmdMsg(pCxt, TDMT_MND_DROP_USER, (FSerializeFunc)tSerializeSDropUserReq, &dropReq); + int32_t code = buildCmdMsg(pCxt, TDMT_MND_DROP_USER, (FSerializeFunc)tSerializeSDropUserReq, &dropReq); + tFreeSDropUserReq(&dropReq); + return code; } static int32_t translateCreateDnode(STranslateContext* pCxt, SCreateDnodeStmt* pStmt) { @@ -5783,7 +5914,9 @@ static int32_t translateCreateDnode(STranslateContext* pCxt, SCreateDnodeStmt* p strcpy(createReq.fqdn, pStmt->fqdn); createReq.port = pStmt->port; - return buildCmdMsg(pCxt, TDMT_MND_CREATE_DNODE, (FSerializeFunc)tSerializeSCreateDnodeReq, &createReq); + int32_t code = buildCmdMsg(pCxt, TDMT_MND_CREATE_DNODE, (FSerializeFunc)tSerializeSCreateDnodeReq, &createReq); + tFreeSCreateDnodeReq(&createReq); + return code; } static int32_t translateDropDnode(STranslateContext* pCxt, SDropDnodeStmt* pStmt) { @@ -5794,7 +5927,9 @@ static int32_t translateDropDnode(STranslateContext* pCxt, SDropDnodeStmt* pStmt dropReq.force = pStmt->force; dropReq.unsafe = pStmt->unsafe; - return buildCmdMsg(pCxt, TDMT_MND_DROP_DNODE, (FSerializeFunc)tSerializeSDropDnodeReq, &dropReq); + int32_t code = buildCmdMsg(pCxt, TDMT_MND_DROP_DNODE, (FSerializeFunc)tSerializeSDropDnodeReq, &dropReq); + tFreeSDropDnodeReq(&dropReq); + return code; } static int32_t translateAlterDnode(STranslateContext* pCxt, SAlterDnodeStmt* pStmt) { @@ -5803,7 +5938,9 @@ static int32_t translateAlterDnode(STranslateContext* pCxt, SAlterDnodeStmt* pSt strcpy(cfgReq.config, pStmt->config); strcpy(cfgReq.value, pStmt->value); - return buildCmdMsg(pCxt, TDMT_MND_CONFIG_DNODE, (FSerializeFunc)tSerializeSMCfgDnodeReq, &cfgReq); + int32_t code = buildCmdMsg(pCxt, TDMT_MND_CONFIG_DNODE, (FSerializeFunc)tSerializeSMCfgDnodeReq, &cfgReq); + tFreeSMCfgDnodeReq(&cfgReq); + return code; } static int32_t translateRestoreDnode(STranslateContext* pCxt, SRestoreComponentNodeStmt* pStmt) { @@ -5825,7 +5962,10 @@ static int32_t translateRestoreDnode(STranslateContext* pCxt, SRestoreComponentN default: return -1; } - return buildCmdMsg(pCxt, TDMT_MND_RESTORE_DNODE, (FSerializeFunc)tSerializeSRestoreDnodeReq, &restoreReq); + + int32_t code = buildCmdMsg(pCxt, TDMT_MND_RESTORE_DNODE, (FSerializeFunc)tSerializeSRestoreDnodeReq, &restoreReq); + tFreeSRestoreDnodeReq(&restoreReq); + return code; } static int32_t getSmaIndexDstVgId(STranslateContext* pCxt, const char* pDbName, const char* pTableName, @@ 
-6095,8 +6235,10 @@ static int16_t getCreateComponentNodeMsgType(ENodeType type) { static int32_t translateCreateComponentNode(STranslateContext* pCxt, SCreateComponentNodeStmt* pStmt) { SMCreateQnodeReq createReq = {.dnodeId = pStmt->dnodeId}; - return buildCmdMsg(pCxt, getCreateComponentNodeMsgType(nodeType(pStmt)), + int32_t code = buildCmdMsg(pCxt, getCreateComponentNodeMsgType(nodeType(pStmt)), (FSerializeFunc)tSerializeSCreateDropMQSNodeReq, &createReq); + tFreeSMCreateQnodeReq(&createReq); + return code; } static int16_t getDropComponentNodeMsgType(ENodeType type) { @@ -6117,8 +6259,10 @@ static int16_t getDropComponentNodeMsgType(ENodeType type) { static int32_t translateDropComponentNode(STranslateContext* pCxt, SDropComponentNodeStmt* pStmt) { SDDropQnodeReq dropReq = {.dnodeId = pStmt->dnodeId}; - return buildCmdMsg(pCxt, getDropComponentNodeMsgType(nodeType(pStmt)), + int32_t code = buildCmdMsg(pCxt, getDropComponentNodeMsgType(nodeType(pStmt)), (FSerializeFunc)tSerializeSCreateDropMQSNodeReq, &dropReq); + tFreeSDDropQnodeReq(&dropReq); + return code; } static int32_t checkTopicQuery(STranslateContext* pCxt, SSelectStmt* pSelect) { @@ -6307,7 +6451,9 @@ static int32_t translateDropTopic(STranslateContext* pCxt, SDropTopicStmt* pStmt snprintf(dropReq.name, sizeof(dropReq.name), "%d.%s", pCxt->pParseCxt->acctId, pStmt->topicName); dropReq.igNotExists = pStmt->ignoreNotExists; - return buildCmdMsg(pCxt, TDMT_MND_TMQ_DROP_TOPIC, (FSerializeFunc)tSerializeSMDropTopicReq, &dropReq); + int32_t code = buildCmdMsg(pCxt, TDMT_MND_TMQ_DROP_TOPIC, (FSerializeFunc)tSerializeSMDropTopicReq, &dropReq); + tFreeSMDropTopicReq(&dropReq); + return code; } static int32_t translateDropCGroup(STranslateContext* pCxt, SDropCGroupStmt* pStmt) { @@ -6375,6 +6521,7 @@ static int32_t translateCompact(STranslateContext* pCxt, SCompactDatabaseStmt* p if (TSDB_CODE_SUCCESS == code) { code = buildCmdMsg(pCxt, TDMT_MND_COMPACT_DB, (FSerializeFunc)tSerializeSCompactDbReq, &compactReq); } + tFreeSCompactDbReq(&compactReq); return code; } @@ -7234,7 +7381,9 @@ static int32_t translateDropStream(STranslateContext* pCxt, SDropStreamStmt* pSt tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->streamName, strlen(pStmt->streamName)); tNameGetFullDbName(&name, dropReq.name); dropReq.igNotExists = pStmt->ignoreNotExists; - return buildCmdMsg(pCxt, TDMT_MND_DROP_STREAM, (FSerializeFunc)tSerializeSMDropStreamReq, &dropReq); + int32_t code = buildCmdMsg(pCxt, TDMT_MND_DROP_STREAM, (FSerializeFunc)tSerializeSMDropStreamReq, &dropReq); + tFreeSMDropStreamReq(&dropReq); + return code; } static int32_t translatePauseStream(STranslateContext* pCxt, SPauseStreamStmt* pStmt) { @@ -7429,18 +7578,24 @@ static int32_t translateRevoke(STranslateContext* pCxt, SRevokeStmt* pStmt) { strcpy(req.user, pStmt->userName); sprintf(req.objname, "%d.%s", pCxt->pParseCxt->acctId, pStmt->objName); sprintf(req.tabName, "%s", pStmt->tabName); - return buildCmdMsg(pCxt, TDMT_MND_ALTER_USER, (FSerializeFunc)tSerializeSAlterUserReq, &req); + int32_t code = buildCmdMsg(pCxt, TDMT_MND_ALTER_USER, (FSerializeFunc)tSerializeSAlterUserReq, &req); + tFreeSAlterUserReq(&req); + return code; } static int32_t translateBalanceVgroup(STranslateContext* pCxt, SBalanceVgroupStmt* pStmt) { SBalanceVgroupReq req = {0}; - return buildCmdMsg(pCxt, TDMT_MND_BALANCE_VGROUP, (FSerializeFunc)tSerializeSBalanceVgroupReq, &req); + int32_t code = buildCmdMsg(pCxt, TDMT_MND_BALANCE_VGROUP, (FSerializeFunc)tSerializeSBalanceVgroupReq, &req); + 
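/* The same two-line pattern repeats through this file: buildCmdMsg()
   serializes the request into its own message buffer, after which the
   request's heap fields (now including the sql copy from fillCmdSql) can be
   released immediately. Runnable miniature with strdup standing in for
   serialization:

   #include <stdlib.h>
   #include <string.h>

   typedef struct { char *sql; } Req;

   static int buildMsg(const Req *r, char **msg) {
     *msg = strdup(r->sql);        // "serialize": the message owns a copy
     return *msg ? 0 : -1;
   }

   static int translatePattern(char **msg) {
     Req req = { .sql = strdup("BALANCE VGROUP") };
     if (req.sql == NULL) return -1;
     int code = buildMsg(&req, msg);
     free(req.sql);                // safe right away: msg holds its own copy
     return code;
   }
*/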
tFreeSBalanceVgroupReq(&req); + return code; } static int32_t translateBalanceVgroupLeader(STranslateContext* pCxt, SBalanceVgroupLeaderStmt* pStmt) { SBalanceVgroupLeaderReq req = {0}; req.vgId = pStmt->vgId; - return buildCmdMsg(pCxt, TDMT_MND_BALANCE_VGROUP_LEADER, (FSerializeFunc)tSerializeSBalanceVgroupLeaderReq, &req); + int32_t code = buildCmdMsg(pCxt, TDMT_MND_BALANCE_VGROUP_LEADER, (FSerializeFunc)tSerializeSBalanceVgroupLeaderReq, &req); + tFreeSBalanceVgroupLeaderReq(&req); + return code; } static int32_t translateMergeVgroup(STranslateContext* pCxt, SMergeVgroupStmt* pStmt) { @@ -7484,6 +7639,7 @@ static int32_t translateRedistributeVgroup(STranslateContext* pCxt, SRedistribut req.dnodeId3 = pStmt->dnodeId3; code = buildCmdMsg(pCxt, TDMT_MND_REDISTRIBUTE_VGROUP, (FSerializeFunc)tSerializeSRedistributeVgroupReq, &req); } + tFreeSRedistributeVgroupReq(&req); return code; } diff --git a/source/libs/planner/src/planLogicCreater.c b/source/libs/planner/src/planLogicCreater.c index a3baa5d43a..0e56615451 100644 --- a/source/libs/planner/src/planLogicCreater.c +++ b/source/libs/planner/src/planLogicCreater.c @@ -88,6 +88,7 @@ static EDealRes doRewriteExpr(SNode** pNode, void* pContext) { pCxt->pOutputs[index] = true; break; } + index++; } } break; @@ -174,6 +175,7 @@ static int32_t cloneRewriteExprs(SNodeList* pExprs, bool* pOutputs, SNodeList** break; } } + index++; } return code; } diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c index f10c42310d..430e69f46f 100644 --- a/source/libs/planner/src/planOptimizer.c +++ b/source/libs/planner/src/planOptimizer.c @@ -806,7 +806,7 @@ static bool pushDownCondOptIsColEqualOnCond(SJoinLogicNode* pJoin, SNode* pCond, return false; } SOperatorNode* pOper = (SOperatorNode*)pCond; - if (QUERY_NODE_COLUMN != nodeType(pOper->pLeft) || QUERY_NODE_COLUMN != nodeType(pOper->pRight)) { + if (QUERY_NODE_COLUMN != nodeType(pOper->pLeft) || NULL == pOper->pRight || QUERY_NODE_COLUMN != nodeType(pOper->pRight)) { return false; } SColumnNode* pLeft = (SColumnNode*)(pOper->pLeft); @@ -3217,8 +3217,11 @@ int32_t stbJoinOptAddFuncToScanNode(char* funcName, SScanLogicNode* pScan) { SFunctionNode* pUidFunc = createFunction(funcName, NULL); snprintf(pUidFunc->node.aliasName, sizeof(pUidFunc->node.aliasName), "%s.%p", pUidFunc->functionName, pUidFunc); - nodesListStrictAppend(pScan->pScanPseudoCols, (SNode *)pUidFunc); - return createColumnByRewriteExpr((SNode*)pUidFunc, &pScan->node.pTargets); + int32_t code = nodesListStrictAppend(pScan->pScanPseudoCols, (SNode *)pUidFunc); + if (TSDB_CODE_SUCCESS == code) { + code = createColumnByRewriteExpr((SNode*)pUidFunc, &pScan->node.pTargets); + } + return code; } @@ -3365,12 +3368,7 @@ static int32_t stbJoinOptCreateTableScanNodes(SLogicNode* pJoin, SNodeList** ppL pScan->scanType = SCAN_TYPE_TABLE; } - if (TSDB_CODE_SUCCESS == code) { - *ppList = pList; - } else { - nodesDestroyList(pList); - *ppList = NULL; - } + *ppList = pList; return code; } @@ -3474,12 +3472,15 @@ static int32_t stbJoinOptCreateMergeJoinNode(SLogicNode* pOrig, SLogicNode* pChi FOREACH(pNode, pJoin->node.pChildren) { ERASE_NODE(pJoin->node.pChildren); } - nodesListStrictAppend(pJoin->node.pChildren, (SNode *)pChild); - pChild->pParent = (SLogicNode*)pJoin; + int32_t code = nodesListStrictAppend(pJoin->node.pChildren, (SNode *)pChild); + if (TSDB_CODE_SUCCESS == code) { + pChild->pParent = (SLogicNode*)pJoin; + *ppLogic = (SLogicNode*)pJoin; + } else { + nodesDestroyNode((SNode*)pJoin); + } - *ppLogic = 
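/* The planner hunks above turn fire-and-forget nodesListStrictAppend() calls
   into checked ones: on failure the half-built node is destroyed rather than
   leaked, and the error code is propagated instead of an unconditional
   TSDB_CODE_SUCCESS. Generic shape (Node and appendChild are stand-ins):

   #include <stdlib.h>

   typedef struct Node { struct Node **kids; int n, cap; } Node;

   static int appendChild(Node *p, Node *c) {
     if (p->n == p->cap) return -1;  // allocation-failure stand-in
     p->kids[p->n++] = c;
     return 0;
   }

   static int attach(Node *join, Node *child, Node **out) {
     int code = appendChild(join, child);
     if (code == 0) *out = join;
     else { free(join); *out = NULL; }  // destroy on failure, no leak
     return code;
   }
*/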
(SLogicNode*)pJoin; - - return TSDB_CODE_SUCCESS; + return code; } static int32_t stbJoinOptCreateDynQueryCtrlNode(SLogicNode* pRoot, SLogicNode* pPrev, SLogicNode* pPost, bool* srcScan, SLogicNode** ppDynNode) { @@ -3519,11 +3520,18 @@ static int32_t stbJoinOptCreateDynQueryCtrlNode(SLogicNode* pRoot, SLogicNode* p nodesListStrictAppend(pDynCtrl->stbJoin.pUidList, nodesListGetNode(pHJoin->node.pTargets, 2)); nodesListStrictAppend(pDynCtrl->stbJoin.pVgList, nodesListGetNode(pHJoin->node.pTargets, 1)); nodesListStrictAppend(pDynCtrl->stbJoin.pVgList, nodesListGetNode(pHJoin->node.pTargets, 3)); - + if (TSDB_CODE_SUCCESS == code) { - nodesListStrictAppend(pDynCtrl->node.pChildren, (SNode*)pPrev); - nodesListStrictAppend(pDynCtrl->node.pChildren, (SNode*)pPost); - pDynCtrl->node.pTargets = nodesCloneList(pPost->pTargets); + code = nodesListStrictAppend(pDynCtrl->node.pChildren, (SNode*)pPrev); + if (TSDB_CODE_SUCCESS == code) { + code = nodesListStrictAppend(pDynCtrl->node.pChildren, (SNode*)pPost); + } + if (TSDB_CODE_SUCCESS == code) { + pDynCtrl->node.pTargets = nodesCloneList(pPost->pTargets); + if (!pDynCtrl->node.pTargets) { + code = TSDB_CODE_OUT_OF_MEMORY; + } + } } if (TSDB_CODE_SUCCESS == code) { diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index d55e80a23d..d6799a25a7 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -1025,11 +1025,7 @@ static int32_t createGroupCachePhysiNode(SPhysiPlanContext* pCxt, SNodeList* pCh } */ - if (TSDB_CODE_SUCCESS == code) { - *pPhyNode = (SPhysiNode*)pGrpCache; - } else { - nodesDestroyNode((SNode*)pGrpCache); - } + *pPhyNode = (SPhysiNode*)pGrpCache; return code; } @@ -1059,6 +1055,8 @@ static int32_t updateDynQueryCtrlStbJoinInfo(SPhysiPlanContext* pCxt, SNodeList* } pDynCtrl->stbJoin.batchFetch = pLogicNode->stbJoin.batchFetch; } + nodesDestroyList(pVgList); + nodesDestroyList(pUidList); return code; } diff --git a/source/libs/scalar/src/filter.c b/source/libs/scalar/src/filter.c index 12729c3262..adc3e9fd27 100644 --- a/source/libs/scalar/src/filter.c +++ b/source/libs/scalar/src/filter.c @@ -3983,18 +3983,15 @@ _return: return code; } -static int32_t fltSclGetDatumValueFromPoint(SFltSclPoint *point, SFltSclDatum *d) { +static int32_t fltSclGetTimeStampDatum(SFltSclPoint *point, SFltSclDatum *d) { *d = point->val; - if (point->val.kind == FLT_SCL_DATUM_KIND_NULL) { - return TSDB_CODE_SUCCESS; - } - if (point->val.kind == FLT_SCL_DATUM_KIND_MAX) { - getDataMax(d->type.type, &(d->i)); - } else if (point->val.kind == FLT_SCL_DATUM_KIND_MIN) { - getDataMin(d->type.type, &(d->i)); - } + d->kind = FLT_SCL_DATUM_KIND_INT64; - if (IS_INTEGER_TYPE(d->type.type) || IS_TIMESTAMP_TYPE(d->type.type)) { + if (point->val.kind == FLT_SCL_DATUM_KIND_MAX) { + getDataMax(point->val.type.type, &(d->i)); + } else if (point->val.kind == FLT_SCL_DATUM_KIND_MIN) { + getDataMin(point->val.type.type, &(d->i)); + } else if (point->val.kind == FLT_SCL_DATUM_KIND_INT64) { if (point->excl) { if (point->start) { ++d->i; @@ -4002,6 +3999,28 @@ static int32_t fltSclGetDatumValueFromPoint(SFltSclPoint *point, SFltSclDatum *d --d->i; } } + } else if (point->val.kind == FLT_SCL_DATUM_KIND_FLOAT64) { + double v = d->d; + if (point->excl) { + if (point->start) { + d->i = v + 1; + } else { + d->i = v - 1; + } + } else { + d->i = v; + } + } else if (point->val.kind == FLT_SCL_DATUM_KIND_UINT64) { + uint64_t v = d->u; + if (point->excl) { + if (point->start) { + d->i = v + 
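/* fltSclGetTimeStampDatum() here normalizes every endpoint kind to an
   inclusive int64 timestamp: for exclusive bounds, "> v" becomes ">= v+1" on
   the start side and "< v" becomes "<= v-1" on the end side, which is what the
   surrounding +1/-1 implements. Sketch (toInclusive is an illustrative name):

   #include <stdint.h>

   static int64_t toInclusive(int64_t v, int isStart, int excl) {
     if (!excl) return v;
     return isStart ? v + 1 : v - 1;  // shift the open end inward
   }
*/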
1; + } else { + d->i = v - 1; + } + } else { + d->i = v; + } } else { qError("not supported type %d when get datum from point", d->type.type); } @@ -4022,12 +4041,13 @@ int32_t filterGetTimeRange(SNode *pNode, STimeWindow *win, bool *isStrict) { SFltSclColumnRange *colRange = taosArrayGet(colRanges, 0); SArray *points = colRange->points; if (taosArrayGetSize(points) == 2) { + *win = TSWINDOW_DESC_INITIALIZER; SFltSclPoint *startPt = taosArrayGet(points, 0); SFltSclPoint *endPt = taosArrayGet(points, 1); SFltSclDatum start; SFltSclDatum end; - fltSclGetDatumValueFromPoint(startPt, &start); - fltSclGetDatumValueFromPoint(endPt, &end); + fltSclGetTimeStampDatum(startPt, &start); + fltSclGetTimeStampDatum(endPt, &end); win->skey = start.i; win->ekey = end.i; *isStrict = true; diff --git a/source/libs/scalar/src/scalar.c b/source/libs/scalar/src/scalar.c index cc6be68c85..3e003234cf 100644 --- a/source/libs/scalar/src/scalar.c +++ b/source/libs/scalar/src/scalar.c @@ -1671,6 +1671,9 @@ static int32_t sclGetJsonOperatorResType(SOperatorNode *pOp) { } static int32_t sclGetBitwiseOperatorResType(SOperatorNode *pOp) { + if (!pOp->pLeft || !pOp->pRight) { + return TSDB_CODE_TSC_INVALID_OPERATION; + } SDataType ldt = ((SExprNode *)(pOp->pLeft))->resType; SDataType rdt = ((SExprNode *)(pOp->pRight))->resType; if(TSDB_DATA_TYPE_VARBINARY == ldt.type || TSDB_DATA_TYPE_VARBINARY == rdt.type){ diff --git a/source/libs/scalar/src/sclvector.c b/source/libs/scalar/src/sclvector.c index e12c62ad87..c5789a65ca 100644 --- a/source/libs/scalar/src/sclvector.c +++ b/source/libs/scalar/src/sclvector.c @@ -329,6 +329,7 @@ static FORCE_INLINE void varToVarbinary(char *buf, SScalarParam *pOut, int32_t r if (t == NULL) { sclError("Out of memory"); terrno = TSDB_CODE_OUT_OF_MEMORY; + taosMemoryFree(data); return; } varDataSetLen(t, size); diff --git a/source/libs/stream/src/streamBackendRocksdb.c b/source/libs/stream/src/streamBackendRocksdb.c index df32ebc8aa..ce4feb38eb 100644 --- a/source/libs/stream/src/streamBackendRocksdb.c +++ b/source/libs/stream/src/streamBackendRocksdb.c @@ -178,6 +178,10 @@ void bkdMgtDestroy(SBackendManager* bm) { taosHashCleanup(bm->pSstTbl[0]); taosHashCleanup(bm->pSstTbl[1]); + + taosMemoryFree(bm->pCurrent); + taosMemoryFree(bm->pManifest); + taosMemoryFree(bm); } @@ -239,7 +243,7 @@ int32_t bkdMgtGetDelta(SBackendManager* bm, int64_t chkpId, SArray* list) { continue; } if (strlen(name) >= sstLen && strncmp(name + strlen(name) - 4, pSST, sstLen) == 0) { - char* p = taosStrdup(name); + // char* p = taosStrdup(name); taosHashPut(bm->pSstTbl[1 - bm->idx], name, strlen(name), &dummy, sizeof(dummy)); continue; } @@ -267,7 +271,7 @@ int32_t bkdMgtGetDelta(SBackendManager* bm, int64_t chkpId, SArray* list) { taosArrayClearP(bm->pDel, taosMemoryFree); taosHashClear(bm->pSstTbl[1 - bm->idx]); bm->update = 0; - + taosCloseDir(&pDir); return code; } @@ -280,6 +284,8 @@ int32_t bkdMgtGetDelta(SBackendManager* bm, int64_t chkpId, SArray* list) { taosHashClear(bm->pSstTbl[bm->idx]); bm->idx = 1 - bm->idx; + taosCloseDir(&pDir); + return 0; } @@ -287,8 +293,8 @@ int32_t bkdMgtDumpTo(SBackendManager* bm, char* dname) { int32_t code = 0; int32_t len = bm->len + 128; - char* dstBuf = taosMemoryCalloc(1, len); char* srcBuf = taosMemoryCalloc(1, len); + char* dstBuf = taosMemoryCalloc(1, len); char* srcDir = taosMemoryCalloc(1, len); char* dstDir = taosMemoryCalloc(1, len); @@ -357,6 +363,7 @@ int32_t bkdMgtDumpTo(SBackendManager* bm, char* dname) { taosArrayClearP(bm->pAdd, taosMemoryFree); 
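/* The streamBackendRocksdb hunks share one theme: release resources on every
   exit path -- taosCloseDir() before both returns in bkdMgtGetDelta, a single
   _ERROR label in bkdMgtDumpTo, and copyFiles() freeing its scratch buffers
   (and returning -1 rather than a misleading 0) when the source dir cannot be
   opened. Miniature of the label-based shape with stdio stand-ins:

   #include <stdio.h>
   #include <stdlib.h>

   static int processDir(const char *path) {
     int   code = 0;
     char *buf  = calloc(1, 128);
     FILE *dir  = fopen(path, "rb");  // stands in for taosOpenDir
     if (buf == NULL || dir == NULL) { code = -1; goto _ERROR; }
     // ... scan entries ...
   _ERROR:
     if (dir) fclose(dir);            // closed on success and failure alike
     free(buf);
     return code;
   }
*/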
taosArrayClearP(bm->pDel, taosMemoryFree); +_ERROR: taosMemoryFree(srcBuf); taosMemoryFree(dstBuf); taosMemoryFree(srcDir); @@ -392,7 +399,11 @@ int32_t copyFiles(const char* src, const char* dst) { char* dstName = taosMemoryCalloc(1, dLen + 64); TdDirPtr pDir = taosOpenDir(src); - if (pDir == NULL) return 0; + if (pDir == NULL) { + taosMemoryFree(srcName); + taosMemoryFree(dstName); + return -1; + } TdDirEntryPtr de = NULL; while ((de = taosReadDir(pDir)) != NULL) { @@ -448,7 +459,7 @@ int32_t rebuildDirFromCheckpoint(const char* path, int64_t chkpId, char** dst) { } else { stError("failed to start stream backend at %s, reason: %s, restart from default state dir:%s", chkp, - tstrerror(TAOS_SYSTEM_ERROR(errno)), state); + tstrerror(TAOS_SYSTEM_ERROR(errno)), state); taosMkDir(state); } taosMemoryFree(chkp); @@ -904,7 +915,7 @@ int32_t streamBackendTriggerChkp(void* arg, char* dst) { stError("stream backend:%p failed to do checkpoint at:%s", pHandle, dst); } else { stDebug("stream backend:%p end to do checkpoint at:%s, time cost:%" PRId64 "ms", pHandle, dst, - taosGetTimestampMs() - st); + taosGetTimestampMs() - st); } } else { stError("stream backend:%p failed to flush db at:%s", pHandle, dst); @@ -980,9 +991,9 @@ int32_t streamBackendDoCheckpoint(void* arg, uint64_t checkpointId) { stError("stream backend:%p failed to do checkpoint at:%s", pHandle, pChkpIdDir); } else { stDebug("stream backend:%p end to do checkpoint at:%s, time cost:%" PRId64 "ms", pHandle, pChkpIdDir, - taosGetTimestampMs() - st); + taosGetTimestampMs() - st); } - } else { + } else { stError("stream backend:%p failed to flush db at:%s", pHandle, pChkpIdDir); } // release all ref to cfWrapper; @@ -1706,7 +1717,7 @@ void streamStateCloseBackend(SStreamState* pState, bool remove) { char* status[] = {"close", "drop"}; stInfo("start to %s state %p on backendWrapper %p %s", status[remove == false ? 
0 : 1], pState, wrapper, - wrapper->idstr); + wrapper->idstr); wrapper->remove |= remove; // update by other pState taosReleaseRef(streamBackendCfWrapperId, pState->pTdbState->backendCfWrapperId); } @@ -1779,35 +1790,36 @@ rocksdb_iterator_t* streamStateIterCreate(SStreamState* pState, const char* cfKe ((rocksdb_column_family_handle_t**)wrapper->pHandle)[idx]); } -#define STREAM_STATE_PUT_ROCKSDB(pState, funcname, key, value, vLen) \ - do { \ - code = 0; \ - char buf[128] = {0}; \ - char* err = NULL; \ - int i = streamStateGetCfIdx(pState, funcname); \ - if (i < 0) { \ - stWarn("streamState failed to get cf name: %s", funcname); \ - code = -1; \ - break; \ - } \ - SBackendCfWrapper* wrapper = pState->pTdbState->pBackendCfWrapper; \ - char toString[128] = {0}; \ - if (qDebugFlag & DEBUG_TRACE) ginitDict[i].toStrFunc((void*)key, toString); \ - int32_t klen = ginitDict[i].enFunc((void*)key, buf); \ - rocksdb_column_family_handle_t* pHandle = ((rocksdb_column_family_handle_t**)wrapper->pHandle)[ginitDict[i].idx]; \ - rocksdb_t* db = wrapper->rocksdb; \ - rocksdb_writeoptions_t* opts = wrapper->writeOpts; \ - char* ttlV = NULL; \ - int32_t ttlVLen = ginitDict[i].enValueFunc((char*)value, vLen, 0, &ttlV); \ - rocksdb_put_cf(db, opts, pHandle, (const char*)buf, klen, (const char*)ttlV, (size_t)ttlVLen, &err); \ - if (err != NULL) { \ - stError("streamState str: %s failed to write to %s, err: %s", toString, funcname, err); \ - taosMemoryFree(err); \ - code = -1; \ - } else { \ - stTrace("streamState str:%s succ to write to %s, rowValLen:%d, ttlValLen:%d", toString, funcname, vLen, ttlVLen); \ - } \ - taosMemoryFree(ttlV); \ +#define STREAM_STATE_PUT_ROCKSDB(pState, funcname, key, value, vLen) \ + do { \ + code = 0; \ + char buf[128] = {0}; \ + char* err = NULL; \ + int i = streamStateGetCfIdx(pState, funcname); \ + if (i < 0) { \ + stWarn("streamState failed to get cf name: %s", funcname); \ + code = -1; \ + break; \ + } \ + SBackendCfWrapper* wrapper = pState->pTdbState->pBackendCfWrapper; \ + char toString[128] = {0}; \ + if (qDebugFlag & DEBUG_TRACE) ginitDict[i].toStrFunc((void*)key, toString); \ + int32_t klen = ginitDict[i].enFunc((void*)key, buf); \ + rocksdb_column_family_handle_t* pHandle = ((rocksdb_column_family_handle_t**)wrapper->pHandle)[ginitDict[i].idx]; \ + rocksdb_t* db = wrapper->rocksdb; \ + rocksdb_writeoptions_t* opts = wrapper->writeOpts; \ + char* ttlV = NULL; \ + int32_t ttlVLen = ginitDict[i].enValueFunc((char*)value, vLen, 0, &ttlV); \ + rocksdb_put_cf(db, opts, pHandle, (const char*)buf, klen, (const char*)ttlV, (size_t)ttlVLen, &err); \ + if (err != NULL) { \ + stError("streamState str: %s failed to write to %s, err: %s", toString, funcname, err); \ + taosMemoryFree(err); \ + code = -1; \ + } else { \ + stTrace("streamState str:%s succ to write to %s, rowValLen:%d, ttlValLen:%d", toString, funcname, vLen, \ + ttlVLen); \ + } \ + taosMemoryFree(ttlV); \ } while (0); #define STREAM_STATE_GET_ROCKSDB(pState, funcname, key, pVal, vLen) \ @@ -1817,7 +1829,7 @@ rocksdb_iterator_t* streamStateIterCreate(SStreamState* pState, const char* cfKe char* err = NULL; \ int i = streamStateGetCfIdx(pState, funcname); \ if (i < 0) { \ - stWarn("streamState failed to get cf name: %s", funcname); \ + stWarn("streamState failed to get cf name: %s", funcname); \ code = -1; \ break; \ } \ @@ -1832,9 +1844,9 @@ rocksdb_iterator_t* streamStateIterCreate(SStreamState* pState, const char* cfKe char* val = rocksdb_get_cf(db, opts, pHandle, (const char*)buf, klen, (size_t*)&len, &err); \ if (val 
== NULL || len == 0) { \ if (err == NULL) { \ - stTrace("streamState str: %s failed to read from %s_%s, err: not exist", toString, wrapper->idstr, funcname); \ + stTrace("streamState str: %s failed to read from %s_%s, err: not exist", toString, wrapper->idstr, funcname); \ } else { \ - stError("streamState str: %s failed to read from %s_%s, err: %s", toString, wrapper->idstr, funcname, err); \ + stError("streamState str: %s failed to read from %s_%s, err: %s", toString, wrapper->idstr, funcname, err); \ taosMemoryFreeClear(err); \ } \ code = -1; \ @@ -1842,11 +1854,11 @@ rocksdb_iterator_t* streamStateIterCreate(SStreamState* pState, const char* cfKe char* p = NULL; \ int32_t tlen = ginitDict[i].deValueFunc(val, len, NULL, (char**)pVal); \ if (tlen <= 0) { \ - stError("streamState str: %s failed to read from %s_%s, err: already ttl ", toString, wrapper->idstr, \ - funcname); \ + stError("streamState str: %s failed to read from %s_%s, err: already ttl ", toString, wrapper->idstr, \ + funcname); \ code = -1; \ } else { \ - stTrace("streamState str: %s succ to read from %s_%s, valLen:%d", toString, wrapper->idstr, funcname, tlen); \ + stTrace("streamState str: %s succ to read from %s_%s, valLen:%d", toString, wrapper->idstr, funcname, tlen); \ } \ taosMemoryFree(val); \ if (vLen != NULL) *vLen = tlen; \ @@ -1860,7 +1872,7 @@ rocksdb_iterator_t* streamStateIterCreate(SStreamState* pState, const char* cfKe char* err = NULL; \ int i = streamStateGetCfIdx(pState, funcname); \ if (i < 0) { \ - stWarn("streamState failed to get cf name: %s_%s", pState->pTdbState->idstr, funcname); \ + stWarn("streamState failed to get cf name: %s_%s", pState->pTdbState->idstr, funcname); \ code = -1; \ break; \ } \ @@ -1873,11 +1885,11 @@ rocksdb_iterator_t* streamStateIterCreate(SStreamState* pState, const char* cfKe rocksdb_writeoptions_t* opts = wrapper->writeOpts; \ rocksdb_delete_cf(db, opts, pHandle, (const char*)buf, klen, &err); \ if (err != NULL) { \ - stError("streamState str: %s failed to del from %s_%s, err: %s", toString, wrapper->idstr, funcname, err); \ + stError("streamState str: %s failed to del from %s_%s, err: %s", toString, wrapper->idstr, funcname, err); \ taosMemoryFree(err); \ code = -1; \ } else { \ - stTrace("streamState str: %s succ to del from %s_%s", toString, wrapper->idstr, funcname); \ + stTrace("streamState str: %s succ to del from %s_%s", toString, wrapper->idstr, funcname); \ } \ } while (0); @@ -2378,6 +2390,7 @@ int32_t streamStateSessionGetKVByCur_rocksdb(SStreamStateCur* pCur, SSessionKey* char* val = NULL; int32_t len = decodeValueFunc((void*)vval, vLen, NULL, &val); if (len < 0) { + taosMemoryFree(val); return -1; } @@ -2861,6 +2874,7 @@ char* streamDefaultIterVal_rocksdb(void* iter, int32_t* len) { const char* val = rocksdb_iter_value(pCur->iter, (size_t*)&vlen); *len = decodeValueFunc((void*)val, vlen, NULL, &ret); if (*len < 0) { + taosMemoryFree(ret); return NULL; } diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index bed2fd658e..86f9afe59f 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -133,7 +133,6 @@ SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandF if (tdbTbOpen("checkpoint.db", sizeof(int32_t), -1, NULL, pMeta->db, &pMeta->pCheckpointDb, 0) < 0) { goto _err; } - if (streamMetaBegin(pMeta) < 0) { goto _err; } @@ -151,7 +150,6 @@ SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandF pMeta->startInfo.pReadyTaskSet = 
taosHashInit(64, fp, false, HASH_NO_LOCK); if (pMeta->startInfo.pReadyTaskSet == NULL) { - } pMeta->pHbInfo = taosMemoryCalloc(1, sizeof(SMetaHbInfo)); @@ -210,7 +208,7 @@ SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandF pMeta->numOfPausedTasks = 0; pMeta->numOfStreamTasks = 0; stInfo("vgId:%d open stream meta successfully, latest checkpoint:%" PRId64 ", stage:%" PRId64, vgId, pMeta->chkpId, - stage); + stage); return pMeta; _err: @@ -250,7 +248,7 @@ int32_t streamMetaReopen(SStreamMeta* pMeta) { if (code != 0) { terrno = TAOS_SYSTEM_ERROR(code); stError("vgId:%d failed to rename file, from %s to %s, code:%s", pMeta->vgId, newPath, defaultPath, - tstrerror(terrno)); + tstrerror(terrno)); taosMemoryFree(defaultPath); taosMemoryFree(newPath); @@ -383,10 +381,10 @@ int32_t streamMetaRemoveTask(SStreamMeta* pMeta, STaskId* pTaskId) { int64_t key[2] = {pTaskId->streamId, pTaskId->taskId}; int32_t code = tdbTbDelete(pMeta->pTaskDb, key, STREAM_TASK_KEY_LEN, pMeta->txn); if (code != 0) { - stError("vgId:%d failed to remove task:0x%x from metastore, code:%s", pMeta->vgId, (int32_t) pTaskId->taskId, - tstrerror(terrno)); + stError("vgId:%d failed to remove task:0x%x from metastore, code:%s", pMeta->vgId, (int32_t)pTaskId->taskId, + tstrerror(terrno)); } else { - stDebug("vgId:%d remove task:0x%x from metastore", pMeta->vgId, (int32_t) pTaskId->taskId); + stDebug("vgId:%d remove task:0x%x from metastore", pMeta->vgId, (int32_t)pTaskId->taskId); } return code; @@ -443,7 +441,7 @@ int32_t streamMetaGetNumOfStreamTasks(SStreamMeta* pMeta) { int32_t num = 0; size_t size = taosArrayGetSize(pMeta->pTaskList); for (int32_t i = 0; i < size; ++i) { - STaskId* pId = taosArrayGet(pMeta->pTaskList, i); + STaskId* pId = taosArrayGet(pMeta->pTaskList, i); SStreamTask** p = taosHashGet(pMeta->pTasksMap, pId, sizeof(*pId)); if (p == NULL) { continue; @@ -460,7 +458,7 @@ int32_t streamMetaGetNumOfStreamTasks(SStreamMeta* pMeta) { SStreamTask* streamMetaAcquireTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId) { taosRLockLatch(&pMeta->lock); - STaskId id = {.streamId = streamId, .taskId = taskId}; + STaskId id = {.streamId = streamId, .taskId = taskId}; SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); if (ppTask != NULL) { if (!streamTaskShouldStop(&(*ppTask)->status)) { @@ -504,7 +502,7 @@ int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t t // pre-delete operation taosWLockLatch(&pMeta->lock); - STaskId id = {.streamId = streamId, .taskId = taskId}; + STaskId id = {.streamId = streamId, .taskId = taskId}; SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); if (ppTask) { pTask = *ppTask; @@ -521,7 +519,7 @@ int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t t taosWUnLockLatch(&pMeta->lock); stDebug("s-task:0x%x set task status:%s and start to unregister it", taskId, - streamGetTaskStatusStr(TASK_STATUS__DROPPING)); + streamGetTaskStatusStr(TASK_STATUS__DROPPING)); while (1) { taosRLockLatch(&pMeta->lock); @@ -659,7 +657,7 @@ static void doClear(void* pKey, void* pVal, TBC* pCur, SArray* pRecycleList) { } int32_t streamMetaLoadAllTasks(SStreamMeta* pMeta) { - TBC* pCur = NULL; + TBC* pCur = NULL; int32_t vgId = pMeta->vgId; stInfo("vgId:%d load stream tasks from meta files", vgId); @@ -692,8 +690,8 @@ int32_t streamMetaLoadAllTasks(SStreamMeta* pMeta) { doClear(pKey, pVal, pCur, pRecycleList); tFreeStreamTask(pTask); stError( - "vgId:%d stream read 
incompatible data, rm %s/vnode/vnode*/tq/stream if taosd cannot start, and rebuild stream " - "manually", vgId, tsDataDir); + "vgId:%d stream read incompatible data, rm %s/vnode/vnode*/tq/stream if taosd cannot start, and rebuild " + "stream manually", vgId, tsDataDir); return -1; } tDecoderClear(&decoder); @@ -765,7 +763,7 @@ int32_t streamMetaLoadAllTasks(SStreamMeta* pMeta) { int32_t numOfTasks = taosArrayGetSize(pMeta->pTaskList); ASSERT(pMeta->numOfStreamTasks <= numOfTasks && pMeta->numOfPausedTasks <= numOfTasks); stDebug("vgId:%d load %d tasks into meta from disk completed, streamTask:%d, paused:%d", pMeta->vgId, numOfTasks, - pMeta->numOfStreamTasks, pMeta->numOfPausedTasks); + pMeta->numOfStreamTasks, pMeta->numOfPausedTasks); taosArrayDestroy(pRecycleList); return 0; } @@ -830,7 +828,7 @@ int32_t tDecodeStreamHbMsg(SDecoder* pDecoder, SStreamHbMsg* pReq) { } static bool waitForEnoughDuration(SMetaHbInfo* pInfo) { - if ((++pInfo->tickCounter) >= META_HB_SEND_IDLE_COUNTER) { // reset the counter + if ((++pInfo->tickCounter) >= META_HB_SEND_IDLE_COUNTER) { // reset the counter pInfo->tickCounter = 0; return true; } @@ -966,7 +964,7 @@ void metaHbToMnode(void* param, void* tmrId) { pMeta->pHbInfo->hbCount += 1; stDebug("vgId:%d, build and send hb to mnode, numOfTasks:%d total:%d", pMeta->vgId, hbMsg.numOfTasks, - pMeta->pHbInfo->hbCount); + pMeta->pHbInfo->hbCount); tmsgSendReq(&epset, &msg); } else { stDebug("vgId:%d no tasks and no mnd epset, not send stream hb to mnode", pMeta->vgId); @@ -1003,7 +1001,7 @@ void streamMetaNotifyClose(SStreamMeta* pMeta) { int32_t vgId = pMeta->vgId; stDebug("vgId:%d notify all stream tasks that the vnode is closing. isLeader:%d startHb%" PRId64 ", totalHb:%d", vgId, - (pMeta->role == NODE_ROLE_LEADER), pMeta->pHbInfo->hbStart, pMeta->pHbInfo->hbCount); + (pMeta->role == NODE_ROLE_LEADER), pMeta->pHbInfo->hbStart, pMeta->pHbInfo->hbCount); taosWLockLatch(&pMeta->lock); diff --git a/source/libs/stream/src/streamSnapshot.c b/source/libs/stream/src/streamSnapshot.c index 6022935197..3de5de9967 100644 --- a/source/libs/stream/src/streamSnapshot.c +++ b/source/libs/stream/src/streamSnapshot.c @@ -17,6 +17,7 @@ #include "query.h" #include "rocksdb/c.h" #include "streamBackendRocksdb.h" +#include "streamInt.h" #include "tcommon.h" #include "streamInt.h" @@ -126,7 +127,8 @@ int32_t streamSnapHandleInit(SStreamSnapHandle* pHandle, char* path, int64_t chk stInfo("%s start to read snap %s", STREAM_STATE_TRANSFER, tdir); streamBackendAddInUseChkp(pMeta, chkpId); } else { - stWarn("%s failed to read from %s, reason: dir not exist,retry to default state dir", STREAM_STATE_TRANSFER, tdir); + stWarn("%s failed to read from %s, reason: dir not exist,retry to default state dir", STREAM_STATE_TRANSFER, + tdir); } } @@ -271,7 +273,7 @@ void streamSnapHandleDestroy(SStreamSnapHandle* handle) { if (handle->checkpointId == 0) { // del tmp dir - if (taosIsDir(pFile->path)) { + if (pFile && taosIsDir(pFile->path)) { taosRemoveDir(pFile->path); } } else { @@ -335,27 +337,27 @@ int32_t streamSnapRead(SStreamSnapReader* pReader, uint8_t** ppData, int64_t* si } else { pHandle->fd = streamOpenFile(pFile->path, item->name, TD_FILE_READ); stDebug("%s open file %s, current offset:%" PRId64 ", size:% " PRId64 ", file no.%d", STREAM_STATE_TRANSFER, - item->name, (int64_t)pHandle->offset, item->size, pHandle->currFileIdx); + item->name, (int64_t)pHandle->offset, item->size, pHandle->currFileIdx); } } stDebug("%s start to read file %s, current offset:%" PRId64 ", size:%" PRId64 ", 
file no.%d", STREAM_STATE_TRANSFER, - item->name, (int64_t)pHandle->offset, item->size, pHandle->currFileIdx); + item->name, (int64_t)pHandle->offset, item->size, pHandle->currFileIdx); uint8_t* buf = taosMemoryCalloc(1, sizeof(SStreamSnapBlockHdr) + kBlockSize); if(buf == NULL){ return TSDB_CODE_OUT_OF_MEMORY; } int64_t nread = taosPReadFile(pHandle->fd, buf + sizeof(SStreamSnapBlockHdr), kBlockSize, pHandle->offset); if (nread == -1) { + taosMemoryFree(buf); code = TAOS_SYSTEM_ERROR(terrno); stError("%s snap failed to read snap, file name:%s, type:%d,reason:%s", STREAM_STATE_TRANSFER, item->name, - item->type, tstrerror(code)); - taosMemoryFree(buf); + item->type, tstrerror(code)); return -1; } else if (nread > 0 && nread <= kBlockSize) { // left bytes less than kBlockSize stDebug("%s read file %s, current offset:%" PRId64 ",size:% " PRId64 ", file no.%d", STREAM_STATE_TRANSFER, - item->name, (int64_t)pHandle->offset, item->size, pHandle->currFileIdx); + item->name, (int64_t)pHandle->offset, item->size, pHandle->currFileIdx); pHandle->offset += nread; if (pHandle->offset >= item->size || nread < kBlockSize) { taosCloseFile(&pHandle->fd); @@ -364,7 +366,7 @@ int32_t streamSnapRead(SStreamSnapReader* pReader, uint8_t** ppData, int64_t* si } } else { stDebug("%s no data read, close file no.%d, move to next file, open and read", STREAM_STATE_TRANSFER, - pHandle->currFileIdx); + pHandle->currFileIdx); taosCloseFile(&pHandle->fd); pHandle->offset = 0; pHandle->currFileIdx += 1; @@ -383,7 +385,7 @@ int32_t streamSnapRead(SStreamSnapReader* pReader, uint8_t** ppData, int64_t* si pHandle->offset += nread; stDebug("%s open file and read file %s, current offset:%" PRId64 ", size:% " PRId64 ", file no.%d", - STREAM_STATE_TRANSFER, item->name, (int64_t)pHandle->offset, item->size, pHandle->currFileIdx); + STREAM_STATE_TRANSFER, item->name, (int64_t)pHandle->offset, item->size, pHandle->currFileIdx); } SStreamSnapBlockHdr* pHdr = (SStreamSnapBlockHdr*)buf; @@ -438,8 +440,8 @@ int32_t streamSnapWrite(SStreamSnapWriter* pWriter, uint8_t* pData, uint32_t nDa pHandle->fd = streamOpenFile(pFile->path, pItem->name, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_APPEND); if (pHandle->fd == NULL) { code = TAOS_SYSTEM_ERROR(terrno); - stError("%s failed to open file name:%s%s%s, reason:%s", STREAM_STATE_TRANSFER, pFile->path, TD_DIRSEP, pHdr->name, - tstrerror(code)); + stError("%s failed to open file name:%s%s%s, reason:%s", STREAM_STATE_TRANSFER, pFile->path, TD_DIRSEP, + pHdr->name, tstrerror(code)); } } @@ -465,8 +467,8 @@ int32_t streamSnapWrite(SStreamSnapWriter* pWriter, uint8_t* pData, uint32_t nDa pHandle->fd = streamOpenFile(pFile->path, pItem->name, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_APPEND); if (pHandle->fd == NULL) { code = TAOS_SYSTEM_ERROR(terrno); - stError("%s failed to open file name:%s%s%s, reason:%s", STREAM_STATE_TRANSFER, pFile->path, TD_DIRSEP, pHdr->name, - tstrerror(code)); + stError("%s failed to open file name:%s%s%s, reason:%s", STREAM_STATE_TRANSFER, pFile->path, TD_DIRSEP, + pHdr->name, tstrerror(code)); } taosPWriteFile(pHandle->fd, pHdr->data, pHdr->size, pHandle->offset); diff --git a/source/libs/stream/src/streamUpdate.c b/source/libs/stream/src/streamUpdate.c index f9ab672c4b..59471e8d8e 100644 --- a/source/libs/stream/src/streamUpdate.c +++ b/source/libs/stream/src/streamUpdate.c @@ -103,10 +103,12 @@ SUpdateInfo *updateInfoInit(int64_t interval, int32_t precision, int64_t waterma pInfo->minTS = -1; pInfo->interval = adjustInterval(interval, precision); pInfo->watermark = 
adjustWatermark(pInfo->interval, interval, watermark); + pInfo->numSBFs = 0; uint64_t bfSize = 0; if (!igUp) { bfSize = (uint64_t)(pInfo->watermark / pInfo->interval); + pInfo->numSBFs = bfSize; pInfo->pTsSBFs = taosArrayInit(bfSize, sizeof(void *)); if (pInfo->pTsSBFs == NULL) { @@ -130,7 +132,6 @@ SUpdateInfo *updateInfoInit(int64_t interval, int32_t precision, int64_t waterma _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_UBIGINT); pInfo->pMap = taosHashInit(DEFAULT_MAP_CAPACITY, hashFn, true, HASH_NO_LOCK); } - pInfo->numSBFs = bfSize; pInfo->maxDataVersion = 0; return pInfo; } diff --git a/source/libs/sync/src/syncPipeline.c b/source/libs/sync/src/syncPipeline.c index 532a6955cf..019f8f7e62 100644 --- a/source/libs/sync/src/syncPipeline.c +++ b/source/libs/sync/src/syncPipeline.c @@ -197,6 +197,7 @@ int32_t syncLogBufferInitWithoutLock(SSyncLogBuffer* pBuf, SSyncNode* pNode) { SyncIndex index = toIndex; SSyncRaftEntry* pEntry = NULL; bool takeDummy = false; + int emptySize = (TSDB_SYNC_LOG_BUFFER_SIZE >> 1); while (true) { if (index <= pBuf->commitIndex) { @@ -210,7 +211,6 @@ int32_t syncLogBufferInitWithoutLock(SSyncLogBuffer* pBuf, SSyncNode* pNode) { } bool taken = false; - int emptySize = 5; if (toIndex - index + 1 <= pBuf->size - emptySize) { SSyncLogBufEntry tmp = {.pItem = pEntry, .prevLogIndex = -1, .prevLogTerm = -1}; pBuf->entries[index % pBuf->size] = tmp; diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index b66a08bd20..677e08ec56 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -576,6 +576,7 @@ void* destroyConnPool(SCliThrd* pThrd) { connList = taosHashIterate((SHashObj*)pool, connList); } taosHashCleanup(pool); + pThrd->pool = NULL; return NULL; } @@ -870,8 +871,10 @@ static void cliDestroyConn(SCliConn* conn, bool clear) { connList->list->numOfConn--; connList->size--; } else { - SConnList* connList = taosHashGet((SHashObj*)pThrd->pool, conn->dstAddr, strlen(conn->dstAddr) + 1); - if (connList != NULL) connList->list->numOfConn--; + if (pThrd->pool) { + SConnList* connList = taosHashGet((SHashObj*)pThrd->pool, conn->dstAddr, strlen(conn->dstAddr) + 1); + if (connList != NULL) connList->list->numOfConn--; + } } conn->list = NULL; pThrd->newConnCount--; diff --git a/source/libs/transport/src/transSvr.c b/source/libs/transport/src/transSvr.c index ca7e411874..bf73c253bc 100644 --- a/source/libs/transport/src/transSvr.c +++ b/source/libs/transport/src/transSvr.c @@ -1498,7 +1498,10 @@ int transSendResponse(const STransMsg* msg) { return 0; } SExHandle* exh = msg->info.handle; - int64_t refId = msg->info.refId; + if (exh == NULL) { + return 0; + } + int64_t refId = msg->info.refId; ASYNC_CHECK_HANDLE(exh, refId); STransMsg tmsg = *msg; diff --git a/source/libs/wal/src/walRead.c b/source/libs/wal/src/walRead.c index 2eee04a27a..c0435ca774 100644 --- a/source/libs/wal/src/walRead.c +++ b/source/libs/wal/src/walRead.c @@ -56,6 +56,8 @@ SWalReader *walOpenReader(SWal *pWal, SWalFilterCond *cond, int64_t id) { } void walCloseReader(SWalReader *pReader) { + if(pReader == NULL) return; + taosCloseFile(&pReader->pIdxFile); taosCloseFile(&pReader->pLogFile); taosMemoryFreeClear(pReader->pHead); diff --git a/source/os/src/osRand.c b/source/os/src/osRand.c index 43abc75d4f..b71be59f1d 100644 --- a/source/os/src/osRand.c +++ b/source/os/src/osRand.c @@ -86,9 +86,9 @@ void taosRandStr(char* str, int32_t size) { } void taosRandStr2(char* str, int32_t size) { - + const char* set = 
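The updateInfoInit() fix above (initialize numSBFs to 0 and set it only when update checks are enabled) is easier to see in isolation: the number of scalable bloom filters is just the number of interval windows covered by the watermark. A tiny sketch with made-up values:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* The update-check path keeps one (scalable) bloom filter per time window
 * covered by the watermark, so the filter count is watermark / interval. */
static uint64_t numOfSBFs(int64_t watermark, int64_t interval, bool ignoreUpdate) {
  if (ignoreUpdate) return 0; /* the fix above: numSBFs must stay 0 when filters are skipped */
  return (uint64_t)(watermark / interval);
}

int main(void) {
  /* e.g. a 10 min watermark over 30 s windows -> 20 filters (values are made up) */
  printf("%llu\n", (unsigned long long)numOfSBFs(600000, 30000, false));
  printf("%llu\n", (unsigned long long)numOfSBFs(600000, 30000, true));
  return 0;
}
```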
"abcdefghijklmnopqrstuvwxyz0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ@"; - int32_t len = strlen(set); + int32_t len = strlen(set); for (int32_t i = 0; i < size; ++i) { str[i] = set[taosRand() % len]; diff --git a/source/os/src/osSysinfo.c b/source/os/src/osSysinfo.c index 562328a198..4816ec8f8b 100644 --- a/source/os/src/osSysinfo.c +++ b/source/os/src/osSysinfo.c @@ -852,13 +852,12 @@ void taosGetProcIODelta(int64_t *rchars, int64_t *wchars, int64_t *read_bytes, i } int32_t taosGetCardInfo(int64_t *receive_bytes, int64_t *transmit_bytes) { -#ifdef WINDOWS *receive_bytes = 0; *transmit_bytes = 0; + +#ifdef WINDOWS return 0; #elif defined(_TD_DARWIN_64) - *receive_bytes = 0; - *transmit_bytes = 0; return 0; #else TdFilePtr pFile = taosOpenFile(tsSysNetFile, TD_FILE_READ | TD_FILE_STREAM); @@ -895,8 +894,8 @@ int32_t taosGetCardInfo(int64_t *receive_bytes, int64_t *transmit_bytes) { "%s %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64, nouse0, &o_rbytes, &rpackts, &nouse1, &nouse2, &nouse3, &nouse4, &nouse5, &nouse6, &o_tbytes, &tpackets); - *receive_bytes = o_rbytes; - *transmit_bytes = o_tbytes; + *receive_bytes += o_rbytes; + *transmit_bytes += o_tbytes; } taosCloseFile(&pFile); @@ -908,8 +907,8 @@ int32_t taosGetCardInfo(int64_t *receive_bytes, int64_t *transmit_bytes) { void taosGetCardInfoDelta(int64_t *receive_bytes, int64_t *transmit_bytes) { static int64_t last_receive_bytes = 0; static int64_t last_transmit_bytes = 0; - static int64_t cur_receive_bytes = 0; - static int64_t cur_transmit_bytes = 0; + int64_t cur_receive_bytes = 0; + int64_t cur_transmit_bytes = 0; if (taosGetCardInfo(&cur_receive_bytes, &cur_transmit_bytes) == 0) { *receive_bytes = cur_receive_bytes - last_receive_bytes; *transmit_bytes = cur_transmit_bytes - last_transmit_bytes; diff --git a/source/util/src/talgo.c b/source/util/src/talgo.c index e373850b3c..8d83a70c11 100644 --- a/source/util/src/talgo.c +++ b/source/util/src/talgo.c @@ -273,3 +273,86 @@ void taosheapsort(void *base, int32_t size, int32_t len, const void *parcompar, taosMemoryFree(buf); } + +static void taosMerge(void *src, int32_t start, int32_t leftend, int32_t end, int64_t size, const void *param, + __ext_compar_fn_t comparFn, void *tmp) { + int32_t leftSize = leftend - start + 1; + int32_t rightSize = end - leftend; + + void *leftBuf = tmp; + void *rightBuf = (char *)tmp + (leftSize * size); + + memcpy(leftBuf, elePtrAt(src, size, start), leftSize * size); + memcpy(rightBuf, elePtrAt(src, size, leftend + 1), rightSize * size); + + int32_t i = 0, j = 0, k = start; + + while (i < leftSize && j < rightSize) { + int32_t ret = comparFn(elePtrAt(leftBuf, size, i), elePtrAt(rightBuf, size, j), param); + if (ret <= 0) { + memcpy(elePtrAt(src, size, k), elePtrAt(leftBuf, size, i), size); + i++; + } else { + memcpy(elePtrAt(src, size, k), elePtrAt(rightBuf, size, j), size); + j++; + } + k++; + } + + while (i < leftSize) { + memcpy(elePtrAt(src, size, k), elePtrAt(leftBuf, size, i), size); + i++; + k++; + } + + while (j < rightSize) { + memcpy(elePtrAt(src, size, k), elePtrAt(rightBuf, size, j), size); + j++; + k++; + } +} + +static int32_t taosMergeSortHelper(void *src, int64_t numOfElem, int64_t size, const void *param, + __ext_compar_fn_t comparFn) { + // short array sort, instead of merge sort process + const int32_t THRESHOLD_SIZE = 6; + char *buf = taosMemoryCalloc(1, size); // prepare the swap buffer + if (buf == NULL) return TSDB_CODE_OUT_OF_MEMORY; + for (int32_t start = 0; start < 
numOfElem - 1; start += THRESHOLD_SIZE) { + int32_t end = (start + THRESHOLD_SIZE - 1) <= numOfElem - 1 ? (start + THRESHOLD_SIZE - 1) : numOfElem - 1; + tInsertSort(src, size, start, end, param, comparFn, buf); + } + taosMemoryFreeClear(buf); + + if (numOfElem > THRESHOLD_SIZE) { + int32_t currSize; + void *tmp = taosMemoryMalloc(numOfElem * size); + if (tmp == NULL) return TSDB_CODE_OUT_OF_MEMORY; + + for (currSize = THRESHOLD_SIZE; currSize <= numOfElem - 1; currSize = 2 * currSize) { + int32_t leftStart; + for (leftStart = 0; leftStart < numOfElem - 1; leftStart += 2 * currSize) { + int32_t leftend = leftStart + currSize - 1; + int32_t rightEnd = + (leftStart + 2 * currSize - 1 < numOfElem - 1) ? (leftStart + 2 * currSize - 1) : (numOfElem - 1); + if (leftend >= rightEnd) break; + + taosMerge(src, leftStart, leftend, rightEnd, size, param, comparFn, tmp); + } + } + + taosMemoryFreeClear(tmp); + } + return 0; +} + +int32_t msortHelper(const void *p1, const void *p2, const void *param) { + __compar_fn_t comparFn = param; + return comparFn(p1, p2); +} + + +int32_t taosMergeSort(void *src, int64_t numOfElem, int64_t size, __compar_fn_t comparFn) { + void *param = comparFn; + return taosMergeSortHelper(src, numOfElem, size, param, msortHelper); +} diff --git a/source/util/src/tarray.c b/source/util/src/tarray.c index 8e7c0f9584..a7c28df22b 100644 --- a/source/util/src/tarray.c +++ b/source/util/src/tarray.c @@ -417,6 +417,10 @@ void taosArraySort(SArray* pArray, __compar_fn_t compar) { taosSort(pArray->pData, pArray->size, pArray->elemSize, compar); } +int32_t taosArrayMSort(SArray* pArray, __compar_fn_t compar) { + return taosMergeSort(pArray->pData, pArray->size, pArray->elemSize, compar); +} + void* taosArraySearch(const SArray* pArray, const void* key, __compar_fn_t comparFn, int32_t flags) { return taosbsearch(key, pArray->pData, pArray->size, pArray->elemSize, comparFn, flags); } diff --git a/source/util/test/CMakeLists.txt b/source/util/test/CMakeLists.txt index 0bf06e6f44..94f8deee44 100644 --- a/source/util/test/CMakeLists.txt +++ b/source/util/test/CMakeLists.txt @@ -84,3 +84,11 @@ add_test( NAME pageBufferTest COMMAND pageBufferTest ) + +# talgoTest +add_executable(talgoTest "talgoTest.cpp") +target_link_libraries(talgoTest os util gtest_main) +add_test( + NAME talgoTest + COMMAND talgoTest +) diff --git a/source/util/test/talgoTest.cpp b/source/util/test/talgoTest.cpp new file mode 100644 index 0000000000..b5a8db7378 --- /dev/null +++ b/source/util/test/talgoTest.cpp @@ -0,0 +1,104 @@ +#include <gtest/gtest.h> +#include <time.h> +#include "talgo.h" + +struct TestStruct { + int a; + float b; +}; + +// Define a custom comparison function for testing +int cmpFunc(const void* a, const void* b) { + const TestStruct* pa = reinterpret_cast<const TestStruct*>(a); + const TestStruct* pb = reinterpret_cast<const TestStruct*>(b); + if (pa->a < pb->a) { + return -1; + } else if (pa->a > pb->a) { + return 1; + } else { + return 0; + } +} + +TEST(utilTest, taosMSort) { + // Create an array of test data + TestStruct arr[] = {{4, 2.5}, {3, 6}, {2, 1.5}, {3, 2}, {1, 3.5}, {3, 5}}; + + // Sort the array using taosMergeSort + taosMergeSort(arr, 6, sizeof(TestStruct), cmpFunc); + + for (int i = 0; i < sizeof(arr) / sizeof(TestStruct); i++) { + printf("%d: %d %f\n", i, arr[i].a, arr[i].b); + } + + // Check that the array is sorted correctly + EXPECT_EQ(arr[0].a, 1); + EXPECT_EQ(arr[1].a, 2); + EXPECT_EQ(arr[2].a, 3); + EXPECT_EQ(arr[2].b, 6); + EXPECT_EQ(arr[3].a, 3); + EXPECT_EQ(arr[3].b, 2); + EXPECT_EQ(arr[4].a, 3); + EXPECT_EQ(arr[4].b, 5); + EXPECT_EQ(arr[5].a,
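For readers new to the algorithm taosMergeSortHelper() implements: it insertion-sorts fixed-size runs (THRESHOLD_SIZE = 6), then bottom-up merges runs of doubling width through a scratch buffer. Below is a self-contained sketch of the same scheme over plain ints; the run length mirrors the patch, but this is an illustration, not the library code.

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define RUN 6 /* same run length as THRESHOLD_SIZE above */

static void insertionSort(int *a, int lo, int hi) {
  for (int i = lo + 1; i <= hi; ++i) {
    int v = a[i], j = i - 1;
    while (j >= lo && a[j] > v) { a[j + 1] = a[j]; --j; }
    a[j + 1] = v;
  }
}

static void merge(int *a, int lo, int mid, int hi, int *tmp) {
  int i = lo, j = mid + 1, k = lo;
  while (i <= mid && j <= hi) tmp[k++] = (a[i] <= a[j]) ? a[i++] : a[j++]; /* <= keeps it stable */
  while (i <= mid) tmp[k++] = a[i++];
  while (j <= hi) tmp[k++] = a[j++];
  memcpy(a + lo, tmp + lo, (size_t)(hi - lo + 1) * sizeof(int));
}

/* Bottom-up merge sort: insertion-sort short runs, then merge runs of
 * doubling width -- the same shape as taosMergeSortHelper(). */
static void mergeSort(int *a, int n) {
  for (int lo = 0; lo < n; lo += RUN) {
    int hi = (lo + RUN - 1 < n - 1) ? lo + RUN - 1 : n - 1;
    insertionSort(a, lo, hi);
  }
  int *tmp = malloc((size_t)n * sizeof(int));
  if (tmp == NULL) return;
  for (int width = RUN; width < n; width *= 2) {
    for (int lo = 0; lo + width < n; lo += 2 * width) {
      int mid = lo + width - 1;
      int hi = (lo + 2 * width - 1 < n - 1) ? lo + 2 * width - 1 : n - 1;
      merge(a, lo, mid, hi, tmp);
    }
  }
  free(tmp);
}

int main(void) {
  int a[] = {9, 3, 7, 1, 8, 2, 6, 4, 5, 0};
  mergeSort(a, 10);
  for (int i = 0; i < 10; ++i) printf("%d ", a[i]);
  printf("\n");
  return 0;
}
```

Using `<=` in the merge keeps the sort stable, which is what the taosMSort test asserts for the three equal keys ({3,6}, {3,2}, {3,5} keep their input order); taosSort(), being qsort()-based, gives no such guarantee.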
4); +} + +int cmpInt(const void* a, const void* b) { + int int_a = *((int*)a); + int int_b = *((int*)b); + + if (int_a == int_b) + return 0; + else if (int_a < int_b) + return -1; + else + return 1; +} + +TEST(utilTest, taosMSort2) { + clock_t start_time, end_time; + double cpu_time_used; + + int times = 10000; + start_time = clock(); + for (int i = 0; i < 10000; i++) { + TestStruct arr[] = {{4, 2.5}, {3, 6}, {2, 1.5}, {3, 2}, {1, 3.5}, {3, 5}}; + taosMergeSort(arr, 6, sizeof(TestStruct), cmpFunc); + } + end_time = clock(); + cpu_time_used = ((double)(end_time - start_time)) / CLOCKS_PER_SEC; + printf("taosMSort %d times: %f s\n", times, cpu_time_used); + + start_time = clock(); + for (int i = 0; i < 10000; i++) { + TestStruct arr[] = {{4, 2.5}, {3, 6}, {2, 1.5}, {3, 2}, {1, 3.5}, {3, 5}}; + taosSort(arr, 6, sizeof(TestStruct), cmpFunc); + } + end_time = clock(); + cpu_time_used = ((double)(end_time - start_time)) / CLOCKS_PER_SEC; + printf("taosSort %d times: %f s\n", times, cpu_time_used); + + const int arraySize = 1000000; + int data1[arraySize]; + int data2[arraySize]; + for (int i = 0; i < arraySize; ++i) { + data1[i] = taosRand(); + data2[i] = data1[i]; + } + start_time = clock(); + taosMergeSort(data1, arraySize, sizeof(int), cmpInt); + end_time = clock(); + cpu_time_used = ((double)(end_time - start_time)) / CLOCKS_PER_SEC; + printf("taosMSort length:%d cost: %f s\n", arraySize, cpu_time_used); + + start_time = clock(); + taosSort(data2, arraySize, sizeof(int), cmpInt); + end_time = clock(); + cpu_time_used = ((double)(end_time - start_time)) / CLOCKS_PER_SEC; + printf("taosSort length:%d cost: %f s\n", arraySize, cpu_time_used); + + for (int i = 0; i < arraySize - 1; i++) { + EXPECT_EQ(data1[i], data2[i]); + ASSERT_LE(data1[i], data1[i+1]); + } +} diff --git a/tests/develop-test/2-query/ts-range.py b/tests/develop-test/2-query/ts-range.py new file mode 100644 index 0000000000..6ad88281ef --- /dev/null +++ b/tests/develop-test/2-query/ts-range.py @@ -0,0 +1,86 @@ +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import tdDnodes +from math import inf + +class TDTestCase: + def caseDescription(self): + ''' + case1: [TS-4088] timestamp range support operator + ''' + return + + def init(self, conn, logSql, replicaVer=1): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), True) + self._conn = conn + + def restartTaosd(self, index=1, dbname="db"): + tdDnodes.stop(index) + tdDnodes.startWithoutSleep(index) + tdSql.execute(f"use ts_range") + + def run(self): + print("running {}".format(__file__)) + tdSql.execute("drop database if exists ts_range") + tdSql.execute("create database if not exists ts_range") + tdSql.execute('use ts_range') + tdSql.execute('create table stb1 (ts timestamp, c1 bool, c2 tinyint, c3 smallint, c4 int, c5 bigint, c6 float, c7 double, c8 binary(10), c9 nchar(10), c10 tinyint unsigned, c11 smallint unsigned, c12 int unsigned, c13 bigint unsigned) TAGS(t1 int, t2 binary(10), t3 double);') + + tdSql.execute("create table tb1 using stb1 tags(1,'1',1.0);") + + tdSql.execute("create table tb2 using stb1 tags(2,'2',2.0);") + + tdSql.execute("create table tb3 using stb1 tags(3,'3',3.0);") + + tdSql.execute('insert into tb1 values (\'2021-11-11 09:00:00\',true,1,1,1,1,1,1,"123","1234",1,1,1,1);') + + tdSql.execute("insert into tb1 values ('2021-11-11 09:00:01',true,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL);") + + tdSql.execute('insert into tb1 values (\'2021-11-11 
09:00:02\',true,2,NULL,2,NULL,2,NULL,"234",NULL,2,NULL,2,NULL);') + + tdSql.execute('insert into tb1 values (\'2021-11-11 09:00:03\',false,NULL,3,NULL,3,NULL,3,NULL,"3456",NULL,3,NULL,3);') + + tdSql.execute('insert into tb1 values (\'2021-11-11 09:00:04\',true,4,4,4,4,4,4,"456","4567",4,4,4,4);') + + tdSql.execute('insert into tb1 values (\'2021-11-11 09:00:05\',true,127,32767,2147483647,9223372036854775807,3.402823466e+38,1.79769e+308,"567","5678",254,65534,4294967294,9223372036854775807);') + + tdSql.execute('insert into tb1 values (\'2021-11-11 09:00:06\',true,-127,-32767,-2147483647,-9223372036854775807,-3.402823466e+38,-1.79769e+308,"678","6789",0,0,0,0);') + + tdSql.execute('insert into tb2 values (\'2021-11-11 09:00:00\',true,1,1,1,1,1,1,"111","1111",1,1,1,1);') + + tdSql.execute('insert into tb2 values (\'2021-11-11 09:00:01\',true,2,2,2,2,2,2,"222","2222",2,2,2,2);') + + tdSql.execute('insert into tb2 values (\'2021-11-11 09:00:02\',true,3,3,2,3,3,3,"333","3333",3,3,3,3);') + + tdSql.execute('insert into tb2 values (\'2021-11-11 09:00:03\',false,4,4,4,4,4,4,"444","4444",4,4,4,4);') + + tdSql.execute('insert into tb2 values (\'2021-11-11 09:00:04\',true,5,5,5,5,5,5,"555","5555",5,5,5,5);') + + tdSql.execute('insert into tb2 values (\'2021-11-11 09:00:05\',true,6,6,6,6,6,6,"666","6666",6,6,6,6);') + + tdSql.execute('insert into tb2 values (\'2021-11-11 09:00:06\',true,7,7,7,7,7,7,"777","7777",7,7,7,7);') + + + tdSql.query('select count(*) from stb1 where ts < 1000000000000 + 10s') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + tdSql.query('select count(*) from stb1 where ts >= 1000000000000 + 10s') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 14) + + tdSql.query('select count(*) from stb1 where ts > 1000000000000 - 10s and ts <= 1000000000000 + 10s') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select count(*) from stb1 where ts > 1636592400000 + 3s'); + tdSql.checkData(0, 0, 6) + #tdSql.execute('drop database ts_range') + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_sample_use_ts.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_sample_use_ts.json index 38aa47740f..56c2a52b6a 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_sample_use_ts.json +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_sample_use_ts.json @@ -43,7 +43,7 @@ "disorder_ratio": 0, "disorder_range": 1000, "timestamp_step": 1, - "start_timestamp": "now", + "start_timestamp": 1641976781440, "sample_file": "./5-taos-tools/taosbenchmark/csv/sample_use_ts.csv", "use_sample_ts": "yes", "tags_file": "./5-taos-tools/taosbenchmark/csv/sample_tags.csv", diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 8afb739cbc..e83586ca09 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -57,6 +57,10 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/partition_by_col_agg.py -Q 2 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/partition_by_col_agg.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/partition_by_col_agg.py -Q 4 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/interval_limit_opt_2.py -Q 4 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/interval_limit_opt_2.py -Q 3 +,,y,system-test,./pytest.sh python3 ./test.py -f 
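The queries in this new test exercise TS-4088: a bare epoch-millisecond literal combined with a duration literal (10s, 3s) is now accepted as a timestamp expression in a filter. The same query from the C client, assuming a reachable local server and the ts_range database created above (link with -ltaos):

```c
#include <stdint.h>
#include <stdio.h>
#include <taos.h>

int main(void) {
  /* connection parameters are placeholders for a local test setup */
  TAOS *taos = taos_connect("localhost", "root", "taosdata", "ts_range", 0);
  if (taos == NULL) return 1;

  /* TS-4088: epoch-ms literal plus duration literal as a timestamp expression */
  TAOS_RES *res = taos_query(taos, "select count(*) from stb1 where ts >= 1000000000000 + 10s");
  if (taos_errno(res) != 0) {
    fprintf(stderr, "query failed: %s\n", taos_errstr(res));
  } else {
    TAOS_ROW row = taos_fetch_row(res);
    if (row != NULL && row[0] != NULL) printf("count: %lld\n", (long long)*(int64_t *)row[0]);
  }
  taos_free_result(res);
  taos_close(taos);
  return 0;
}
```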
2-query/interval_limit_opt_2.py -Q 2 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/interval_limit_opt_2.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqShow.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqDropStb.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/subscribeStb0.py @@ -1012,6 +1016,7 @@ ,,y,script,./test.sh -f tsim/query/udf_with_const.sim ,,y,script,./test.sh -f tsim/query/join_interval.sim ,,y,script,./test.sh -f tsim/query/join_pk.sim +,,y,script,./test.sh -f tsim/query/count_spread.sim ,,y,script,./test.sh -f tsim/query/unionall_as_table.sim ,,y,script,./test.sh -f tsim/query/multi_order_by.sim ,,y,script,./test.sh -f tsim/query/sys_tbname.sim @@ -1023,12 +1028,13 @@ ,,y,script,./test.sh -f tsim/query/emptyTsRange_scl.sim ,,y,script,./test.sh -f tsim/query/partitionby.sim ,,y,script,./test.sh -f tsim/query/tableCount.sim -,,y,script,./test.sh -f tsim/query/show_db_table_kind.sim -,,y,script,./test.sh -f tsim/query/bi_star_table.sim -,,y,script,./test.sh -f tsim/query/tag_scan.sim +,,y,script,./test.sh -f tsim/query/show_db_table_kind.sim +,,y,script,./test.sh -f tsim/query/bi_star_table.sim +,,y,script,./test.sh -f tsim/query/tag_scan.sim ,,y,script,./test.sh -f tsim/query/nullColSma.sim ,,y,script,./test.sh -f tsim/query/bug3398.sim ,,y,script,./test.sh -f tsim/query/explain_tsorder.sim +,,y,script,./test.sh -f tsim/query/apercentile.sim ,,y,script,./test.sh -f tsim/qnode/basic1.sim ,,y,script,./test.sh -f tsim/snode/basic1.sim ,,y,script,./test.sh -f tsim/mnode/basic1.sim @@ -1264,6 +1270,7 @@ #develop test ,,n,develop-test,python3 ./test.py -f 2-query/table_count_scan.py +,,n,develop-test,python3 ./test.py -f 2-query/ts-range.py ,,n,develop-test,python3 ./test.py -f 2-query/show_create_db.py ,,n,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/auto_create_table_json.py ,,n,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/custom_col_tag.py diff --git a/tests/perf-test/build.sh b/tests/perf-test/build.sh new file mode 100755 index 0000000000..04717cbd13 --- /dev/null +++ b/tests/perf-test/build.sh @@ -0,0 +1,22 @@ +#! /bin/bash + +set -x + +cd $1 +git reset --hard HEAD +git checkout -- . +git checkout $2 +git pull + +sed -i ':a;N;$!ba;s/\(.*\)OFF/\1ON/' $1/cmake/cmake.options + +mkdir -p $1/debug +rm -rf $1/debug/* +cd $1/debug +cmake .. 
-DBUILD_TOOLS=true +cd $1/debug +make -j 4 +cd $1/debug +make install + +systemctl start taosd diff --git a/tests/perf-test/buildTD.py b/tests/perf-test/buildTD.py new file mode 100644 index 0000000000..9b47886089 --- /dev/null +++ b/tests/perf-test/buildTD.py @@ -0,0 +1,32 @@ +import os +import subprocess + +class BuildTDengine: + def __init__(self, host='vm96', path = '/root/pxiao/TDengine', branch = 'main') -> None: + self.host = host + self.path = path + self.branch = branch + + def build(self): + parameters=[self.path, self.branch] + build_fild = "./build.sh" + try: + # Run the Bash script using subprocess + subprocess.run(['bash', build_fild] + parameters, check=True) + print("TDengine build successfully.") + except subprocess.CalledProcessError as e: + print(f"Error running Bash script: {e}") + except FileNotFoundError as e: + print(f"File not found: {e}") + + def get_cmd_output(self, cmd): + try: + # Run the Bash command and capture the output + result = subprocess.run(cmd, stdout=subprocess.PIPE, shell=True, text=True) + + # Access the output from the 'result' object + output = result.stdout + + return output.strip() + except subprocess.CalledProcessError as e: + print(f"Error running Bash command: {e}") \ No newline at end of file diff --git a/tests/perf-test/insert_json.py b/tests/perf-test/insert_json.py new file mode 100644 index 0000000000..7ce5fb86e2 --- /dev/null +++ b/tests/perf-test/insert_json.py @@ -0,0 +1,100 @@ +import datetime +import json + +class InsertJson: + def __init__(self, tables = 10000, records_per_table = 10000, interlace_rows = 0, stt_trigger = 1) -> None: + self.tables = tables + self.records_per_table = records_per_table + self.interlace_rows = interlace_rows + self.stt_trigger = stt_trigger + + def get_db_cfg(self) -> dict: + return { + "name": "test", + "drop": "true", + "replica": 1, + "precision": "ms", + "cachemodel": "'both'", + "keep": 3650, + "minRows": 100, + "maxRows": 4096, + "comp": 2, + "vgroups": 10, + "stt_trigger": self.stt_trigger + } + + def get_stb_cfg(self) -> list: + return [ + { + "name": "meters", + "child_table_exists": "no", + "childtable_count": self.tables, + "childtable_prefix": "d", + "escape_character": "yes", + "auto_create_table": "no", + "batch_create_tbl_num": 5, + "data_source": "rand", + "insert_mode": "taosc", + "non_stop_mode": "no", + "line_protocol": "line", + "insert_rows": self.records_per_table, + "childtable_limit": 10000, + "childtable_offset": 100, + "interlace_rows": self.interlace_rows, + "insert_interval": 0, + "partial_col_num": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "2022-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "use_sample_ts": "no", + "tags_file": "", + "columns": self.get_column_list(), + "tags": self.get_tag_list() + } + ] + + def get_column_list(self) -> list: + return [ + {"type": "FLOAT", "name": "current", "count": 1, "max": 12, "min": 8}, + {"type": "INT", "name": "voltage", "max": 225, "min": 215}, + {"type": "FLOAT", "name": "phase", "max": 1, "min": 0}, + ] + + def get_tag_list(self) -> list: + return [ + { "type": "TINYINT", "name": "groupid", "max": 10, "min": 1 }, + { "name": "location", "type": "BINARY", "len": 16, "values": ["San Francisco", "Los Angles", "San Diego", "San Jose", "Palo Alto", "Campbell", "Mountain View", "Sunnyvale", "Santa Clara", "Cupertino"]} + ] + + def get_insert_cfg(self) -> dict: + return { + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + 
"port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 10, + "create_table_thread_count": 7, + "result_file": "/tmp/insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": self.get_db_cfg(), + "super_tables": self.get_stb_cfg() + }] + } + + def create_insert_file(self) -> str: + date = datetime.datetime.now() + file_create_table = f"/tmp/insert_{date:%F-%H%M}.json" + + with open(file_create_table, 'w') as f: + json.dump(self.get_insert_cfg(), f) + + return file_create_table \ No newline at end of file diff --git a/tests/perf-test/mysqldb.py b/tests/perf-test/mysqldb.py new file mode 100644 index 0000000000..f25f4f35f7 --- /dev/null +++ b/tests/perf-test/mysqldb.py @@ -0,0 +1,60 @@ +import mysql.connector + +class MySQLDatabase: + def __init__(self, host = '192.168.1.116', port = 3306, user = 'root', password = 'taosdata', database = 'perf_data'): + self.host = host + self.port = port + self.user = user + self.password = password + self.database = database + self.connection = None + + def connect(self): + try: + self.connection = mysql.connector.connect( + host=self.host, + port=self.port, + user=self.user, + password=self.password, + database=self.database + ) + except mysql.connector.Error as error: + print("Failed to connect to database: {}".format(error)) + + def execute(self, query, params=None): + cursor = self.connection.cursor() + try: + cursor.execute(query, params) + self.connection.commit() + except mysql.connector.Error as error: + print("Failed to execute query: {}".format(error)) + finally: + cursor.close() + + def query(self, query, params=None): + cursor = self.connection.cursor() + try: + cursor.execute(query, params) + result = cursor.fetchall() + return result + except mysql.connector.Error as error: + print("Failed to execute query: {}".format(error)) + finally: + cursor.close() + + def get_id(self, query, params = None): + cursor = self.connection.cursor() + try: + cursor.execute(query, params) + cursor.execute("select last_insert_id()") + id = cursor.fetchone()[0] + self.connection.commit() + + return id + except mysql.connector.Error as error: + print("Failed to execute query: {}".format(error)) + finally: + cursor.close() + + def disconnect(self): + self.connection.close() \ No newline at end of file diff --git a/tests/perf-test/query_json.py b/tests/perf-test/query_json.py new file mode 100644 index 0000000000..0c2b2f38d1 --- /dev/null +++ b/tests/perf-test/query_json.py @@ -0,0 +1,41 @@ +import datetime +import json + +class QueryJson: + def __init__(self, sql, query_times = 1) -> None: + self.sql = sql + self.query_times = query_times + + def gen_query_json(self) -> dict: + return { + "filetype": "query", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "confirm_parameter_prompt": "no", + "databases": "test", + "query_times": self.query_times, + "query_mode": "taosc", + "specified_table_query": { + "query_interval": 1, + "concurrent": 1, + "sqls": [ + { + "sql": "%s" % self.sql, + "result": "./query_res.txt" + } + ] + } + + } + + def create_query_file(self) -> str: + date = datetime.datetime.now() + file_create_table = f"/tmp/query_{date:%F-%H%M}.json" + + with open(file_create_table, 'w') as f: + json.dump(self.gen_query_json(), f) + + return file_create_table \ No newline at end of file diff --git a/tests/perf-test/write_perf_data.py b/tests/perf-test/write_perf_data.py 
new file mode 100644 index 0000000000..4a2021c356 --- /dev/null +++ b/tests/perf-test/write_perf_data.py @@ -0,0 +1,75 @@ +import os +import socket +import mysqldb +import insert_json +import query_json +import buildTD + +if __name__ == "__main__": + # Build TDengine + hostname = socket.gethostname() + new_build = buildTD.BuildTDengine(host = hostname) + + new_build.build() + cmd = f"cd {new_build.path} && git rev-parse --short @ " + commit_id = new_build.get_cmd_output(cmd) + branch = new_build.branch + + num_of_tables = 10000 + records_per_table = 10000 + interlace_rows = 0 + stt_trigger = 1 + + # get scenario id + db = mysqldb.MySQLDatabase() + db.connect() + sql = f"select id from scenarios where num_of_tables = {num_of_tables} and records_per_table = {records_per_table} and interlace_rows = {interlace_rows} and stt_trigger = {stt_trigger}" + row = db.query(sql) + if row is None: + id = db.get_id(f"insert into scenarios(num_of_tables, records_per_table, interlace_rows, stt_trigger) values({num_of_tables},{records_per_table}, {interlace_rows}, {stt_trigger})") + else: + id = row[0][0] + + print(f"scenario id is {id}") + + # record insert performance data + insert = insert_json.InsertJson(num_of_tables, records_per_table, interlace_rows, stt_trigger) + os.system(f"taosBenchmark -f {insert.create_insert_file()}") + + cmd = "grep Spent /tmp/insert_res.txt | tail -1 | awk {'print $5'}" + time = new_build.get_cmd_output(cmd) + + cmd = "grep Spent /tmp/insert_res.txt | tail -1 | awk {'print $16'}" + speed = new_build.get_cmd_output(cmd) + + sql = f"insert into insert_perf(sid, time_cost, records_per_sec, branch, commit_id, date) values({id}, {time}, {speed}, '{branch}', '{commit_id}', now())" + print(sql) + db.execute(sql) + + # record query performance data + sql = "select * from queries" + res = db.query(sql) + for row in res: + json = query_json.QueryJson(row[1], query_times=1) + print(f"query: {row[1]}") + os.system(f"taosBenchmark -f {json.create_query_file()} > /tmp/{row[0]}.txt") + cmd = "grep delay /tmp/%d.txt | awk {'print $11'} | cut -d 's' -f 1" % row[0] + print(f"cmd is {cmd}") + avg = new_build.get_cmd_output(cmd) + print(f"avg is {avg}") + if (avg == ""): + break + + sql = f"insert into query_perf(sid, qid, time_cost, branch, commit_id, date) values({id}, {row[0]}, {avg}, '{branch}', '{commit_id}', now())" + print(sql) + db.execute(sql) + + # close connection + db.disconnect() + + + + + + + \ No newline at end of file diff --git a/tests/pytest/util/sql.py b/tests/pytest/util/sql.py index 91aac1929f..7dcf6bc3f2 100644 --- a/tests/pytest/util/sql.py +++ b/tests/pytest/util/sql.py @@ -78,7 +78,7 @@ class TDSql: self.cursor.execute(s) time.sleep(2) - def error(self, sql, expectedErrno = None): + def error(self, sql, expectedErrno = None, expectErrInfo = None): caller = inspect.getframeinfo(inspect.stack()[1][0]) expectErrNotOccured = True @@ -87,12 +87,9 @@ class TDSql: except BaseException as e: expectErrNotOccured = False self.errno = e.errno - self.error_info = repr(e) - # print(error_info) - # self.error_info = error_info[error_info.index('(')+1:-1].split(",")[0].replace("'","") + error_info = repr(e) + self.error_info = error_info[error_info.index('(')+1:-1].split(",")[0].replace("'","") # self.error_info = (','.join(error_info.split(",")[:-1]).split("(",1)[1:][0]).replace("'","") - # print("!!!!!!!!!!!!!!",self.error_info) - if expectErrNotOccured: tdLog.exit("%s(%d) failed: sql:%s, expect error not occured" % (caller.filename, caller.lineno, sql)) else: @@ -108,8 +105,15 @@ 
class TDSql: else: tdLog.info("sql:%s, expect error occured" % (sql)) - return self.error_info + if expectErrInfo != None: + if expectErrInfo == self.error_info: + tdLog.info("sql:%s, expected expectErrInfo %s occured" % (sql, expectErrInfo)) + else: + tdLog.exit("%s(%d) failed: sql:%s, expectErrInfo %s occured, but not expected errno %s" % (caller.filename, caller.lineno, sql, self.error_info, expectErrInfo)) + else: + tdLog.info("sql:%s, expect error occured" % (sql)) + return self.error_info def query(self, sql, row_tag=None, queryTimes=10, count_expected_res=None): self.sql = sql @@ -257,7 +261,7 @@ class TDSql: return self.cursor.istype(col, dataType) - def checkData(self, row, col, data): + def checkData(self, row, col, data, show = False): if row >= self.queryRows: caller = inspect.getframeinfo(inspect.stack()[1][0]) args = (caller.filename, caller.lineno, self.sql, row+1, self.queryRows) @@ -275,8 +279,8 @@ class TDSql: if isinstance(data,str) : if (len(data) >= 28): if self.queryResult[row][col] == _parse_ns_timestamp(data): - # tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{pd.to_datetime(resultData)} == expect:{data}") - tdLog.info("check successfully") + if(show): + tdLog.info("check successfully") else: caller = inspect.getframeinfo(inspect.stack()[1][0]) args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data) @@ -284,7 +288,8 @@ class TDSql: else: if self.queryResult[row][col].astimezone(datetime.timezone.utc) == _parse_datetime(data).astimezone(datetime.timezone.utc): # tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}") - tdLog.info("check successfully") + if(show): + tdLog.info("check successfully") else: caller = inspect.getframeinfo(inspect.stack()[1][0]) args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data) @@ -317,7 +322,8 @@ class TDSql: if data == self.queryResult[row][col]: success = True if success: - tdLog.info("check successfully") + if(show): + tdLog.info("check successfully") else: caller = inspect.getframeinfo(inspect.stack()[1][0]) args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data) @@ -328,7 +334,8 @@ class TDSql: delt_data = data-datetime.datetime.fromtimestamp(0,data.tzinfo) delt_result = self.queryResult[row][col] - datetime.datetime.fromtimestamp(0,self.queryResult[row][col].tzinfo) if delt_data == delt_result: - tdLog.info("check successfully") + if(show): + tdLog.info("check successfully") else: caller = inspect.getframeinfo(inspect.stack()[1][0]) args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data) @@ -341,16 +348,19 @@ class TDSql: if str(self.queryResult[row][col]) == str(data): # tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}") - tdLog.info("check successfully") + if(show): + tdLog.info("check successfully") return elif isinstance(data, float): if abs(data) >= 1 and abs((self.queryResult[row][col] - data) / data) <= 0.000001: # tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}") - tdLog.info("check successfully") + if(show): + tdLog.info("check successfully") elif abs(data) < 1 and abs(self.queryResult[row][col] - data) <= 0.000001: # tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}") - tdLog.info("check successfully") + if(show): + tdLog.info("check successfully") else: caller = 
inspect.getframeinfo(inspect.stack()[1][0]) @@ -361,7 +371,8 @@ class TDSql: caller = inspect.getframeinfo(inspect.stack()[1][0]) args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data) tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args) - tdLog.info("check successfully") + if(show): + tdLog.info("check successfully") # return true or false replace exit, no print out def checkRowColNoExit(self, row, col): diff --git a/tests/script/api/passwdTest.c b/tests/script/api/passwdTest.c index 24059bd35d..928525750e 100644 --- a/tests/script/api/passwdTest.c +++ b/tests/script/api/passwdTest.c @@ -48,6 +48,7 @@ void createUsers(TAOS *taos, const char *host, char *qstr); void passVerTestMulti(const char *host, char *qstr); void sysInfoTest(TAOS *taos, const char *host, char *qstr); void userDroppedTest(TAOS *taos, const char *host, char *qstr); +void clearTestEnv(TAOS *taos, const char *host, char *qstr); int nPassVerNotified = 0; int nUserDropped = 0; @@ -210,6 +211,7 @@ int main(int argc, char *argv[]) { passVerTestMulti(argv[1], qstr); sysInfoTest(taos, argv[1], qstr); userDroppedTest(taos, argv[1], qstr); + clearTestEnv(taos, argv[1], qstr); taos_close(taos); taos_cleanup(); @@ -267,9 +269,9 @@ void passVerTestMulti(const char *host, char *qstr) { queryDB(taos[0], "create database if not exists demo2 vgroups 1 minrows 10"); queryDB(taos[0], "create database if not exists demo3 vgroups 1 minrows 10"); - queryDB(taos[0], "create table demo1.stb (ts timestamp, c1 int) tags(t1 int)"); - queryDB(taos[0], "create table demo2.stb (ts timestamp, c1 int) tags(t1 int)"); - queryDB(taos[0], "create table demo3.stb (ts timestamp, c1 int) tags(t1 int)"); + queryDB(taos[0], "create table if not exists demo1.stb (ts timestamp, c1 int) tags(t1 int)"); + queryDB(taos[0], "create table if not exists demo2.stb (ts timestamp, c1 int) tags(t1 int)"); + queryDB(taos[0], "create table if not exists demo3.stb (ts timestamp, c1 int) tags(t1 int)"); strcpy(qstr, "alter user root pass 'taos'"); queryDB(taos[0], qstr); @@ -326,9 +328,9 @@ void sysInfoTest(TAOS *taosRoot, const char *host, char *qstr) { queryDB(taosRoot, "create database if not exists demo12 vgroups 1 minrows 10"); queryDB(taosRoot, "create database if not exists demo13 vgroups 1 minrows 10"); - queryDB(taosRoot, "create table demo11.stb (ts timestamp, c1 int) tags(t1 int)"); - queryDB(taosRoot, "create table demo12.stb (ts timestamp, c1 int) tags(t1 int)"); - queryDB(taosRoot, "create table demo13.stb (ts timestamp, c1 int) tags(t1 int)"); + queryDB(taosRoot, "create table if not exists demo11.stb (ts timestamp, c1 int) tags(t1 int)"); + queryDB(taosRoot, "create table if not exists demo12.stb (ts timestamp, c1 int) tags(t1 int)"); + queryDB(taosRoot, "create table if not exists demo13.stb (ts timestamp, c1 int) tags(t1 int)"); sprintf(qstr, "show grants"); char output[BUF_LEN]; @@ -387,10 +389,14 @@ _REP: fprintf(stderr, ">>> succeed to run sysInfoTest\n"); fprintf(stderr, "######## %s #########\n", __func__); } - +static bool isDropUser = true; void userDroppedTest(TAOS *taos, const char *host, char *qstr) { // users int nTestUsers = nUser; + int nLoop = 0; +_loop: + ++nLoop; + printf("\n\n%s:%d LOOP %d, nTestUsers:%d\n", __func__, __LINE__, nLoop, nTestUsers); for (int i = 0; i < nTestUsers; ++i) { // sprintf(users[i], "user%d", i); taosu[i] = taos_connect(host, users[i], "taos", NULL, 0); @@ -426,7 +432,6 @@ void userDroppedTest(TAOS *taos, const char *host, char *qstr) { for (int i = 0; i 
< nTestUsers; ++i) { taos_close(taosu[i]); printf("%s:%d close taosu[%d]\n", __func__, __LINE__, i); - sleep(1); } fprintf(stderr, "######## %s #########\n", __func__); @@ -437,5 +442,32 @@ void userDroppedTest(TAOS *taos, const char *host, char *qstr) { exit(1); } fprintf(stderr, "######## %s #########\n", __func__); - // sleep(300); + + if (nLoop < 5) { + nUserDropped = 0; + for (int i = 0; i < nTestUsers; ++i) { + sprintf(users[i], "user%d", i); + sprintf(qstr, "CREATE USER %s PASS 'taos'", users[i]); + fprintf(stderr, "%s:%d create user:%s\n", __func__, __LINE__, users[i]); + queryDB(taos, qstr); + } + goto _loop; + } + isDropUser = false; +} + +void clearTestEnv(TAOS *taos, const char *host, char *qstr) { + fprintf(stderr, "######## %s start #########\n", __func__); + // restore password + sprintf(qstr, "alter user root pass 'taosdata'"); + queryDB(taos, qstr); + + if (isDropUser) { + for (int i = 0; i < nUser; ++i) { + sprintf(qstr, "drop user %s", users[i]); + queryDB(taos, qstr); + } + } + // sleep(3000); + fprintf(stderr, "######## %s end #########\n", __func__); } \ No newline at end of file diff --git a/tests/script/tsim/query/apercentile.sim b/tests/script/tsim/query/apercentile.sim new file mode 100644 index 0000000000..71d075b0ef --- /dev/null +++ b/tests/script/tsim/query/apercentile.sim @@ -0,0 +1,36 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sql connect + +sql drop database if exists test2; +sql create database test2; +sql use test2; +sql create table s(ts timestamp,v double) tags(id nchar(16)); +sql create table t using s tags('11') ; +sql insert into t values(now,null); +sql select APERCENTILE(v,50,'t-digest') as k from s where ts > now-1d and ts < now interval(1h); +if $rows != 1 then + return -1 +endi +if $data00 != NULL then + return -1 +endi + +sql select APERCENTILE(v,50) as k from s where ts > now-1d and ts < now interval(1h); +if $rows != 1 then + return -1 +endi +if $data00 != NULL then + return -1 +endi + +sql select APERCENTILE(v,50) as k from s where ts > now-1d and ts < now interval(1h); +if $rows != 1 then + return -1 +endi +if $data00 != NULL then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/query/count_spread.sim b/tests/script/tsim/query/count_spread.sim new file mode 100644 index 0000000000..c03783b7fe --- /dev/null +++ b/tests/script/tsim/query/count_spread.sim @@ -0,0 +1,24 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sql connect + +sql create database test; +sql use test; +sql create table st(ts timestamp, f int) tags(t int); +sql insert into ct1 using st tags(1) values(now, 0)(now+1s, 1)(now+2s, 10)(now+3s, 11) +sql insert into ct2 using st tags(2) values(now+2s, 2)(now+3s, 3) +sql insert into ct3 using st tags(3) values(now+4s, 4)(now+5s, 5) +sql insert into ct4 using st tags(4) values(now+6s, 6)(now+7s, 7) + +sql select count(*), spread(ts) from st where tbname='ct1' +print $data00, $data01 +if $data00 != @4@ then + return -1 +endi +if $data01 != @3000.000000000@ then + return -1 +endi + +sql drop database test; +system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/tsim/stream/checkStreamSTable.sim b/tests/script/tsim/stream/checkStreamSTable.sim index 3b31cbc383..873fb3f060 100644 --- a/tests/script/tsim/stream/checkStreamSTable.sim +++ b/tests/script/tsim/stream/checkStreamSTable.sim @@ -22,6 +22,8 @@ sql create table t2 using st 
tags(2,2,2); sql create stable result.streamt0(ts timestamp,a int,b int) tags(ta int,tb varchar(100),tc int); sql create stream streams0 trigger at_once into result.streamt0 tags(tb) as select _wstart, count(*) c1, max(a) c2 from st partition by tbname tb interval(10s); +sleep 500 + sql insert into t1 values(1648791213000,1,2,3); sql insert into t2 values(1648791213000,2,2,3); @@ -106,6 +108,8 @@ sql create table t2 using st tags(2,2,2); sql create stable result1.streamt1(ts timestamp,a int,b int,c int) tags(ta varchar(100),tb int,tc int); sql create stream streams1 trigger at_once into result1.streamt1(ts,c,a,b) tags(ta) as select _wstart, count(*) c1, max(a),min(b) c2 from st partition by tbname as ta interval(10s); +sleep 500 + sql insert into t1 values(1648791213000,10,20,30); sql insert into t2 values(1648791213000,40,50,60); @@ -194,7 +198,7 @@ sql_error create stream streams2 trigger at_once into result2.streamt2 as selec # column dest 3, source 2 sql create stream streams2 trigger at_once into result2.streamt2(ts, a) tags(ta) as select _wstart, count(*) c1 from st partition by tbname as ta interval(10s); - +sleep 500 print ===== step5 @@ -211,6 +215,7 @@ sql create table t2 using st tags(4,5,6); sql create stable result3.streamt3(ts timestamp,a int,b int,c int, d int) tags(ta int,tb int,tc int); sql create stream streams3 trigger at_once into result3.streamt3(ts,c,a,b) as select _wstart, count(*) c1, max(a),min(b) c2 from st interval(10s); +sleep 500 sql insert into t1 values(1648791213000,10,20,30); sql insert into t2 values(1648791213000,40,50,60); @@ -290,6 +295,7 @@ sql create table t2 using st tags(4,5,6); sql create stable result4.streamt4(ts timestamp,a int,b int,c int, d int) tags(tg1 int,tg2 int,tg3 int); sql create stream streams4 trigger at_once into result4.streamt4(ts,c,a,b) tags(tg2, tg3, tg1) subtable( concat("tbl-", cast(tg1 as varchar(10)) ) ) as select _wstart, count(*) c1, max(a),min(b) c2 from st partition by ta+1 as tg1, cast(tb as bigint) as tg2, tc as tg3 interval(10s); +sleep 500 sql insert into t1 values(1648791213000,10,20,30); sql insert into t2 values(1648791213000,40,50,60); @@ -374,6 +380,7 @@ sql create table t2 using st tags(4,5,6); sql create stable result5.streamt5(ts timestamp,a int,b int,c int, d int) tags(tg1 int,tg2 int,tg3 int); sql create stream streams5 trigger at_once into result5.streamt5(ts,c,a,b) tags(tg2, tg3, tg1) subtable( concat("tbl-", cast(tg3 as varchar(10)) ) ) as select _wstart, count(*) c1, max(a),min(b) c2 from st partition by ta+1 as tg1, cast(tb as bigint) as tg2, a as tg3 session(ts, 10s); +sleep 500 sql insert into t1 values(1648791213000,NULL,NULL,NULL); @@ -458,6 +465,7 @@ sql create stream streams8 trigger at_once into streamt8 as select _wstart as sql drop stream streams8; sql create stream streams71 trigger at_once into streamt8(ts, c2) tags(group_id)as select _wstart, count(*) from t1 partition by tbname as group_id interval(10s); +sleep 500 sql insert into t1 values(1648791233000,1,2,3,1.0); diff --git a/tests/script/tsim/stream/checkStreamSTable1.sim b/tests/script/tsim/stream/checkStreamSTable1.sim index 57d0f0190d..dd44f5c102 100644 --- a/tests/script/tsim/stream/checkStreamSTable1.sim +++ b/tests/script/tsim/stream/checkStreamSTable1.sim @@ -15,6 +15,8 @@ sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int); sql create table t1 using st tags(1,1,1); sql create table t2 using st tags(2,2,2); sql create stream streams1 trigger at_once into streamt1 as select _wstart, count(*) c1, 
count(a) c2 from st interval(1s) ; +sleep 500 + sql insert into t1 values(1648791211000,1,2,3); sql insert into t1 values(1648791212000,2,2,3); @@ -44,6 +46,7 @@ sql alter table streamt1 add column c3 double; print create stream streams1 trigger at_once into streamt1 as select _wstart, count(*) c1, count(a) c2, avg(b) c3 from st interval(1s) ; sql create stream streams1 trigger at_once into streamt1 as select _wstart, count(*) c1, count(a) c2, avg(b) c3 from st interval(1s) ; +sleep 500 sql insert into t2 values(1648791213000,1,2,3); sql insert into t1 values(1648791214000,1,2,3); diff --git a/tests/script/tsim/stream/deleteInterval.sim b/tests/script/tsim/stream/deleteInterval.sim index b78de20a97..11e5ee39d2 100644 --- a/tests/script/tsim/stream/deleteInterval.sim +++ b/tests/script/tsim/stream/deleteInterval.sim @@ -17,6 +17,7 @@ sql create database test vgroups 1; sql use test; sql create table t1(ts timestamp, a int, b int , c int, d double); sql create stream streams0 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt as select _wstart c1, count(*) c2, max(a) c3 from t1 interval(10s); +sleep 500 sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL); sleep 1000 @@ -194,6 +195,7 @@ sql create stable st(ts timestamp, a int, b int, c int, d double) tags(ta int,tb sql create table t1 using st tags(1,1,1); sql create table t2 using st tags(2,2,2); sql create stream streams2 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into test.streamt2 as select _wstart c1, count(*) c2, max(a) c3 from st interval(10s); +sleep 500 sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL); sql insert into t2 values(1648791213000,NULL,NULL,NULL,NULL); @@ -420,6 +422,7 @@ sql create stable st(ts timestamp, a int, b int, c int, d double) tags(ta int,tb sql create table t1 using st tags(1,1,1); sql create table t2 using st tags(2,2,2); sql create stream streams3 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into test.streamt3 as select _wstart c1, count(*) c2, max(a) c3 from st interval(10s); +sleep 500 sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL); sql insert into t2 values(1648791213000,NULL,NULL,NULL,NULL); diff --git a/tests/script/tsim/stream/partitionbyColumnInterval.sim b/tests/script/tsim/stream/partitionbyColumnInterval.sim index d586522cc8..d5f815d533 100644 --- a/tests/script/tsim/stream/partitionbyColumnInterval.sim +++ b/tests/script/tsim/stream/partitionbyColumnInterval.sim @@ -17,6 +17,7 @@ sql create database test vgroups 1; sql use test; sql create table t1(ts timestamp, a int, b int , c int, d double); sql create stream streams0 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt as select _wstart c1, count(*) c2, max(a) c3, _group_key(a) c4 from t1 partition by a interval(10s); +sleep 1000 sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL); sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL); @@ -198,6 +199,7 @@ sql create database test1 vgroups 1; sql use test1; sql create table t1(ts timestamp, a int, b int , c int, d double); sql create stream streams1 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt1 as select _wstart c1, count(*) c2, max(c) c3, _group_key(a+b) c4 from t1 partition by a+b interval(10s); +sleep 1000 sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL); sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL); @@ -285,6 +287,7 @@ sql create stable st(ts timestamp, a int, b int, c int, d double) tags(ta int,tb sql create table t1 using st tags(1,1,1); sql create table t2 using st 
tags(2,2,2); sql create stream streams2 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into test.streamt2 as select _wstart c1, count(*) c2, max(a) c3 from st partition by a interval(10s); +sleep 1000 sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL); sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL); @@ -482,6 +485,7 @@ sql create table t2 using st tags(2,2,2); sql create table t3 using st tags(2,2,2); sql create table t4 using st tags(2,2,2); sql create stream streams4 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into test.streamt4 as select _wstart c1, count(*) c2, max(a) c3 from st partition by a interval(10s); +sleep 1000 sql insert into t1 values(1648791213000,2,2,3,1.0); sql insert into t2 values(1648791213000,2,2,3,1.0); @@ -572,6 +576,7 @@ sql create table t2 using st tags(2,2,2); sql create table t3 using st tags(2,2,2); sql create table t4 using st tags(2,2,2); sql create stream streams5 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into test.streamt5 as select _wstart c1, count(*) c2, max(a) c3 from st partition by a interval(10s); +sleep 1000 sql insert into t1 values(1648791213000,1,2,3,1.0); sql insert into t2 values(1648791213000,2,2,3,1.0); @@ -584,7 +589,6 @@ sql insert into t3 values(1648791223000,3,2,3,1.0); sql insert into t4 values(1648791223000,4,2,3,1.0); sleep 1000 - sql delete from st where ts = 1648791223000; $loop_count = 0 diff --git a/tests/script/tsim/tagindex/add_index.sim b/tests/script/tsim/tagindex/add_index.sim index e73c7480ac..cf1b5e05e9 100644 --- a/tests/script/tsim/tagindex/add_index.sim +++ b/tests/script/tsim/tagindex/add_index.sim @@ -293,9 +293,9 @@ if $rows != 1 then endi #$drop_name=`$data[0][0]` -#sql drop index `$data[0][0]\` +#sql drop index `$data[0][0]\` -#if $rows != 0 then +#if $rows != 0 then # return -1 #endi diff --git a/tests/script/tsim/tmq/basic1.sim b/tests/script/tsim/tmq/basic1.sim index fe6ec04a20..4ef0c121f6 100644 --- a/tests/script/tsim/tmq/basic1.sim +++ b/tests/script/tsim/tmq/basic1.sim @@ -62,8 +62,8 @@ $keyList = $keyList . , $keyList = $keyList . enable.auto.commit:false #$keyList = $keyList . , #$keyList = $keyList . auto.commit.interval.ms:6000 -#$keyList = $keyList . , -#$keyList = $keyList . auto.offset.reset:earliest +$keyList = $keyList . , +$keyList = $keyList . auto.offset.reset:earliest $keyList = $keyList . ' print ========== key list: $keyList diff --git a/tests/script/tsim/tmq/basic1Of2Cons.sim b/tests/script/tsim/tmq/basic1Of2Cons.sim index c12351cbe8..d2906ec875 100644 --- a/tests/script/tsim/tmq/basic1Of2Cons.sim +++ b/tests/script/tsim/tmq/basic1Of2Cons.sim @@ -62,8 +62,8 @@ $keyList = $keyList . , $keyList = $keyList . enable.auto.commit:false #$keyList = $keyList . , #$keyList = $keyList . auto.commit.interval.ms:6000 -#$keyList = $keyList . , -#$keyList = $keyList . auto.offset.reset:earliest +$keyList = $keyList . , +$keyList = $keyList . auto.offset.reset:earliest $keyList = $keyList . ' print ========== key list: $keyList diff --git a/tests/script/tsim/tmq/basic2.sim b/tests/script/tsim/tmq/basic2.sim index 5c7528ea5d..4477101d0f 100644 --- a/tests/script/tsim/tmq/basic2.sim +++ b/tests/script/tsim/tmq/basic2.sim @@ -62,8 +62,8 @@ $keyList = $keyList . , $keyList = $keyList . enable.auto.commit:false #$keyList = $keyList . , #$keyList = $keyList . auto.commit.interval.ms:6000 -#$keyList = $keyList . , -#$keyList = $keyList . auto.offset.reset:earliest +$keyList = $keyList . , +$keyList = $keyList . auto.offset.reset:earliest $keyList = $keyList . 
' print ========== key list: $keyList diff --git a/tests/script/tsim/tmq/basic2Of2Cons.sim b/tests/script/tsim/tmq/basic2Of2Cons.sim index 23598c17a4..951a1d52fd 100644 --- a/tests/script/tsim/tmq/basic2Of2Cons.sim +++ b/tests/script/tsim/tmq/basic2Of2Cons.sim @@ -62,8 +62,8 @@ $keyList = $keyList . , $keyList = $keyList . enable.auto.commit:false #$keyList = $keyList . , #$keyList = $keyList . auto.commit.interval.ms:6000 -#$keyList = $keyList . , -#$keyList = $keyList . auto.offset.reset:earliest +$keyList = $keyList . , +$keyList = $keyList . auto.offset.reset:earliest $keyList = $keyList . ' print ========== key list: $keyList diff --git a/tests/script/tsim/tmq/basic2Of2ConsOverlap.sim b/tests/script/tsim/tmq/basic2Of2ConsOverlap.sim index 1223a94fa7..8cc447f0c7 100644 --- a/tests/script/tsim/tmq/basic2Of2ConsOverlap.sim +++ b/tests/script/tsim/tmq/basic2Of2ConsOverlap.sim @@ -62,8 +62,8 @@ $keyList = $keyList . , $keyList = $keyList . enable.auto.commit:false #$keyList = $keyList . , #$keyList = $keyList . auto.commit.interval.ms:6000 -#$keyList = $keyList . , -#$keyList = $keyList . auto.offset.reset:earliest +$keyList = $keyList . , +$keyList = $keyList . auto.offset.reset:earliest $keyList = $keyList . ' print ========== key list: $keyList diff --git a/tests/script/tsim/tmq/basic3.sim b/tests/script/tsim/tmq/basic3.sim index 8bb34cefa2..da2bee4f6b 100644 --- a/tests/script/tsim/tmq/basic3.sim +++ b/tests/script/tsim/tmq/basic3.sim @@ -62,8 +62,8 @@ $keyList = $keyList . , $keyList = $keyList . enable.auto.commit:false #$keyList = $keyList . , #$keyList = $keyList . auto.commit.interval.ms:6000 -#$keyList = $keyList . , -#$keyList = $keyList . auto.offset.reset:earliest +$keyList = $keyList . , +$keyList = $keyList . auto.offset.reset:earliest $keyList = $keyList . ' print ========== key list: $keyList diff --git a/tests/script/tsim/tmq/basic3Of2Cons.sim b/tests/script/tsim/tmq/basic3Of2Cons.sim index 75d762c44b..21d691bd9c 100644 --- a/tests/script/tsim/tmq/basic3Of2Cons.sim +++ b/tests/script/tsim/tmq/basic3Of2Cons.sim @@ -62,8 +62,8 @@ $keyList = $keyList . , $keyList = $keyList . enable.auto.commit:false #$keyList = $keyList . , #$keyList = $keyList . auto.commit.interval.ms:6000 -#$keyList = $keyList . , -#$keyList = $keyList . auto.offset.reset:earliest +$keyList = $keyList . , +$keyList = $keyList . auto.offset.reset:earliest $keyList = $keyList . ' print ========== key list: $keyList diff --git a/tests/script/tsim/tmq/basic4.sim b/tests/script/tsim/tmq/basic4.sim index c72d8ff412..adeab58ff2 100644 --- a/tests/script/tsim/tmq/basic4.sim +++ b/tests/script/tsim/tmq/basic4.sim @@ -62,8 +62,8 @@ $keyList = $keyList . , $keyList = $keyList . enable.auto.commit:false #$keyList = $keyList . , #$keyList = $keyList . auto.commit.interval.ms:6000 -#$keyList = $keyList . , -#$keyList = $keyList . auto.offset.reset:earliest +$keyList = $keyList . , +$keyList = $keyList . auto.offset.reset:earliest $keyList = $keyList . ' print ========== key list: $keyList diff --git a/tests/script/tsim/tmq/basic4Of2Cons.sim b/tests/script/tsim/tmq/basic4Of2Cons.sim index bb006a354c..186005b231 100644 --- a/tests/script/tsim/tmq/basic4Of2Cons.sim +++ b/tests/script/tsim/tmq/basic4Of2Cons.sim @@ -62,8 +62,8 @@ $keyList = $keyList . , $keyList = $keyList . enable.auto.commit:false #$keyList = $keyList . , #$keyList = $keyList . auto.commit.interval.ms:6000 -#$keyList = $keyList . , -#$keyList = $keyList . auto.offset.reset:earliest +$keyList = $keyList . , +$keyList = $keyList . 
auto.offset.reset:earliest $keyList = $keyList . ' print ========== key list: $keyList diff --git a/tests/script/tsim/tmq/snapshot.sim b/tests/script/tsim/tmq/snapshot.sim index fbdaba7d28..c0194d98c8 100644 --- a/tests/script/tsim/tmq/snapshot.sim +++ b/tests/script/tsim/tmq/snapshot.sim @@ -62,8 +62,8 @@ $keyList = $keyList . , $keyList = $keyList . enable.auto.commit:false #$keyList = $keyList . , #$keyList = $keyList . auto.commit.interval.ms:6000 -#$keyList = $keyList . , -#$keyList = $keyList . auto.offset.reset:earliest +$keyList = $keyList . , +$keyList = $keyList . auto.offset.reset:earliest $keyList = $keyList . ' print ========== key list: $keyList diff --git a/tests/script/tsim/tmq/snapshot1.sim b/tests/script/tsim/tmq/snapshot1.sim index 5349981cc7..6121692d6c 100644 --- a/tests/script/tsim/tmq/snapshot1.sim +++ b/tests/script/tsim/tmq/snapshot1.sim @@ -62,8 +62,8 @@ $keyList = $keyList . , $keyList = $keyList . enable.auto.commit:false #$keyList = $keyList . , #$keyList = $keyList . auto.commit.interval.ms:6000 -#$keyList = $keyList . , -#$keyList = $keyList . auto.offset.reset:earliest +$keyList = $keyList . , +$keyList = $keyList . auto.offset.reset:earliest $keyList = $keyList . ' print ========== key list: $keyList
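The tmq .sim hunks above switch auto.offset.reset:earliest from a commented-out line to an explicit setting: a consumer that does not set the option now starts from the latest offset and would miss the rows inserted before it subscribes (the tmqParamsTest.py hunk further down changes the expected default to "latest" for the same reason). A minimal sketch of the equivalent setting from Python, assuming the kafka-style Consumer API in taospy is available; the group and topic names are placeholders:

    from taos.tmq import Consumer

    # Placeholder names; assumes a running TDengine with a topic already created.
    consumer = Consumer({
        "group.id": "grp1",
        "td.connect.ip": "localhost",
        "enable.auto.commit": "false",
        # Without this line the consumer starts at "latest" and only sees rows
        # written after it subscribes, which is why the .sim tests enable it.
        "auto.offset.reset": "earliest",
    })
    consumer.subscribe(["topic_name"])
    msg = consumer.poll(1)               # returns None if nothing arrives within 1s
    if msg is not None and msg.error() is None:
        for block in msg.value():        # value() yields blocks of rows
            print(block.fetchall())
    consumer.close()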
diff --git a/tests/script/tsim/user/privilege_create_db.sim b/tests/script/tsim/user/privilege_create_db.sim index c81bd1b258..f199e2ee9c 100644 --- a/tests/script/tsim/user/privilege_create_db.sim +++ b/tests/script/tsim/user/privilege_create_db.sim @@ -68,10 +68,10 @@ print =============connect with root, revoke read from u1, all from u2 sql connect sql revoke read on u1_d1.* from u1 sql revoke all on u2_d1.* from u2 -sleep 1000 print =============connect with u1 sql connect u1 +sql reset query cache sql insert into u1_d1.t1 values(now, 1) sql_error select * from u1_d1.t1; @@ -85,9 +85,9 @@ sql connect sql grant read on u1_d1.* to u1 sql grant all on u2_d1.* to u2 -sleep 1000 print =============connect with u1 sql connect u1 +sql reset query cache sql select * from u1_d1.t1; sql insert into u1_d1.t1 values(now, 2) diff --git a/tests/system-test/0-others/compatibility.py b/tests/system-test/0-others/compatibility.py index cb804aad0c..83bfb2bed7 100644 --- a/tests/system-test/0-others/compatibility.py +++ b/tests/system-test/0-others/compatibility.py @@ -30,7 +30,7 @@ class TDTestCase: self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor()) - self.deletedDataSql= '''drop database if exists deldata;create database deldata duration 300;use deldata; + self.deletedDataSql= '''drop database if exists deldata;create database deldata duration 300 stt_trigger 4;use deldata; create table deldata.stb1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) tags (t1 int); create table deldata.ct1 using deldata.stb1 tags ( 1 ); insert into deldata.ct1 values ( now()-0s, 0, 0, 0, 0, 0.0, 0.0, 0, 'binary0', 'nchar0', now()+0a ) ( now()-10s, 1, 11111, 111, 11, 1.11, 11.11, 1, 'binary1', 'nchar1', now()+1a ) ( now()-20s, 2, 22222, 222, 22, 2.22, 22.22, 0, 'binary2', 'nchar2', now()+2a ) ( now()-30s, 3, 33333, 333, 33, 3.33, 33.33, 1, 'binary3', 'nchar3', now()+3a ); @@ -38,7 +38,9 @@ class TDTestCase: delete from deldata.stb1; flush database deldata; insert into deldata.ct1 values ( now()-0s, 0, 0, 0, 0, 0.0, 0.0, 0, 'binary0', 'nchar0', now()+0a ) ( now()-10s, 1, 11111, 111, 11, 1.11, 11.11, 1, 'binary1', 'nchar1', now()+1a ) ( now()-20s, 2, 22222, 222, 22, 2.22, 22.22, 0, 'binary2', 'nchar2', now()+2a ) ( now()-30s, 3, 33333, 333, 33, 3.33, 33.33, 1, 'binary3', 'nchar3', now()+3a ); - delete from deldata.ct1;''' + delete from deldata.ct1; + insert into deldata.ct1 values ( now()-0s, 0, 0, 0, 0, 0.0, 0.0, 0, 'binary0', 'nchar0', now()+0a ); + flush database deldata;''' def checkProcessPid(self,processName): i=0 while i<60: @@ -262,7 +264,7 @@ class TDTestCase: if self.is_list_same_as_ordered_list(resultList,expectList): print("The unordered list is the same as the ordered list.") else: - tdlog.error("The unordered list is not the same as the ordered list.") + tdLog.exit("The unordered list is not the same as the ordered list.") tdsql.execute("insert into test.d80 values (now+1s, 11, 103, 0.21);") tdsql.execute("insert into test.d9 values (now+5s, 4.3, 104, 0.4);") diff --git a/tests/system-test/0-others/information_schema.py b/tests/system-test/0-others/information_schema.py index bc6683e59f..51347f5f64 100644 --- a/tests/system-test/0-others/information_schema.py +++ b/tests/system-test/0-others/information_schema.py @@ -217,7 +217,7 @@ class TDTestCase: tdSql.checkEqual(20470,len(tdSql.queryResult)) tdSql.query("select * from information_schema.ins_columns where db_name ='information_schema'") - tdSql.checkEqual(195, len(tdSql.queryResult)) + tdSql.checkEqual(198, len(tdSql.queryResult)) tdSql.query("select * from information_schema.ins_columns where db_name ='performance_schema'") tdSql.checkEqual(54, len(tdSql.queryResult)) diff --git a/tests/system-test/0-others/show.py b/tests/system-test/0-others/show.py index 4ef323db22..75d7116e03 100644 --- a/tests/system-test/0-others/show.py +++ b/tests/system-test/0-others/show.py @@ -210,6 +210,27 @@ class TDTestCase: licences_info = tdSql.queryResult tdSql.checkEqual(grants_info,licences_info) + def show_column_name(self): + tdSql.execute("create database db;") + tdSql.execute("use db;") + tdSql.execute("create table ta(ts timestamp, name nchar(16), age int , address int);") + tdSql.execute("insert into ta values(now, 'jack', 19, 23);") + + colName1 = ["ts","name","age","address"] + colName2 = tdSql.getColNameList("select last(*) from ta;") + for i in range(len(colName1)): + if colName2[i] != f"last({colName1[i]})": + tdLog.exit(f"column name is different. {colName2} != last({colName1[i]})") + return + + # alter option + tdSql.execute("alter local 'keepColumnName' '1';") + colName3 = tdSql.getColNameList("select last(*) from ta;") + # keepColumnName=1 makes last(*) keep the original column names + if colName1 != colName3: + tdLog.exit(f"column name is different. colName1={colName1} colName3={colName3}") + return + def run(self): self.check_gitinfo() self.show_base() @@ -218,6 +239,7 @@ class TDTestCase: self.show_create_sql() self.show_create_sysdb_sql() self.show_create_systb_sql() + self.show_column_name() def stop(self): tdSql.close()
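The new show_column_name case exercises the client-side keepColumnName option: by default the columns of "select last(*)" come back named last(ts), last(name), and so on, while "alter local 'keepColumnName' '1'" makes them keep the original names. A rough illustration of what the test asserts, assuming the taospy DB-API cursor exposes column names through its description attribute:

    import taos

    conn = taos.connect()  # assumes the db.ta table created by the test above
    cur = conn.cursor()

    cur.execute("select last(*) from db.ta")
    print([col[0] for col in cur.description])   # expected: ['last(ts)', 'last(name)', ...]

    cur.execute("alter local 'keepColumnName' '1'")
    cur.execute("select last(*) from db.ta")
    print([col[0] for col in cur.description])   # expected: ['ts', 'name', 'age', 'address']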
diff --git a/tests/system-test/0-others/show_tag_index.py b/tests/system-test/0-others/show_tag_index.py index d39f9eaab9..c79880ba35 100644 --- a/tests/system-test/0-others/show_tag_index.py +++ b/tests/system-test/0-others/show_tag_index.py @@ -180,6 +180,13 @@ class TDTestCase: tdSql.error(f'show indexes from db.ctb1 from db') tdSql.error(f'show indexes from `db`.`ctb1` from db') + # check error information + tdSql.error(f'create index idx1 on db2.stb (t1);', expectErrInfo='Database not exist') + tdSql.error(f'use db2;', expectErrInfo='Database not exist') + tdSql.error(f'alter stable db2.stb add column c2 int;', expectErrInfo='Database not exist') + + + def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) diff --git a/tests/system-test/0-others/splitVGroupRep1.py b/tests/system-test/0-others/splitVGroupRep1.py index b119ba0a32..0b75a3e6e1 100644 --- a/tests/system-test/0-others/splitVGroupRep1.py +++ b/tests/system-test/0-others/splitVGroupRep1.py @@ -223,7 +223,7 @@ class TDTestCase: start1 = time.time() rows1 = tdSql.query(sql1) spend1 = time.time() - start1 - res1 = copy.copy(tdSql.queryResult) + res1 = copy.deepcopy(tdSql.queryResult) sql2 = sql.replace('@db_name', self.db2) tdLog.info(sql2) @@ -234,6 +234,7 @@ class TDTestCase: rowlen1 = len(res1) rowlen2 = len(res2) + errCnt = 0 if rowlen1 != rowlen2: tdLog.exit(f"both row count not equal. rowlen1={rowlen1} rowlen2={rowlen2} ") @@ -249,8 +250,11 @@ class TDTestCase: return False for j in range(collen1): if row1[j] != row2[j]: - tdLog.exit(f"both col not equal. row={i} col={j} col1={row1[j]} col2={row2[j]} .") - return False + tdLog.info(f"column values not equal. row={i} col={j} col1={row1[j]} col2={row2[j]}") + errCnt += 1 + + if errCnt > 0: + tdLog.exit(f"db2 column values differ from db1. different count={errCnt}") # warning performance diff = (spend2 - spend1)*100/spend1 @@ -391,7 +395,7 @@ class TDTestCase: tdSql.execute("use topicdb;") tdSql.execute("create table ta(ts timestamp, age int);") tdSql.execute("create topic toa as select * from ta;") - + #self.expectSplitError("topicdb") tdSql.execute("drop topic toa;") self.expectSplitOk("topicdb") @@ -409,6 +413,9 @@ class TDTestCase: # prepare env self.prepareEnv() + tdLog.info("check db1 and db2 same after creating ...") + self.checkResult() + for i in range(3): # split vgroup on db2 start = time.time() diff --git a/tests/system-test/0-others/splitVGroupRep3.py b/tests/system-test/0-others/splitVGroupRep3.py index 68c915eeaf..d45b037b5a 100644 --- a/tests/system-test/0-others/splitVGroupRep3.py +++ b/tests/system-test/0-others/splitVGroupRep3.py @@ -233,6 +233,7 @@ class TDTestCase: rowlen1 = len(res1) rowlen2 = len(res2) + errCnt = 0 if rowlen1 != rowlen2: tdLog.exit(f"both row count not equal. rowlen1={rowlen1} rowlen2={rowlen2} ") @@ -248,8 +249,11 @@ class TDTestCase: return False for j in range(collen1): if row1[j] != row2[j]: - tdLog.exit(f"both col not equal. row={i} col={j} col1={row1[j]} col2={row2[j]} .") - return False + tdLog.info(f"column values not equal. row={i} col={j} col1={row1[j]} col2={row2[j]}") + errCnt += 1 + + if errCnt > 0: + tdLog.exit(f"db2 column values differ from db1. different count={errCnt}") # warning performance diff = (spend2 - spend1)*100/spend1
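The splitVGroupRep1.py hunk above swaps copy.copy for copy.deepcopy when snapshotting tdSql.queryResult. A shallow copy only duplicates the outer list, so if the framework reuses the inner row objects when running the second query, the saved first result silently mutates and the row-by-row comparison becomes meaningless. A plain-Python demonstration of the difference:

    import copy

    rows = [[1, "a"], [2, "b"]]      # stand-in for tdSql.queryResult
    shallow = copy.copy(rows)        # new outer list, same inner row objects
    deep = copy.deepcopy(rows)       # fully independent snapshot

    rows[0][1] = "mutated"           # simulates the next query reusing row buffers
    print(shallow[0])                # [1, 'mutated'] -- the "saved" result changed too
    print(deep[0])                   # [1, 'a']       -- still the original data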
diff --git a/tests/system-test/0-others/ttl.py b/tests/system-test/0-others/ttl.py index 32b18c6bbb..6ae6edfe5d 100644 --- a/tests/system-test/0-others/ttl.py +++ b/tests/system-test/0-others/ttl.py @@ -7,7 +7,7 @@ from util.dnodes import * class TDTestCase: updatecfgDict = {'ttlUnit': 1, "ttlPushInterval": 1, "ttlChangeOnWrite": 0} - + def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") @@ -21,7 +21,8 @@ class TDTestCase: tdSql.execute(f'create table {self.dbname}.t2(ts timestamp, c1 int) ttl {self.ttl}') tdSql.query(f'show {self.dbname}.tables') tdSql.checkRows(2) - + tdSql.execute(f'flush database {self.dbname}') + time.sleep(self.ttl + 2) tdSql.query(f'show {self.dbname}.tables') tdSql.checkRows(1) diff --git a/tests/system-test/0-others/ttlChangeOnWrite.py b/tests/system-test/0-others/ttlChangeOnWrite.py index 7bb10e25e8..16c6585e07 100644 --- a/tests/system-test/0-others/ttlChangeOnWrite.py +++ b/tests/system-test/0-others/ttlChangeOnWrite.py @@ -6,9 +6,9 @@ from util.dnodes import * class TDTestCase: - updatecfgDict = {'ttlUnit': 1, "ttlPushInterval": 3, "ttlChangeOnWrite": 1, "trimVDbIntervalSec": 360, + updatecfgDict = {'ttlUnit': 1, "ttlPushInterval": 3, "ttlChangeOnWrite": 1, "trimVDbIntervalSec": 360, "ttlFlushThreshold": 100, "ttlBatchDropNum": 10} - + def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") @@ -16,15 +16,16 @@ class TDTestCase: self.ttl = 5 self.tables = 100 self.dbname = "test" - + def check_batch_drop_num(self): tdSql.execute(f'create database {self.dbname} vgroups 1') tdSql.execute(f'use {self.dbname}') tdSql.execute(f'create table stb(ts timestamp, c1 int) tags(t1 int)') for i in range(self.tables): tdSql.execute(f'create table t{i} using stb tags({i}) ttl {self.ttl}') - - time.sleep(self.ttl + 3) + + tdSql.execute(f'flush database {self.dbname}') + time.sleep(self.ttl + self.updatecfgDict['ttlPushInterval'] + 1) tdSql.query('show tables') tdSql.checkRows(90) @@ -35,14 +36,17 @@ class TDTestCase: tdSql.execute(f'create table {self.dbname}.t2(ts timestamp, c1 int) ttl {self.ttl}') tdSql.query(f'show {self.dbname}.tables') tdSql.checkRows(2) - - time.sleep(self.ttl) + + tdSql.execute(f'flush database {self.dbname}') + time.sleep(self.ttl - 1) tdSql.execute(f'insert into {self.dbname}.t2 values(now, 1)'); - - time.sleep(self.ttl) + + tdSql.execute(f'flush database {self.dbname}') + time.sleep(self.ttl - 1) tdSql.query(f'show {self.dbname}.tables') tdSql.checkRows(2) - + + tdSql.execute(f'flush database {self.dbname}') time.sleep(self.ttl * 2) tdSql.query(f'show {self.dbname}.tables') tdSql.checkRows(1) diff --git a/tests/system-test/0-others/user_privilege_multi_users.py b/tests/system-test/0-others/user_privilege_multi_users.py index 8812f42e7b..53ff136e63 100644 --- a/tests/system-test/0-others/user_privilege_multi_users.py +++ b/tests/system-test/0-others/user_privilege_multi_users.py @@ -107,6 +107,7 @@ class TDTestCase: tdLog.debug("case passed") else: tdLog.exit("The privilege number in information_schema.ins_user_privileges is incorrect") + tdSql.query(f"select * from information_schema.ins_columns where db_name='{self.dbname}';") def stop(self): # remove the privilege
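The ttl.py and ttlChangeOnWrite.py hunks above all follow the same pattern: flush the database before waiting, so the TTL monitor works from committed metadata, and size the sleep from ttlPushInterval rather than a guess. Fixed sleeps remain timing-sensitive; a polling helper along the following lines would be more robust (an illustrative sketch written against the test framework's tdSql wrapper, not part of it):

    import time

    def wait_for_table_count(tdSql, dbname, expected, timeout=30):
        """Flush, then poll `show <db>.tables` until the TTL monitor has
        dropped the expired tables or the timeout expires."""
        tdSql.execute(f"flush database {dbname}")   # make metadata durable first
        deadline = time.time() + timeout
        while time.time() < deadline:
            tdSql.query(f"show {dbname}.tables")
            if len(tdSql.queryResult) == expected:
                return
            time.sleep(1)
        raise TimeoutError(f"expected {expected} tables in {dbname}, "
                           f"still {len(tdSql.queryResult)} after {timeout}s")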
diff --git a/tests/system-test/0-others/walRetention.py b/tests/system-test/0-others/walRetention.py index 0fdeb84a5b..53316fc88b 100644 --- a/tests/system-test/0-others/walRetention.py +++ b/tests/system-test/0-others/walRetention.py @@ -109,11 +109,14 @@ class VNode : # load config tdLog.info(f' meta-ver file={metaFile}') if metaFile != "": - jsonVer = jsonFromFile(metaFile) - metaNode = jsonVer["meta"] - self.snapVer = int(metaNode["snapshotVer"]) - self.firstVer = int(metaNode["firstVer"]) - self.lastVer = int(metaNode["lastVer"]) + try: + jsonVer = jsonFromFile(metaFile) + metaNode = jsonVer["meta"] + self.snapVer = int(metaNode["snapshotVer"]) + self.firstVer = int(metaNode["firstVer"]) + self.lastVer = int(metaNode["lastVer"]) + except Exception as e: + tdLog.info(f'failed to read meta-ver file {metaFile}: {e}') # sort with startVer self.walFiles = sorted(self.walFiles, key=lambda x : x.startVer, reverse=True) diff --git a/tests/system-test/1-insert/delete_data.py b/tests/system-test/1-insert/delete_data.py index aaad723b89..ffeb9e23a9 100644 --- a/tests/system-test/1-insert/delete_data.py +++ b/tests/system-test/1-insert/delete_data.py @@ -14,6 +14,7 @@ import random import string +import time from numpy import logspace from util import constant @@ -298,13 +299,37 @@ class TDTestCase: tdSql.query(f'select {func}(*) from {self.stbname}') tdSql.execute(f'drop table {self.stbname}') tdSql.execute(f'drop database {self.dbname}') + + def FIX_TS_3987(self): + tdSql.execute("create database db duration 1d vgroups 1;") + tdSql.execute("use db;") + tdSql.execute("create table t (ts timestamp, a int);") + tdSql.execute("insert into t values (1694681045000, 1);") + tdSql.execute("select * from t;") + tdSql.execute("flush database db;") + tdSql.execute("select * from t;") + tdSql.execute("delete from t where ts = 1694681045000;") + tdSql.execute("select * from t;") + tdSql.execute("insert into t values (1694581045000, 2);") + tdSql.execute("select * from t;") + tdSql.execute("flush database db;") + tdSql.query("select * from t;") + time.sleep(5) + tdSql.query("select * from t;") + + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1694581045000) + tdSql.checkData(0, 1, 2) + def run(self): + self.FIX_TS_3987() self.delete_data_ntb() self.delete_data_ctb() self.delete_data_stb() tdDnodes.stoptaosd(1) tdDnodes.starttaosd(1) self.delete_data_ntb() + def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) diff --git a/tests/system-test/1-insert/table_param_ttl.py b/tests/system-test/1-insert/table_param_ttl.py index 6cc978a76c..f36a49a1d7 100644 --- a/tests/system-test/1-insert/table_param_ttl.py +++ b/tests/system-test/1-insert/table_param_ttl.py @@ -35,6 +35,7 @@ class TDTestCase: tdSql.execute(f'create table db.{self.ntbname}_{i} (ts timestamp,c0 int) ttl {self.ttl_param}') tdSql.query(f'show db.tables') tdSql.checkRows(self.tbnum) + tdSql.execute(f'flush database db') sleep(self.updatecfgDict['ttlUnit']*self.ttl_param+self.updatecfgDict['ttlPushInterval'] + 1) tdSql.query(f'show db.tables') tdSql.checkRows(0) @@ -42,6 +43,7 @@ class TDTestCase: tdSql.execute(f'create table db.{self.ntbname}_{i} (ts timestamp,c0 int) ttl {self.default_ttl}') for i in range(int(self.tbnum/2)): tdSql.execute(f'alter table db.{self.ntbname}_{i} ttl {self.modify_ttl}') + tdSql.execute(f'flush database db') sleep(self.updatecfgDict['ttlUnit']*self.modify_ttl+self.updatecfgDict['ttlPushInterval'] + 1) tdSql.query(f'show db.tables') tdSql.checkRows(self.tbnum - int(self.tbnum/2)) @@ -54,6 +56,7 @@ class TDTestCase: tdSql.execute(f'create table db.{self.stbname}_{i} using db.{self.stbname} tags({i}) ttl {self.ttl_param}') tdSql.query(f'show db.tables')
tdSql.checkRows(self.tbnum) + tdSql.execute(f'flush database db') sleep(self.updatecfgDict['ttlUnit']*self.ttl_param+self.updatecfgDict['ttlPushInterval'] + 1) tdSql.query(f'show db.tables') tdSql.checkRows(0) @@ -63,6 +66,7 @@ class TDTestCase: tdSql.checkRows(self.tbnum) for i in range(int(self.tbnum/2)): tdSql.execute(f'alter table db.{self.stbname}_{i} ttl {self.modify_ttl}') + tdSql.execute(f'flush database db') sleep(self.updatecfgDict['ttlUnit']*self.modify_ttl+self.updatecfgDict['ttlPushInterval'] + 1) tdSql.query(f'show db.tables') tdSql.checkRows(self.tbnum - int(self.tbnum/2)) @@ -75,6 +79,7 @@ class TDTestCase: tdSql.execute(f'insert into db.{self.stbname}_{i} using db.{self.stbname} tags({i}) ttl {self.ttl_param} values(now,1)') tdSql.query(f'show db.tables') tdSql.checkRows(self.tbnum) + tdSql.execute(f'flush database db') sleep(self.updatecfgDict['ttlUnit']*self.ttl_param+self.updatecfgDict['ttlPushInterval'] + 1) tdSql.query(f'show db.tables') tdSql.checkRows(0) diff --git a/tests/system-test/2-query/diff.py b/tests/system-test/2-query/diff.py index c6f233eefa..10e16a690f 100644 --- a/tests/system-test/2-query/diff.py +++ b/tests/system-test/2-query/diff.py @@ -16,10 +16,42 @@ class TDTestCase: self.perfix = 'dev' self.tables = 10 + def check_result(self): + for i in range(self.rowNum): + tdSql.checkData(i, 0, 1); + + def full_datatype_test(self): + tdSql.execute("use db;") + sql = "create table db.st(ts timestamp, c1 bool, c2 float, c3 double,c4 tinyint, c5 smallint, c6 int, c7 bigint, c8 tinyint unsigned, c9 smallint unsigned, c10 int unsigned, c11 bigint unsigned) tags( area int);" + tdSql.execute(sql) + + sql = "create table db.t1 using db.st tags(1);" + tdSql.execute(sql) + + ts = 1694000000000 + rows = 126 + for i in range(rows): + ts += 1 + sql = f"insert into db.t1 values({ts},true,{i},{i},{i%127},{i%32767},{i},{i},{i%127},{i%32767},{i},{i});" + tdSql.execute(sql) + + sql = "select diff(ts),diff(c1),diff(c3),diff(c4),diff(c5),diff(c6),diff(c7),diff(c8),diff(c9),diff(c10),diff(c11) from db.t1" + tdSql.query(sql) + tdSql.checkRows(rows - 1) + for i in range(rows - 1): + for j in range(10): + if j == 1: # bool + tdSql.checkData(i, j, 0) + else: + tdSql.checkData(i, j, 1) def run(self): tdSql.prepare() dbname = "db" + + # full type test + self.full_datatype_test() + tdSql.execute( f"create table {dbname}.ntb(ts timestamp,c1 int,c2 double,c3 float)") tdSql.execute( @@ -179,11 +211,6 @@ class TDTestCase: tdSql.error(f"select diff(col8) from {dbname}.stb_1") tdSql.error(f"select diff(col9) from {dbname}.stb") tdSql.error(f"select diff(col9) from {dbname}.stb_1") - tdSql.error(f"select diff(col11) from {dbname}.stb_1") - tdSql.error(f"select diff(col12) from {dbname}.stb_1") - tdSql.error(f"select diff(col13) from {dbname}.stb_1") - tdSql.error(f"select diff(col14) from {dbname}.stb_1") - tdSql.error(f"select diff(col14) from {dbname}.stb_1") tdSql.error(f"select diff(col1,col1,col1) from {dbname}.stb_1") tdSql.error(f"select diff(col1,1,col1) from {dbname}.stb_1") tdSql.error(f"select diff(col1,col1,col) from {dbname}.stb_1") @@ -217,6 +244,22 @@ class TDTestCase: tdSql.query(f"select diff(col6) from {dbname}.stb_1") tdSql.checkRows(10) + tdSql.query(f"select diff(col11) from {dbname}.stb_1") + tdSql.checkRows(10) + self.check_result() + + tdSql.query(f"select diff(col12) from {dbname}.stb_1") + tdSql.checkRows(10) + self.check_result() + + tdSql.query(f"select diff(col13) from {dbname}.stb_1") + tdSql.checkRows(10) + self.check_result() + + tdSql.query(f"select 
diff(col14) from {dbname}.stb_1") + tdSql.checkRows(10) + self.check_result() + tdSql.execute(f'''create table {dbname}.stb1(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''') tdSql.execute(f"create table {dbname}.stb1_1 using {dbname}.stb tags('shanghai')") diff --git a/tests/system-test/2-query/interval_limit_opt.py b/tests/system-test/2-query/interval_limit_opt.py index 851138fed3..492f453de5 100644 --- a/tests/system-test/2-query/interval_limit_opt.py +++ b/tests/system-test/2-query/interval_limit_opt.py @@ -174,61 +174,6 @@ class TDTestCase: for offset in range(0, 1000, 500): self.test_interval_limit_asc(offset) self.test_interval_limit_desc(offset) - self.test_interval_fill_limit(offset) - self.test_interval_order_by_limit(offset) - self.test_interval_partition_by_slimit(offset) - - def test_interval_fill_limit(self, offset: int = 0): - sqls = [ - "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \ - where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-09-17 09:30:00.000' interval(1s) fill(linear)", - "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \ - where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-09-17 09:30:00.000' interval(1m) fill(linear)", - "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \ - where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-09-17 09:30:00.000' interval(1h) fill(linear)", - "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \ - where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-09-17 09:30:00.000' interval(1d) fill(linear)" - ] - for sql in sqls: - self.query_and_check_with_limit(sql, 5000, 1000, offset) - - def test_interval_order_by_limit(self, offset: int = 0): - sqls = [ - "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \ - where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) order by b", - "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \ - where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) order by a desc", - "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), last(ts) from meters \ - where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) order by a desc", - "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \ - where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) order by count(*), sum(c1), a", - "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \ - where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) order by a, count(*), sum(c1)", - "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \ - where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) fill(linear) order by b", - "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \ - where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) fill(linear) order by a desc", - "select _wstart as a, _wend as b, count(*), sum(c1), last(c2), first(ts) from meters \ - where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 
09:30:00.000' interval(1m) fill(linear) order by a desc", - "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \ - where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) fill(linear) order by count(*), sum(c1), a", - "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \ - where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) fill(linear) order by a, count(*), sum(c1)", - ] - for sql in sqls: - self.query_and_check_with_limit(sql, 6000, 2000, offset) - - def test_interval_partition_by_slimit(self, offset: int = 0): - sqls = [ - "select _wstart as a, _wend as b, count(*), sum(c1), last(c2), first(ts) from meters " - "where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' partition by t1 interval(1m)", - "select _wstart as a, _wend as b, count(*), sum(c1), last(c2), first(ts) from meters " - "where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' partition by t1 interval(1h)", - "select _wstart as a, _wend as b, count(*), sum(c1), last(c2), first(ts) from meters " - "where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' partition by c3 interval(1m)", - ] - for sql in sqls: - self.query_and_check_with_slimit(sql, 10, 2, offset) def test_interval_partition_by_slimit_limit(self): sql = "select * from (select _wstart as a, _wend as b, count(*), sum(c1), last(c2), first(ts),c3 from meters " \ diff --git a/tests/system-test/2-query/interval_limit_opt_2.py b/tests/system-test/2-query/interval_limit_opt_2.py new file mode 100644 index 0000000000..cadb32b388 --- /dev/null +++ b/tests/system-test/2-query/interval_limit_opt_2.py @@ -0,0 +1,222 @@ +import taos +import sys +import time +import socket +import os +import threading +import math + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.common import * +# from tmqCommon import * + +class TDTestCase: + def __init__(self): + self.vgroups = 4 + self.ctbNum = 10 + self.rowsPerTbl = 10000 + self.duraion = '1h' + + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor(), False) + + def create_database(self,tsql, dbName,dropFlag=1,vgroups=2,replica=1, duration:str='1d'): + if dropFlag == 1: + tsql.execute("drop database if exists %s"%(dbName)) + + tsql.execute("create database if not exists %s vgroups %d replica %d duration %s"%(dbName, vgroups, replica, duration)) + tdLog.debug("complete to create database %s"%(dbName)) + return + + def create_stable(self,tsql, paraDict): + colString = tdCom.gen_column_type_str(colname_prefix=paraDict["colPrefix"], column_elm_list=paraDict["colSchema"]) + tagString = tdCom.gen_tag_type_str(tagname_prefix=paraDict["tagPrefix"], tag_elm_list=paraDict["tagSchema"]) + sqlString = f"create table if not exists %s.%s (%s) tags (%s)"%(paraDict["dbName"], paraDict["stbName"], colString, tagString) + tdLog.debug("%s"%(sqlString)) + tsql.execute(sqlString) + return + + def create_ctable(self,tsql=None, dbName='dbx',stbName='stb',ctbPrefix='ctb',ctbNum=1,ctbStartIdx=0): + for i in range(ctbNum): + sqlString = "create table %s.%s%d using %s.%s tags(%d, 'tb%d', 'tb%d', %d, %d, %d)" % \ + (dbName,ctbPrefix,i+ctbStartIdx,dbName,stbName,(i+ctbStartIdx) % 5,i+ctbStartIdx,i+ctbStartIdx,i+ctbStartIdx,i+ctbStartIdx,i+ctbStartIdx) + tsql.execute(sqlString) + + tdLog.debug("complete to create %d child 
tables by %s.%s" %(ctbNum, dbName, stbName)) + return + + def insert_data(self,tsql,dbName,ctbPrefix,ctbNum,rowsPerTbl,batchNum,startTs,tsStep): + tdLog.debug("start to insert data ............") + tsql.execute("use %s" %dbName) + pre_insert = "insert into " + sql = pre_insert + + for i in range(ctbNum): + rowsBatched = 0 + sql += " %s%d values "%(ctbPrefix,i) + for j in range(rowsPerTbl): + if (i < ctbNum/2): + sql += "(%d, %d, %d, %d,%d,%d,%d,true,'binary%d', 'nchar%d') "%(startTs + j*tsStep, j%10, j%10, j%10, j%10, j%10, j%10, j%10, j%10) + else: + sql += "(%d, %d, NULL, %d,NULL,%d,%d,true,'binary%d', 'nchar%d') "%(startTs + j*tsStep, j%10, j%10, j%10, j%10, j%10, j%10) + rowsBatched += 1 + if ((rowsBatched == batchNum) or (j == rowsPerTbl - 1)): + tsql.execute(sql) + rowsBatched = 0 + if j < rowsPerTbl - 1: + sql = "insert into %s%d values " %(ctbPrefix,i) + else: + sql = "insert into " + if sql != pre_insert: + tsql.execute(sql) + tdLog.debug("insert data ............ [OK]") + return + + def prepareTestEnv(self): + tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") + paraDict = {'dbName': 'test', + 'dropFlag': 1, + 'vgroups': 2, + 'stbName': 'meters', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'FLOAT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'smallint', 'count':1},{'type': 'tinyint', 'count':1},{'type': 'bool', 'count':1},{'type': 'binary', 'len':10, 'count':1},{'type': 'nchar', 'len':10, 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'nchar', 'len':20, 'count':1},{'type': 'binary', 'len':20, 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'smallint', 'count':1},{'type': 'DOUBLE', 'count':1}], + 'ctbPrefix': 't', + 'ctbStartIdx': 0, + 'ctbNum': 100, + 'rowsPerTbl': 10000, + 'batchNum': 3000, + 'startTs': 1537146000000, + 'tsStep': 600000} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + tdLog.info("create database") + self.create_database(tsql=tdSql, dbName=paraDict["dbName"], dropFlag=paraDict["dropFlag"], vgroups=paraDict["vgroups"], replica=self.replicaVar, duration=self.duraion) + + tdLog.info("create stb") + self.create_stable(tsql=tdSql, paraDict=paraDict) + + tdLog.info("create child tables") + self.create_ctable(tsql=tdSql, dbName=paraDict["dbName"], \ + stbName=paraDict["stbName"],ctbPrefix=paraDict["ctbPrefix"],\ + ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict["ctbStartIdx"]) + self.insert_data(tsql=tdSql, dbName=paraDict["dbName"],\ + ctbPrefix=paraDict["ctbPrefix"],ctbNum=paraDict["ctbNum"],\ + rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],\ + startTs=paraDict["startTs"],tsStep=paraDict["tsStep"]) + return + + def check_first_rows(self, all_rows, limited_rows, offset: int = 0): + for i in range(0, len(limited_rows) - 1): + if limited_rows[i] != all_rows[i + offset]: + tdLog.info("row: %d, row in all: %s" % (i+offset+1, str(all_rows[i+offset]))) + tdLog.info("row: %d, row in limted: %s" % (i+1, str(limited_rows[i]))) + tdLog.exit("row data check failed") + tdLog.info("all rows are the same as query without limit..") + + def query_and_check_with_slimit(self, sql: str, max_limit: int, step: int, offset: int = 0): + self.query_and_check_with_limit(sql, max_limit, step, offset, ' slimit ') + + def query_and_check_with_limit(self, sql: str, max_limit: int, step: int, offset: int = 0, limit_str: str = ' limit '): + for limit in 
range(0, max_limit, step): + limited_sql = sql + limit_str + str(offset) + "," + str(limit) + tdLog.info("query with sql: %s " % (sql) + limit_str + " %d,%d" % (offset, limit)) + all_rows = tdSql.getResult(sql) + limited_rows = tdSql.getResult(limited_sql) + tdLog.info("all rows: %d, limited rows: %d" % (len(all_rows), len(limited_rows))) + if limit_str == ' limit ': + if limit + offset <= len(all_rows) and len(limited_rows) != limit: + tdLog.exit("limited sql has less rows than limit value which is not right, \ + limit: %d, limited_rows: %d, all_rows: %d, offset: %d" % (limit, len(limited_rows), len(all_rows), offset)) + elif limit + offset > len(all_rows) and offset < len(all_rows) and offset + len(limited_rows) != len(all_rows): + tdLog.exit("limited sql has less rows than all_rows which is not right, \ + limit: %d, limited_rows: %d, all_rows: %d, offset: %d" % (limit, len(limited_rows), len(all_rows), offset)) + elif offset >= len(all_rows) and len(limited_rows) != 0: + tdLog.exit("limited rows should be zero, \ + limit: %d, limited_rows: %d, all_rows: %d, offset: %d" % (limit, len(limited_rows), len(all_rows), offset)) + + self.check_first_rows(all_rows, limited_rows, offset) + + def test_interval_limit_offset(self): + for offset in range(0, 1000, 500): + self.test_interval_fill_limit(offset) + self.test_interval_order_by_limit(offset) + self.test_interval_partition_by_slimit(offset) + + def test_interval_fill_limit(self, offset: int = 0): + sqls = [ + "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \ + where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-09-17 09:30:00.000' interval(1s) fill(linear)", + "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \ + where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-09-17 09:30:00.000' interval(1m) fill(linear)", + "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \ + where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-09-17 09:30:00.000' interval(1h) fill(linear)", + "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \ + where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-09-17 09:30:00.000' interval(1d) fill(linear)" + ] + for sql in sqls: + self.query_and_check_with_limit(sql, 5000, 1000, offset) + + def test_interval_order_by_limit(self, offset: int = 0): + sqls = [ + "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \ + where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) order by b", + "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \ + where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) order by a desc", + "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), last(ts) from meters \ + where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) order by a desc", + "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \ + where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) order by count(*), sum(c1), a", + "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \ + where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) order by a, count(*), sum(c1)", + "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \ + where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 
09:30:00.000' interval(1m) fill(linear) order by b", + "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \ + where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) fill(linear) order by a desc", + "select _wstart as a, _wend as b, count(*), sum(c1), last(c2), first(ts) from meters \ + where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) fill(linear) order by a desc", + "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \ + where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) fill(linear) order by count(*), sum(c1), a", + "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \ + where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) fill(linear) order by a, count(*), sum(c1)", + ] + for sql in sqls: + self.query_and_check_with_limit(sql, 6000, 2000, offset) + + def test_interval_partition_by_slimit(self, offset: int = 0): + sqls = [ + "select _wstart as a, _wend as b, count(*), sum(c1), last(c2), first(ts) from meters " + "where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' partition by t1 interval(1m)", + "select _wstart as a, _wend as b, count(*), sum(c1), last(c2), first(ts) from meters " + "where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' partition by t1 interval(1h)", + "select _wstart as a, _wend as b, count(*), sum(c1), last(c2), first(ts) from meters " + "where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' partition by c3 interval(1m)", + ] + for sql in sqls: + self.query_and_check_with_slimit(sql, 10, 2, offset) + + def test_group_by_operator(self): + tdSql.query('select count(*), c1+1 from meters group by tbname, c1+1', 1) + + def run(self): + self.prepareTestEnv() + self.test_group_by_operator() + self.test_interval_limit_offset() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/stbJoin.py b/tests/system-test/2-query/stbJoin.py index 677704648c..6eb95349fe 100644 --- a/tests/system-test/2-query/stbJoin.py +++ b/tests/system-test/2-query/stbJoin.py @@ -112,6 +112,18 @@ class TDTestCase: tdSql.query(f"select a.* from sta a join stb b on a.tg1 != b.tg1 and a.ts=b.ts;") tdSql.checkRows(36) + tdSql.query(f"select a.* from sta a join stb b on a.ts=b.ts and a.ts is null;") + tdSql.checkRows(0) + + tdSql.query(f"select a.* from sta a join stb b on a.ts=b.ts and a.ts is not null;") + tdSql.checkRows(48) + + tdSql.query(f"select a.* from sta a ,stb b where a.ts=b.ts and a.ts is null;") + tdSql.checkRows(0) + + tdSql.query(f"select a.* from sta a ,stb b where a.ts=b.ts and a.ts is not null;") + tdSql.checkRows(48) + # tdSql.checkData(0,1,10) tdSql.error(f"select a.* from sta a join stb b on a.tg1=b.tg1 where a.ts=b.ts or a.tg2=b.tg2;") diff --git a/tests/system-test/2-query/union.py b/tests/system-test/2-query/union.py index 9086d7754d..547ab07eb0 100644 --- a/tests/system-test/2-query/union.py +++ b/tests/system-test/2-query/union.py @@ -249,6 +249,9 @@ class TDTestCase: tdSql.checkRows(14) tdSql.query(f"select derivative(c1, 1s, 0) from (select * from {dbname}.t1 union select * from {dbname}.t1 order by ts)") tdSql.checkRows(11) + tdSql.query(f"select count(*) from {dbname}.t1 as a join {dbname}.t1 as b 
on a.ts = b.ts and a.ts is null") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) tdSql.error(f"select first(c1) from (select * from {dbname}.t1 union select * from {dbname}.t1)") tdSql.error(f"select last(c1) from (select * from {dbname}.t1 union select * from {dbname}.t1)") diff --git a/tests/system-test/7-tmq/tmqParamsTest.py b/tests/system-test/7-tmq/tmqParamsTest.py index ff7c70bcd2..0e9e8f989f 100644 --- a/tests/system-test/7-tmq/tmqParamsTest.py +++ b/tests/system-test/7-tmq/tmqParamsTest.py @@ -19,7 +19,7 @@ class TDTestCase: self.wal_retention_period1 = 3600 self.wal_retention_period2 = 1 self.commit_value_list = ["true", "false"] - self.offset_value_list = ["", "earliest", "latest", "none"] + self.offset_value_list = ["earliest", "latest", "none"] self.tbname_value_list = ["true", "false"] self.snapshot_value_list = ["false"] @@ -92,7 +92,7 @@ class TDTestCase: } consumer_commit = 1 if consumer_dict["enable.auto.commit"] == "true" else 0 consumer_tbname = 1 if consumer_dict["msg.with.table.name"] == "true" else 0 - consumer_ret = "earliest" if offset_value == "" else offset_value + consumer_ret = "latest" if offset_value == "" else offset_value expected_parameters=f'tbname:{consumer_tbname},commit:{consumer_commit},interval:{paraDict["auto_commit_interval"]}ms,reset:{consumer_ret}' if len(offset_value) == 0: del consumer_dict["auto.offset.reset"] diff --git a/tests/system-test/8-stream/at_once_interval.py b/tests/system-test/8-stream/at_once_interval.py index 020b5f2a17..8f5438be37 100644 --- a/tests/system-test/8-stream/at_once_interval.py +++ b/tests/system-test/8-stream/at_once_interval.py @@ -70,6 +70,9 @@ class TDTestCase: fill_value='VALUE,1,2,3,4,5,6,7,8,9,10,11' self.tdCom.create_stream(stream_name=f'{self.tb_name}{self.tdCom.stream_suffix}', des_table=self.tb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {self.tb_name} {partition_elm} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="at_once", subtable_value=tb_subtable_value, fill_value=fill_value, fill_history_value=fill_history_value) start_time = self.tdCom.date_time + + time.sleep(1) + for i in range(self.tdCom.range_count): ts_value = str(self.tdCom.date_time+self.tdCom.dataDict["interval"])+f'+{i*10}s' ts_cast_delete_value = self.tdCom.time_cast(ts_value) diff --git a/tests/system-test/8-stream/partition_interval.py b/tests/system-test/8-stream/partition_interval.py index 0424932bf8..3692408de0 100644 --- a/tests/system-test/8-stream/partition_interval.py +++ b/tests/system-test/8-stream/partition_interval.py @@ -35,6 +35,9 @@ class TDTestCase: # create stb/ctb/tb stream self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.stb_stream_des_table, source_sql=source_sql, ignore_expired=ignore_expired) + + time.sleep(1) + # insert data count = 1 step_count = 1 diff --git a/tests/system-test/simpletest.bat b/tests/system-test/simpletest.bat index 5ae2d3feb3..31b76cad4a 100644 --- a/tests/system-test/simpletest.bat +++ b/tests/system-test/simpletest.bat @@ -18,7 +18,7 @@ python3 .\test.py -f 1-insert\influxdb_line_taosc_insert.py @REM #python3 .\test.py -f 1-insert\test_stmt_muti_insert_query.py @REM python3 .\test.py -f 1-insert\alter_stable.py @REM python3 .\test.py -f 1-insert\alter_table.py -@REM python3 .\test.py -f 2-query\between.py +python3 .\test.py -f 2-query\between.py @REM python3 .\test.py -f 2-query\distinct.py @REM python3 .\test.py -f 2-query\varchar.py @REM python3 .\test.py -f 2-query\ltrim.py @@ 
-101,3 +101,4 @@ python3 .\test.py -f 7-tmq\subscribeStb.py @REM python3 .\test.py -f 7-tmq\subscribeStb3.py @REM python3 .\test.py -f 7-tmq\subscribeStb4.py @REM python3 .\test.py -f 7-tmq\db.py +python3 .\test.py -f 6-cluster\5dnode3mnodeSep1VnodeStopDnodeModifyMeta.py -N 6 -M 3 \ No newline at end of file diff --git a/tools/shell/src/shellAuto.c b/tools/shell/src/shellAuto.c index 41cdb0f928..60d6388faa 100644 --- a/tools/shell/src/shellAuto.c +++ b/tools/shell/src/shellAuto.c @@ -66,6 +66,8 @@ SWords shellCommands[] = { {"alter dnode \"debugFlag\" \"141\";", 0, 0, NULL}, {"alter dnode \"monitor\" \"0\";", 0, 0, NULL}, {"alter dnode \"monitor\" \"1\";", 0, 0, NULL}, + {"alter dnode \"asynclog\" \"0\";", 0, 0, NULL}, + {"alter dnode \"asynclog\" \"1\";", 0, 0, NULL}, {"alter all dnodes \"resetlog\";", 0, 0, NULL}, {"alter all dnodes \"debugFlag\" \"141\";", 0, 0, NULL}, {"alter all dnodes \"monitor\" \"0\";", 0, 0, NULL}, @@ -77,6 +79,8 @@ SWords shellCommands[] = { {"alter local \"uDebugFlag\" \"143\";", 0, 0, NULL}, {"alter local \"rpcDebugFlag\" \"143\";", 0, 0, NULL}, {"alter local \"tmrDebugFlag\" \"143\";", 0, 0, NULL}, + {"alter local \"asynclog\" \"0\";", 0, 0, NULL}, + {"alter local \"asynclog\" \"1\";", 0, 0, NULL}, {"alter topic", 0, 0, NULL}, {"alter user ;", 0, 0, NULL}, // 20 @@ -184,7 +188,7 @@ SWords shellCommands[] = { {"show grants;", 0, 0, NULL}, #ifdef TD_ENTERPRISE {"split vgroup ", 0, 0, NULL}, -#endif +#endif {"insert into values(", 0, 0, NULL}, {"insert into using tags(", 0, 0, NULL}, {"insert into using values(", 0, 0, NULL}, @@ -391,13 +395,19 @@ void showHelp() { alter dnode 'monitor' '0';\n\ alter dnode 'monitor' \"1\";\n\ alter dnode \"debugflag\" \"143\";\n\ + alter dnode 'asynclog' '0';\n\ + alter dnode 'asynclog' \"1\";\n\ alter all dnodes \"monitor\" \"0\";\n\ alter all dnodes \"monitor\" \"1\";\n\ alter all dnodes \"resetlog\";\n\ alter all dnodes \"debugFlag\" \n\ + alter all dnodes \"asynclog\" \"0\";\n\ + alter all dnodes \"asynclog\" \"1\";\n\ alter table ;\n\ alter local \"resetlog\";\n\ alter local \"DebugFlag\" \"143\";\n\ + alter local \"asynclog\" \"0\";\n\ + alter local \"asynclog\" \"1\";\n\ alter topic\n\ alter user ...\n\ ----- C ----- \n\ diff --git a/tools/shell/src/shellWebsocket.c b/tools/shell/src/shellWebsocket.c index ff2e5efdd4..e83ceff099 100644 --- a/tools/shell/src/shellWebsocket.c +++ b/tools/shell/src/shellWebsocket.c @@ -260,7 +260,7 @@ void shellRunSingleCommandWebsocketImp(char *command) { WS_RES* res; for (int reconnectNum = 0; reconnectNum < 2; reconnectNum++) { - if (!shell.ws_conn && shell_conn_ws_server(0)) { + if ((!shell.ws_conn && shell_conn_ws_server(0)) || shell.stop_query) { return; } st = taosGetTimestampUs(); diff --git a/utils/test/c/tmq_taosx_ci.c b/utils/test/c/tmq_taosx_ci.c index 5d4d73c448..ff89bb1f75 100644 --- a/utils/test/c/tmq_taosx_ci.c +++ b/utils/test/c/tmq_taosx_ci.c @@ -547,6 +547,7 @@ tmq_t* build_consumer() { tmq_conf_set(conf, "td.connect.pass", "taosdata"); tmq_conf_set(conf, "msg.with.table.name", "true"); tmq_conf_set(conf, "enable.auto.commit", "true"); + tmq_conf_set(conf, "auto.offset.reset", "earliest"); if (g_conf.snapShot) { tmq_conf_set(conf, "experimental.snapshot.enable", "true");