Merge branch '3.0' into enh/TD-23769-3.0
commit 8c8bcabdd4
@@ -174,7 +174,7 @@ Use curl to verify that the TDengine REST API is working on port 6041:
```
$ curl -u root:taosdata -d "show databases" 127.0.0.1:6041/rest/sql
Handling connection for 6041
{"code":0,"column_meta":[["name","VARCHAR",64],["create_time","TIMESTAMP",8],["vgroups","SMALLINT",2],["ntables","BIGINT",8],["replica","TINYINT",1],["strict","VARCHAR",4],["duration","VARCHAR",10],["keep","VARCHAR",32],["buffer","INT",4],["pagesize","INT",4],["pages","INT",4],["minrows","INT",4],["maxrows","INT",4],["comp","TINYINT",1],["precision","VARCHAR",2],["status","VARCHAR",10],["retention","VARCHAR",60],["single_stable","BOOL",1],["cachemodel","VARCHAR",11],["cachesize","INT",4],["wal_level","TINYINT",1],["wal_fsync_period","INT",4],["wal_retention_period","INT",4],["wal_retention_size","BIGINT",8],["wal_roll_period","INT",4],["wal_segment_size","BIGINT",8]],"data":[["information_schema",null,null,16,null,null,null,null,null,null,null,null,null,null,null,"ready",null,null,null,null,null,null,null,null,null,null],["performance_schema",null,null,10,null,null,null,null,null,null,null,null,null,null,null,"ready",null,null,null,null,null,null,null,null,null,null]],"rows":2}
{"code":0,"column_meta":[["name","VARCHAR",64],["create_time","TIMESTAMP",8],["vgroups","SMALLINT",2],["ntables","BIGINT",8],["replica","TINYINT",1],["strict","VARCHAR",4],["duration","VARCHAR",10],["keep","VARCHAR",32],["buffer","INT",4],["pagesize","INT",4],["pages","INT",4],["minrows","INT",4],["maxrows","INT",4],["comp","TINYINT",1],["precision","VARCHAR",2],["status","VARCHAR",10],["retention","VARCHAR",60],["single_stable","BOOL",1],["cachemodel","VARCHAR",11],["cachesize","INT",4],["wal_level","TINYINT",1],["wal_fsync_period","INT",4],["wal_retention_period","INT",4],["wal_retention_size","BIGINT",8]],"data":[["information_schema",null,null,16,null,null,null,null,null,null,null,null,null,null,null,"ready",null,null,null,null,null,null,null,null,null,null],["performance_schema",null,null,10,null,null,null,null,null,null,null,null,null,null,null,"ready",null,null,null,null,null,null,null,null,null,null]],"rows":2}
```
## Enable the dashboard for visualization
@@ -36,8 +36,6 @@ database_option: {
| TSDB_PAGESIZE value
| WAL_RETENTION_PERIOD value
| WAL_RETENTION_SIZE value
| WAL_ROLL_PERIOD value
| WAL_SEGMENT_SIZE value
}
```
@@ -77,8 +75,6 @@ database_option: {
- TSDB_PAGESIZE: The page size of the data storage engine in a vnode. The unit is KB. The default is 4 KB. The range is 1 to 16384, that is, 1 KB to 16 MB.
- WAL_RETENTION_PERIOD: specifies the maximum time for which WAL files are retained for consumption. This parameter is used for data subscription. Enter a time in seconds. The default value is 0, which indicates that WAL files are not retained for consumption. Set this parameter to an appropriate value before creating topics (see the sketch after this list).
- WAL_RETENTION_SIZE: specifies the maximum total size of WAL files retained for consumption. This parameter is used for data subscription. Enter a size in KB. The default value is 0, which indicates that there is no upper limit on the total size of WAL files retained for consumption.
- WAL_ROLL_PERIOD: specifies the time after which WAL files are rotated. After this period elapses, a new WAL file is created. The default value is 0. A value of 0 indicates that a new WAL file is created only after TSDB data in memory are flushed to disk.
- WAL_SEGMENT_SIZE: specifies the maximum size of a WAL file. After the current WAL file reaches this size, a new WAL file is created. The default value is 0. A value of 0 indicates that a new WAL file is created only after TSDB data in memory are flushed to disk.
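Because WAL_RETENTION_PERIOD defaults to 0, a freshly created database retains no WAL for consumers. A minimal sketch of the workflow described above, assuming a database named `tmqdb` and a supertable named `meters` (both names are illustrative, not from this document):

```sql
-- Retain one hour of WAL for data subscription before creating a topic.
ALTER DATABASE tmqdb WAL_RETENTION_PERIOD 3600;
CREATE TOPIC topic_meters AS SELECT ts, current, voltage FROM tmqdb.meters;
```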
### Example Statement
```sql
@@ -334,8 +334,6 @@ The following list shows all reserved keywords:
- WAL_LEVEL
- WAL_RETENTION_PERIOD
- WAL_RETENTION_SIZE
- WAL_ROLL_PERIOD
- WAL_SEGMENT_SIZE
- WATERMARK
- WHERE
- WINDOW_CLOSE
@@ -100,12 +100,10 @@ Provides information about user-created databases. Similar to SHOW DATABASES.
| 23 | wal_fsync_period | INT | Interval at which WAL is written to disk. It should be noted that `wal_fsync_period` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 24 | wal_retention_period | INT | WAL retention period. It should be noted that `wal_retention_period` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 25 | wal_retention_size | INT | Maximum WAL size. It should be noted that `wal_retention_size` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 26 | wal_roll_period | INT | WAL rotation period. It should be noted that `wal_roll_period` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 27 | wal_segment_size | BIGINT | WAL file size. It should be noted that `wal_segment_size` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 28 | stt_trigger | SMALLINT | The threshold for number of files to trigger file merging. It should be noted that `stt_trigger` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 29 | table_prefix | SMALLINT | The prefix length in the table name that is ignored when distributing table to vnode based on table name. It should be noted that `table_prefix` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 30 | table_suffix | SMALLINT | The suffix length in the table name that is ignored when distributing table to vnode based on table name. It should be noted that `table_suffix` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 31 | tsdb_pagesize | INT | The page size of the internal storage engine, in KB. It should be noted that `tsdb_pagesize` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 26 | stt_trigger | SMALLINT | The threshold for number of files to trigger file merging. It should be noted that `stt_trigger` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 27 | table_prefix | SMALLINT | The prefix length in the table name that is ignored when distributing table to vnode based on table name. It should be noted that `table_prefix` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 28 | table_suffix | SMALLINT | The suffix length in the table name that is ignored when distributing table to vnode based on table name. It should be noted that `table_suffix` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 29 | tsdb_pagesize | INT | The page size of the internal storage engine, in KB. It should be noted that `tsdb_pagesize` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
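Because the column names above are also reserved keywords, queries against `information_schema.ins_databases` must escape them with backticks, as the notes in each row state. A minimal sketch:

```sql
-- Keyword column names are escaped with backticks when used as column names.
SELECT `name`, `wal_level`, `wal_retention_period`, `wal_retention_size`
FROM information_schema.ins_databases;
```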
## INS_FUNCTIONS
@@ -33,7 +33,7 @@ The following data types can be used in the schema for standard tables.
| 6 | ALTER USER | Modified | Deprecated<ul><li>PRIVILEGE: Specified user permissions. Replaced by GRANT and REVOKE. <br/>Added</li><li>ENABLE: Enables or disables a user. </li><li>SYSINFO: Specifies whether a user can query system information. </li></ul>
| 7 | COMPACT VNODES | Not supported | Compacted the data on a vnode. Not supported.
| 8 | CREATE ACCOUNT | Deprecated| This Enterprise Edition-only statement has been removed. It returns the error "This statement is no longer supported."
| 9 | CREATE DATABASE | Modified | Deprecated<ul><li>BLOCKS: Specified the number of blocks for each vnode. BUFFER is now used to specify the size of the write cache pool for each vnode. </li><li>CACHE: Specified the size of the memory blocks used by each vnode. BUFFER is now used to specify the size of the write cache pool for each vnode. </li><li>CACHELAST: Specified how to cache the newest row of data. CACHEMODEL now replaces CACHELAST. </li><li>DAYS: The length of time to store in a single file. Replaced by DURATION. </li><li>FSYNC: Specified the fsync interval when WAL was set to 2. Replaced by WAL_FSYNC_PERIOD. </li><li>QUORUM: Specified the number of confirmations required. STRICT is now used to specify strong or weak consistency. </li><li>UPDATE: Specified whether update operations were supported. All databases now support updating data in certain columns. </li><li>WAL: Specified the WAL level. Replaced by WAL_LEVEL. <br/>Added</li><li>BUFFER: Specifies the size of the write cache pool for each vnode. </li><li>CACHEMODEL: Specifies whether to cache the latest subtable data. </li><li>CACHESIZE: Specifies the size of the cache for the newest subtable data. </li><li>DURATION: Replaces DAYS. Now supports units. </li><li>PAGES: Specifies the number of pages in the metadata storage engine cache on each vnode. </li><li>PAGESIZE: specifies the size (in KB) of each page in the metadata storage engine cache on each vnode. </li><li>RETENTIONS: Specifies the aggregation interval and retention period </li><li>STRICT: Specifies whether strong data consistency is enabled. </li><li>SINGLE_STABLE: Specifies whether a database can contain multiple supertables. </li><li>VGROUPS: Specifies the initial number of vgroups when a database is created. </li><li>WAL_FSYNC_PERIOD: Replaces the FSYNC parameter. </li><li>WAL_LEVEL: Replaces the WAL parameter. </li><li>WAL_RETENTION_PERIOD: specifies the time after which WAL files are deleted. This parameter is used for data subscription. </li><li>WAL_RETENTION_SIZE: specifies the size at which WAL files are deleted. This parameter is used for data subscription. </li><li>WAL_ROLL_PERIOD: Specifies the WAL rotation period. </li><li>WAL_SEGMENT_SIZE: specifies the maximum size of a WAL file. <br/>Modified</li><li>KEEP: Now supports units. </li></ul>
| 9 | CREATE DATABASE | Modified | Deprecated<ul><li>BLOCKS: Specified the number of blocks for each vnode. BUFFER is now used to specify the size of the write cache pool for each vnode. </li><li>CACHE: Specified the size of the memory blocks used by each vnode. BUFFER is now used to specify the size of the write cache pool for each vnode. </li><li>CACHELAST: Specified how to cache the newest row of data. CACHEMODEL now replaces CACHELAST. </li><li>DAYS: The length of time to store in a single file. Replaced by DURATION. </li><li>FSYNC: Specified the fsync interval when WAL was set to 2. Replaced by WAL_FSYNC_PERIOD. </li><li>QUORUM: Specified the number of confirmations required. STRICT is now used to specify strong or weak consistency. </li><li>UPDATE: Specified whether update operations were supported. All databases now support updating data in certain columns. </li><li>WAL: Specified the WAL level. Replaced by WAL_LEVEL. <br/>Added</li><li>BUFFER: Specifies the size of the write cache pool for each vnode. </li><li>CACHEMODEL: Specifies whether to cache the latest subtable data. </li><li>CACHESIZE: Specifies the size of the cache for the newest subtable data. </li><li>DURATION: Replaces DAYS. Now supports units. </li><li>PAGES: Specifies the number of pages in the metadata storage engine cache on each vnode. </li><li>PAGESIZE: specifies the size (in KB) of each page in the metadata storage engine cache on each vnode. </li><li>RETENTIONS: Specifies the aggregation interval and retention period </li><li>STRICT: Specifies whether strong data consistency is enabled. </li><li>SINGLE_STABLE: Specifies whether a database can contain multiple supertables. </li><li>VGROUPS: Specifies the initial number of vgroups when a database is created. </li><li>WAL_FSYNC_PERIOD: Replaces the FSYNC parameter. </li><li>WAL_LEVEL: Replaces the WAL parameter. </li><li>WAL_RETENTION_PERIOD: specifies the time after which WAL files are deleted. This parameter is used for data subscription. </li><li>WAL_RETENTION_SIZE: specifies the size at which WAL files are deleted. This parameter is used for data subscription. <br/>Modified</li><li>KEEP: Now supports units. </li></ul>
| 10 | CREATE DNODE | Modified | Now supports specifying hostname and port separately<ul><li>CREATE DNODE dnode_host_name PORT port_val</li></ul>
| 11 | CREATE INDEX | Added | Creates an SMA index.
| 12 | CREATE MNODE | Added | Creates an mnode.
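The CREATE DATABASE row above lists the 3.0 options only in prose; a compact sketch that combines several of them may help (database name and values are illustrative, not taken from this document):

```sql
-- Illustrative only: several of the 3.0 CREATE DATABASE options described above.
CREATE DATABASE power
  BUFFER 256                  -- write cache pool per vnode, replaces BLOCKS/CACHE
  CACHEMODEL 'last_row'       -- replaces CACHELAST
  DURATION 10d                -- replaces DAYS, now accepts units
  KEEP 365d                   -- now accepts units
  WAL_LEVEL 1                 -- replaces WAL
  WAL_FSYNC_PERIOD 3000       -- replaces FSYNC
  WAL_RETENTION_PERIOD 3600   -- WAL retained for data subscription
  VGROUPS 4;
```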
@@ -722,6 +722,16 @@ The charset that takes effect is UTF-8.
| Value Range | 0: do not change; 1: change on modification |
| Default Value | 0 |
### keepTimeOffset
| Attribute | Description |
| ------------- | ------------------------- |
| Applicable | Server Only |
| Meaning | Latency of data migration |
| Unit | hour |
| Value Range | 0-23 |
| Default Value | 0 |
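According to the table above, keepTimeOffset delays data-file migration by the configured number of hours. Assuming it can be changed at runtime like the other options handled by `taosCfgDynamicOptions` later in this diff (the ALTER syntax below is an assumption, not taken from this document), a sketch:

```sql
-- Assumption: keepTimeOffset is adjustable through the generic dnode
-- configuration syntax; otherwise set "keepTimeOffset 2" in taos.cfg.
ALTER ALL DNODES 'keepTimeOffset' '2';
```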
## 3.0 Parameters
| # | **Parameter** | **Applicable to 2.x** | **Applicable to 3.0** | Current behavior in 3.0 |
@@ -779,3 +789,4 @@ The charset that takes effect is UTF-8.
| 53 | udf | Yes | Yes | |
| 54 | enableCoreFile | Yes | Yes | |
| 55 | ttlChangeOnWrite | No | Yes | |
| 56 | keepTimeOffset | Yes | Yes | |
@@ -2,10 +2,10 @@
|
```text
taos> show databases;
name | create_time | vgroups | ntables | replica | strict | duration | keep | buffer | pagesize | pages | minrows | maxrows | comp | precision | status | retention | single_stable | cachemodel | cachesize | wal_level | wal_fsync_period | wal_retention_period | wal_retention_size | wal_roll_period | wal_seg_size |
=========================================================================================================================================================================================================================================================================================================================================================================================================================================================================
information_schema | NULL | NULL | 14 | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | ready | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL |
performance_schema | NULL | NULL | 3 | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | ready | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL |
name | create_time | vgroups | ntables | replica | strict | duration | keep | buffer | pagesize | pages | minrows | maxrows | comp | precision | status | retention | single_stable | cachemodel | cachesize | wal_level | wal_fsync_period | wal_retention_period | wal_retention_size |
===============================================================================================================================================================================================================================================================================================================================================================================================================================
information_schema | NULL | NULL | 14 | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | ready | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL |
performance_schema | NULL | NULL | 3 | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | ready | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL |
test | 2022-08-04 16:46:40.506 | 2 | 0 | 1 | off | 14400m | 5256000m,5256000m,5256000m | 96 | 4 | 256 |
100 | 4096 | 2 | ms | ready | NULL | false | none | 1 | 1 | 3000 | 0 | 0 | 0 | 0 |
Query OK, 3 rows in database (0.123000s)
@@ -174,7 +174,7 @@ kubectl port-forward tdengine-0 6041:6041 &
```
$ curl -u root:taosdata -d "show databases" 127.0.0.1:6041/rest/sql
Handling connection for 6041
{"code":0,"column_meta":[["name","VARCHAR",64],["create_time","TIMESTAMP",8],["vgroups","SMALLINT",2],["ntables","BIGINT",8],["replica","TINYINT",1],["strict","VARCHAR",4],["duration","VARCHAR",10],["keep","VARCHAR",32],["buffer","INT",4],["pagesize","INT",4],["pages","INT",4],["minrows","INT",4],["maxrows","INT",4],["comp","TINYINT",1],["precision","VARCHAR",2],["status","VARCHAR",10],["retention","VARCHAR",60],["single_stable","BOOL",1],["cachemodel","VARCHAR",11],["cachesize","INT",4],["wal_level","TINYINT",1],["wal_fsync_period","INT",4],["wal_retention_period","INT",4],["wal_retention_size","BIGINT",8],["wal_roll_period","INT",4],["wal_segment_size","BIGINT",8]],"data":[["information_schema",null,null,16,null,null,null,null,null,null,null,null,null,null,null,"ready",null,null,null,null,null,null,null,null,null,null],["performance_schema",null,null,10,null,null,null,null,null,null,null,null,null,null,null,"ready",null,null,null,null,null,null,null,null,null,null]],"rows":2}
{"code":0,"column_meta":[["name","VARCHAR",64],["create_time","TIMESTAMP",8],["vgroups","SMALLINT",2],["ntables","BIGINT",8],["replica","TINYINT",1],["strict","VARCHAR",4],["duration","VARCHAR",10],["keep","VARCHAR",32],["buffer","INT",4],["pagesize","INT",4],["pages","INT",4],["minrows","INT",4],["maxrows","INT",4],["comp","TINYINT",1],["precision","VARCHAR",2],["status","VARCHAR",10],["retention","VARCHAR",60],["single_stable","BOOL",1],["cachemodel","VARCHAR",11],["cachesize","INT",4],["wal_level","TINYINT",1],["wal_fsync_period","INT",4],["wal_retention_period","INT",4],["wal_retention_size","BIGINT",8]],"data":[["information_schema",null,null,16,null,null,null,null,null,null,null,null,null,null,null,"ready",null,null,null,null,null,null,null,null,null,null],["performance_schema",null,null,10,null,null,null,null,null,null,null,null,null,null,null,"ready",null,null,null,null,null,null,null,null,null,null]],"rows":2}
```
## Use the dashboard for graphical management
@@ -36,7 +36,6 @@ database_option: {
| TSDB_PAGESIZE value
| WAL_RETENTION_PERIOD value
| WAL_RETENTION_SIZE value
| WAL_SEGMENT_SIZE value
}
```
@@ -76,8 +75,6 @@ database_option: {
- TSDB_PAGESIZE: The page size of the time-series data storage engine in a vnode, in KB. The default is 4 KB. The range is 1 to 16384, that is, 1 KB to 16 MB.
- WAL_RETENTION_PERIOD: The maximum additional time for which WAL files are retained for data subscription. WAL cleanup is not affected by the consumption status of subscribing clients. The unit is seconds. The default is 0, meaning no extra retention for subscription. Set an appropriate retention period before creating subscriptions.
- WAL_RETENTION_SIZE: The maximum additional cumulative size of WAL files retained for data subscription, in KB. The default is 0, meaning there is no upper limit on the cumulative size.
- WAL_ROLL_PERIOD: The period after which a WAL file is rotated, in seconds. Once a WAL file has been created and written for this period, a new WAL file is created automatically. The default is 0, meaning a new file is created only when TSDB data in memory are flushed to disk.
- WAL_SEGMENT_SIZE: The maximum size of a single WAL file, in KB. When the current file exceeds this limit, a new WAL file is created automatically. The default is 0, meaning a new file is created only when TSDB data in memory are flushed to disk.

### Database Creation Example

```sql
@@ -334,8 +334,6 @@ description: Detailed list of TDengine reserved keywords
- WAL_LEVEL
- WAL_RETENTION_PERIOD
- WAL_RETENTION_SIZE
- WAL_ROLL_PERIOD
- WAL_SEGMENT_SIZE
- WATERMARK
- WHERE
- WINDOW_CLOSE
@@ -100,12 +100,10 @@ TDengine includes a built-in database named `INFORMATION_SCHEMA` that provides
| 23 | wal_fsync_period | INT | Interval at which WAL data is written to disk. It should be noted that `wal_fsync_period` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 24 | wal_retention_period | INT | WAL retention period. It should be noted that `wal_retention_period` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 25 | wal_retention_size | INT | Maximum WAL size retained. It should be noted that `wal_retention_size` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 26 | wal_roll_period | INT | WAL rotation period. It should be noted that `wal_roll_period` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 27 | wal_segment_size | BIGINT | WAL file size. It should be noted that `wal_segment_size` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 28 | stt_trigger | SMALLINT | The number of flushed files that triggers file merging. It should be noted that `stt_trigger` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 29 | table_prefix | SMALLINT | The prefix length in the table name that is ignored when the internal storage engine assigns a table to a vnode based on its name. It should be noted that `table_prefix` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 30 | table_suffix | SMALLINT | The suffix length in the table name that is ignored when the internal storage engine assigns a table to a vnode based on its name. It should be noted that `table_suffix` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 31 | tsdb_pagesize | INT | The page size of the time-series storage engine, in KB. It should be noted that `tsdb_pagesize` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 26 | stt_trigger | SMALLINT | The number of flushed files that triggers file merging. It should be noted that `stt_trigger` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 27 | table_prefix | SMALLINT | The prefix length in the table name that is ignored when the internal storage engine assigns a table to a vnode based on its name. It should be noted that `table_prefix` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 28 | table_suffix | SMALLINT | The suffix length in the table name that is ignored when the internal storage engine assigns a table to a vnode based on its name. It should be noted that `table_suffix` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 29 | tsdb_pagesize | INT | The page size of the time-series storage engine, in KB. It should be noted that `tsdb_pagesize` is a TDengine keyword and needs to be escaped with ` when used as a column name. |

## INS_FUNCTIONS
@ -33,7 +33,7 @@ description: "TDengine 3.0 版本的语法变更说明"
|
|||
| 6 | ALTER USER | 调整 | 废除<ul><li>PRIVILEGE:修改用户权限。3.0版本使用GRANT和REVOKE来授予和回收权限。<br/>新增</li><li>ENABLE:启用或停用此用户。</li><li>SYSINFO:修改用户是否可查看系统信息。</li></ul>
|
||||
| 7 | COMPACT VNODES | 暂不支持 | 整理指定VNODE的数据。3.0.0版本暂不支持。
|
||||
| 8 | CREATE ACCOUNT | 废除 | 2.x中为企业版功能,3.0不再支持。语法暂时保留了,执行报“This statement is no longer supported”错误。
|
||||
| 9 | CREATE DATABASE | 调整 | <p>废除</p><ul><li>BLOCKS:VNODE使用的内存块数。3.0版本使用BUFFER来表示VNODE写入内存池的大小。</li><li>CACHE:VNODE使用的内存块的大小。3.0版本使用BUFFER来表示VNODE写入内存池的大小。</li><li>CACHELAST:缓存最新一行数据的模式。3.0版本用CACHEMODEL代替。</li><li>DAYS:数据文件存储数据的时间跨度。3.0版本使用DURATION代替。</li><li>FSYNC:当 WAL 设置为 2 时,执行 fsync 的周期。3.0版本使用WAL_FSYNC_PERIOD代替。</li><li>QUORUM:写入需要的副本确认数。3.0版本使用STRICT来指定强一致还是弱一致。</li><li>UPDATE:更新操作的支持模式。3.0版本所有数据库都支持部分列更新。</li><li>WAL:WAL 级别。3.0版本使用WAL_LEVEL代替。</li></ul><p>新增</p><ul><li>BUFFER:一个 VNODE 写入内存池大小。</li><li>CACHEMODEL:表示是否在内存中缓存子表的最近数据。</li><li>CACHESIZE:表示缓存子表最近数据的内存大小。</li><li>DURATION:代替原DAYS参数。新增支持带单位的设置方式。</li><li>PAGES:一个 VNODE 中元数据存储引擎的缓存页个数。</li><li>PAGESIZE:一个 VNODE 中元数据存储引擎的页大小。</li><li>RETENTIONS:表示数据的聚合周期和保存时长。</li><li>STRICT:表示数据同步的一致性要求。</li><li>SINGLE_STABLE:表示此数据库中是否只可以创建一个超级表。</li><li>VGROUPS:数据库中初始VGROUP的数目。</li><li>WAL_FSYNC_PERIOD:代替原FSYNC参数。</li><li>WAL_LEVEL:代替原WAL参数。</li><li>WAL_RETENTION_PERIOD:wal文件的额外保留策略,用于数据订阅。</li><li>WAL_RETENTION_SIZE:wal文件的额外保留策略,用于数据订阅。</li><li>WAL_ROLL_PERIOD:wal文件切换时长。</li><li>WAL_SEGMENT_SIZE:wal单个文件大小。</li></ul><p>调整</p><ul><li>KEEP:3.0版本新增支持带单位的设置方式。</li></ul>
|
||||
| 9 | CREATE DATABASE | 调整 | <p>废除</p><ul><li>BLOCKS:VNODE使用的内存块数。3.0版本使用BUFFER来表示VNODE写入内存池的大小。</li><li>CACHE:VNODE使用的内存块的大小。3.0版本使用BUFFER来表示VNODE写入内存池的大小。</li><li>CACHELAST:缓存最新一行数据的模式。3.0版本用CACHEMODEL代替。</li><li>DAYS:数据文件存储数据的时间跨度。3.0版本使用DURATION代替。</li><li>FSYNC:当 WAL 设置为 2 时,执行 fsync 的周期。3.0版本使用WAL_FSYNC_PERIOD代替。</li><li>QUORUM:写入需要的副本确认数。3.0版本使用STRICT来指定强一致还是弱一致。</li><li>UPDATE:更新操作的支持模式。3.0版本所有数据库都支持部分列更新。</li><li>WAL:WAL 级别。3.0版本使用WAL_LEVEL代替。</li></ul><p>新增</p><ul><li>BUFFER:一个 VNODE 写入内存池大小。</li><li>CACHEMODEL:表示是否在内存中缓存子表的最近数据。</li><li>CACHESIZE:表示缓存子表最近数据的内存大小。</li><li>DURATION:代替原DAYS参数。新增支持带单位的设置方式。</li><li>PAGES:一个 VNODE 中元数据存储引擎的缓存页个数。</li><li>PAGESIZE:一个 VNODE 中元数据存储引擎的页大小。</li><li>RETENTIONS:表示数据的聚合周期和保存时长。</li><li>STRICT:表示数据同步的一致性要求。</li><li>SINGLE_STABLE:表示此数据库中是否只可以创建一个超级表。</li><li>VGROUPS:数据库中初始VGROUP的数目。</li><li>WAL_FSYNC_PERIOD:代替原FSYNC参数。</li><li>WAL_LEVEL:代替原WAL参数。</li><li>WAL_RETENTION_PERIOD:wal文件的额外保留策略,用于数据订阅。</li><li>WAL_RETENTION_SIZE:wal文件的额外保留策略,用于数据订阅。</li></ul><p>调整</p><ul><li>KEEP:3.0版本新增支持带单位的设置方式。</li></ul>
|
||||
| 10 | CREATE DNODE | 调整 | 新增主机名和端口号分开指定语法<ul><li>CREATE DNODE dnode_host_name PORT port_val</li></ul>
|
||||
| 11 | CREATE INDEX | 新增 | 创建SMA索引。
|
||||
| 12 | CREATE MNODE | 新增 | 创建管理节点。
|
||||
|
|
|
@@ -726,6 +726,16 @@ The effective charset is UTF-8.
| Value Range | 0: do not change; 1: change on modification |
| Default Value | 0 |

### keepTimeOffset

| Attribute | Description |
| -------- | ------------------ |
| Applicable | Server Only |
| Meaning | Latency of data migration |
| Unit | hour |
| Value Range | 0-23 |
| Default Value | 0 |

## Compression Parameters

### compressMsgSize
@@ -794,6 +804,7 @@ The effective charset is UTF-8.
| 53 | udf | Yes | Yes | |
| 54 | enableCoreFile | Yes | Yes | |
| 55 | ttlChangeOnWrite | No | Yes | |
| 56 | keepTimeOffset | Yes | Yes | |

## Parameters Deprecated from 2.x to 3.0
@ -808,76 +819,74 @@ charset 的有效值是 UTF-8。
|
|||
| 7 | offlineThreshold | 是 | 否 | 3.0 行为未知 |
|
||||
| 8 | role | 是 | 否 | 由 supportVnode 决定是否能够创建 |
|
||||
| 9 | dnodeNopLoop | 是 | 否 | 2.6 文档中未找到此参数 |
|
||||
| 10 | keepTimeOffset | 是 | 否 | 2.6 文档中未找到此参数 |
|
||||
| 11 | rpcTimer | 是 | 否 | 3.0 行为未知 |
|
||||
| 12 | rpcMaxTime | 是 | 否 | 3.0 行为未知 |
|
||||
| 13 | rpcForceTcp | 是 | 否 | 默认为 TCP |
|
||||
| 14 | tcpConnTimeout | 是 | 否 | 3.0 行为未知 |
|
||||
| 15 | syncCheckInterval | 是 | 否 | 3.0 行为未知 |
|
||||
| 16 | maxTmrCtrl | 是 | 否 | 3.0 行为未知 |
|
||||
| 17 | monitorReplica | 是 | 否 | 由 RAFT 协议管理多副本 |
|
||||
| 18 | smlTagNullName | 是 | 否 | 3.0 行为未知 |
|
||||
| 20 | ratioOfQueryCores | 是 | 否 | 由 线程池 相关配置参数决定 |
|
||||
| 21 | maxStreamCompDelay | 是 | 否 | 3.0 行为未知 |
|
||||
| 22 | maxFirstStreamCompDelay | 是 | 否 | 3.0 行为未知 |
|
||||
| 23 | retryStreamCompDelay | 是 | 否 | 3.0 行为未知 |
|
||||
| 24 | streamCompDelayRatio | 是 | 否 | 3.0 行为未知 |
|
||||
| 25 | maxVgroupsPerDb | 是 | 否 | 由 create db 的参数 vgroups 指定实际 vgroups 数量 |
|
||||
| 26 | maxTablesPerVnode | 是 | 否 | DB 中的所有表近似平均分配到各个 vgroup |
|
||||
| 27 | minTablesPerVnode | 是 | 否 | DB 中的所有表近似平均分配到各个 vgroup |
|
||||
| 28 | tableIncStepPerVnode | 是 | 否 | DB 中的所有表近似平均分配到各个 vgroup |
|
||||
| 29 | cache | 是 | 否 | 由 buffer 代替 cache\*blocks |
|
||||
| 30 | blocks | 是 | 否 | 由 buffer 代替 cache\*blocks |
|
||||
| 31 | days | 是 | 否 | 由 create db 的参数 duration 取代 |
|
||||
| 32 | keep | 是 | 否 | 由 create db 的参数 keep 取代 |
|
||||
| 33 | minRows | 是 | 否 | 由 create db 的参数 minRows 取代 |
|
||||
| 34 | maxRows | 是 | 否 | 由 create db 的参数 maxRows 取代 |
|
||||
| 35 | quorum | 是 | 否 | 由 RAFT 协议决定 |
|
||||
| 36 | comp | 是 | 否 | 由 create db 的参数 comp 取代 |
|
||||
| 37 | walLevel | 是 | 否 | 由 create db 的参数 wal_level 取代 |
|
||||
| 38 | fsync | 是 | 否 | 由 create db 的参数 wal_fsync_period 取代 |
|
||||
| 39 | replica | 是 | 否 | 由 create db 的参数 replica 取代 |
|
||||
| 40 | partitions | 是 | 否 | 3.0 行为未知 |
|
||||
| 41 | update | 是 | 否 | 允许更新部分列 |
|
||||
| 42 | cachelast | 是 | 否 | 由 create db 的参数 cacheModel 取代 |
|
||||
| 43 | maxSQLLength | 是 | 否 | SQL 上限为 1MB,无需参数控制 |
|
||||
| 44 | maxWildCardsLength | 是 | 否 | 3.0 行为未知 |
|
||||
| 45 | maxRegexStringLen | 是 | 否 | 3.0 行为未知 |
|
||||
| 46 | maxNumOfOrderedRes | 是 | 否 | 3.0 行为未知 |
|
||||
| 47 | maxConnections | 是 | 否 | 取决于系统配置和系统处理能力,详见后面的 Note |
|
||||
| 48 | mnodeEqualVnodeNum | 是 | 否 | 3.0 行为未知 |
|
||||
| 49 | http | 是 | 否 | http 服务由 taosAdapter 提供 |
|
||||
| 50 | httpEnableRecordSql | 是 | 否 | taosd 不提供 http 服务 |
|
||||
| 51 | httpMaxThreads | 是 | 否 | taosd 不提供 http 服务 |
|
||||
| 52 | restfulRowLimit | 是 | 否 | taosd 不提供 http 服务 |
|
||||
| 53 | httpDbNameMandatory | 是 | 否 | taosd 不提供 http 服务 |
|
||||
| 54 | httpKeepAlive | 是 | 否 | taosd 不提供 http 服务 |
|
||||
| 55 | enableRecordSql | 是 | 否 | 3.0 行为未知 |
|
||||
| 56 | maxBinaryDisplayWidth | 是 | 否 | 3.0 行为未知 |
|
||||
| 57 | stream | 是 | 否 | 默认启用连续查询 |
|
||||
| 58 | retrieveBlockingModel | 是 | 否 | 3.0 行为未知 |
|
||||
| 59 | tsdbMetaCompactRatio | 是 | 否 | 3.0 行为未知 |
|
||||
| 60 | defaultJSONStrType | 是 | 否 | 3.0 行为未知 |
|
||||
| 61 | walFlushSize | 是 | 否 | 3.0 行为未知 |
|
||||
| 62 | keepTimeOffset | 是 | 否 | 3.0 行为未知 |
|
||||
| 63 | flowctrl | 是 | 否 | 3.0 行为未知 |
|
||||
| 64 | slaveQuery | 是 | 否 | 3.0 行为未知: slave vnode 是否能够处理查询? |
|
||||
| 65 | adjustMaster | 是 | 否 | 3.0 行为未知 |
|
||||
| 66 | topicBinaryLen | 是 | 否 | 3.0 行为未知 |
|
||||
| 67 | telegrafUseFieldNum | 是 | 否 | 3.0 行为未知 |
|
||||
| 68 | deadLockKillQuery | 是 | 否 | 3.0 行为未知 |
|
||||
| 69 | clientMerge | 是 | 否 | 3.0 行为未知 |
|
||||
| 70 | sdbDebugFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
|
||||
| 71 | odbcDebugFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
|
||||
| 72 | httpDebugFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
|
||||
| 73 | monDebugFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
|
||||
| 74 | cqDebugFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
|
||||
| 75 | shortcutFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
|
||||
| 76 | probeSeconds | 是 | 否 | 3.0 行为未知 |
|
||||
| 77 | probeKillSeconds | 是 | 否 | 3.0 行为未知 |
|
||||
| 78 | probeInterval | 是 | 否 | 3.0 行为未知 |
|
||||
| 79 | lossyColumns | 是 | 否 | 3.0 行为未知 |
|
||||
| 80 | fPrecision | 是 | 否 | 3.0 行为未知 |
|
||||
| 81 | dPrecision | 是 | 否 | 3.0 行为未知 |
|
||||
| 82 | maxRange | 是 | 否 | 3.0 行为未知 |
|
||||
| 83 | range | 是 | 否 | 3.0 行为未知 |
|
||||
| 10 | rpcTimer | 是 | 否 | 3.0 行为未知 |
|
||||
| 11 | rpcMaxTime | 是 | 否 | 3.0 行为未知 |
|
||||
| 12 | rpcForceTcp | 是 | 否 | 默认为 TCP |
|
||||
| 13 | tcpConnTimeout | 是 | 否 | 3.0 行为未知 |
|
||||
| 14 | syncCheckInterval | 是 | 否 | 3.0 行为未知 |
|
||||
| 15 | maxTmrCtrl | 是 | 否 | 3.0 行为未知 |
|
||||
| 16 | monitorReplica | 是 | 否 | 由 RAFT 协议管理多副本 |
|
||||
| 17 | smlTagNullName | 是 | 否 | 3.0 行为未知 |
|
||||
| 18 | ratioOfQueryCores | 是 | 否 | 由 线程池 相关配置参数决定 |
|
||||
| 19 | maxStreamCompDelay | 是 | 否 | 3.0 行为未知 |
|
||||
| 20 | maxFirstStreamCompDelay | 是 | 否 | 3.0 行为未知 |
|
||||
| 21 | retryStreamCompDelay | 是 | 否 | 3.0 行为未知 |
|
||||
| 22 | streamCompDelayRatio | 是 | 否 | 3.0 行为未知 |
|
||||
| 23 | maxVgroupsPerDb | 是 | 否 | 由 create db 的参数 vgroups 指定实际 vgroups 数量 |
|
||||
| 24 | maxTablesPerVnode | 是 | 否 | DB 中的所有表近似平均分配到各个 vgroup |
|
||||
| 25 | minTablesPerVnode | 是 | 否 | DB 中的所有表近似平均分配到各个 vgroup |
|
||||
| 26 | tableIncStepPerVnode | 是 | 否 | DB 中的所有表近似平均分配到各个 vgroup |
|
||||
| 27 | cache | 是 | 否 | 由 buffer 代替 cache\*blocks |
|
||||
| 28 | blocks | 是 | 否 | 由 buffer 代替 cache\*blocks |
|
||||
| 29 | days | 是 | 否 | 由 create db 的参数 duration 取代 |
|
||||
| 30 | keep | 是 | 否 | 由 create db 的参数 keep 取代 |
|
||||
| 31 | minRows | 是 | 否 | 由 create db 的参数 minRows 取代 |
|
||||
| 32 | maxRows | 是 | 否 | 由 create db 的参数 maxRows 取代 |
|
||||
| 33 | quorum | 是 | 否 | 由 RAFT 协议决定 |
|
||||
| 34 | comp | 是 | 否 | 由 create db 的参数 comp 取代 |
|
||||
| 35 | walLevel | 是 | 否 | 由 create db 的参数 wal_level 取代 |
|
||||
| 36 | fsync | 是 | 否 | 由 create db 的参数 wal_fsync_period 取代 |
|
||||
| 37 | replica | 是 | 否 | 由 create db 的参数 replica 取代 |
|
||||
| 38 | partitions | 是 | 否 | 3.0 行为未知 |
|
||||
| 39 | update | 是 | 否 | 允许更新部分列 |
|
||||
| 40 | cachelast | 是 | 否 | 由 create db 的参数 cacheModel 取代 |
|
||||
| 41 | maxSQLLength | 是 | 否 | SQL 上限为 1MB,无需参数控制 |
|
||||
| 42 | maxWildCardsLength | 是 | 否 | 3.0 行为未知 |
|
||||
| 43 | maxRegexStringLen | 是 | 否 | 3.0 行为未知 |
|
||||
| 44 | maxNumOfOrderedRes | 是 | 否 | 3.0 行为未知 |
|
||||
| 45 | maxConnections | 是 | 否 | 取决于系统配置和系统处理能力,详见后面的 Note |
|
||||
| 46 | mnodeEqualVnodeNum | 是 | 否 | 3.0 行为未知 |
|
||||
| 47 | http | 是 | 否 | http 服务由 taosAdapter 提供 |
|
||||
| 48 | httpEnableRecordSql | 是 | 否 | taosd 不提供 http 服务 |
|
||||
| 49 | httpMaxThreads | 是 | 否 | taosd 不提供 http 服务 |
|
||||
| 50 | restfulRowLimit | 是 | 否 | taosd 不提供 http 服务 |
|
||||
| 51 | httpDbNameMandatory | 是 | 否 | taosd 不提供 http 服务 |
|
||||
| 52 | httpKeepAlive | 是 | 否 | taosd 不提供 http 服务 |
|
||||
| 53 | enableRecordSql | 是 | 否 | 3.0 行为未知 |
|
||||
| 54 | maxBinaryDisplayWidth | 是 | 否 | 3.0 行为未知 |
|
||||
| 55 | stream | 是 | 否 | 默认启用连续查询 |
|
||||
| 56 | retrieveBlockingModel | 是 | 否 | 3.0 行为未知 |
|
||||
| 57 | tsdbMetaCompactRatio | 是 | 否 | 3.0 行为未知 |
|
||||
| 58 | defaultJSONStrType | 是 | 否 | 3.0 行为未知 |
|
||||
| 59 | walFlushSize | 是 | 否 | 3.0 行为未知 |
|
||||
| 60 | flowctrl | 是 | 否 | 3.0 行为未知 |
|
||||
| 61 | slaveQuery | 是 | 否 | 3.0 行为未知: slave vnode 是否能够处理查询? |
|
||||
| 62 | adjustMaster | 是 | 否 | 3.0 行为未知 |
|
||||
| 63 | topicBinaryLen | 是 | 否 | 3.0 行为未知 |
|
||||
| 64 | telegrafUseFieldNum | 是 | 否 | 3.0 行为未知 |
|
||||
| 65 | deadLockKillQuery | 是 | 否 | 3.0 行为未知 |
|
||||
| 66 | clientMerge | 是 | 否 | 3.0 行为未知 |
|
||||
| 67 | sdbDebugFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
|
||||
| 68 | odbcDebugFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
|
||||
| 69 | httpDebugFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
|
||||
| 70 | monDebugFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
|
||||
| 71 | cqDebugFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
|
||||
| 72 | shortcutFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
|
||||
| 73 | probeSeconds | 是 | 否 | 3.0 行为未知 |
|
||||
| 74 | probeKillSeconds | 是 | 否 | 3.0 行为未知 |
|
||||
| 75 | probeInterval | 是 | 否 | 3.0 行为未知 |
|
||||
| 76 | lossyColumns | 是 | 否 | 3.0 行为未知 |
|
||||
| 77 | fPrecision | 是 | 否 | 3.0 行为未知 |
|
||||
| 78 | dPrecision | 是 | 否 | 3.0 行为未知 |
|
||||
| 79 | maxRange | 是 | 否 | 3.0 行为未知 |
|
||||
| 80 | range | 是 | 否 | 3.0 行为未知 |
|
||||
|
|
|
@ -48,6 +48,7 @@ extern int32_t tsMaxNumOfDistinctResults;
|
|||
extern int32_t tsCompatibleModel;
|
||||
extern bool tsPrintAuth;
|
||||
extern int64_t tsTickPerMin[3];
|
||||
extern int64_t tsTickPerHour[3];
|
||||
extern int32_t tsCountAlwaysReturnValue;
|
||||
extern float tsSelectivityRatio;
|
||||
extern int32_t tsTagFilterResCacheSize;
|
||||
|
@ -185,6 +186,7 @@ extern bool tsDisableStream;
|
|||
extern int64_t tsStreamBufferSize;
|
||||
extern int64_t tsCheckpointInterval;
|
||||
extern bool tsFilterScalarMode;
|
||||
extern int32_t tsKeepTimeOffset;
|
||||
extern int32_t tsMaxStreamBackendCache;
|
||||
extern int32_t tsPQSortMemThreshold;
|
||||
|
||||
|
|
|
@ -246,7 +246,6 @@ typedef struct SSortLogicNode {
|
|||
SLogicNode node;
|
||||
SNodeList* pSortKeys;
|
||||
bool groupSort;
|
||||
int64_t maxRows;
|
||||
} SSortLogicNode;
|
||||
|
||||
typedef struct SPartitionLogicNode {
|
||||
|
@ -524,7 +523,6 @@ typedef struct SSortPhysiNode {
|
|||
SNodeList* pExprs; // these are expression list of order_by_clause and parameter expression of aggregate function
|
||||
SNodeList* pSortKeys; // element is SOrderByExprNode, and SOrderByExprNode::pExpr is SColumnNode
|
||||
SNodeList* pTargets;
|
||||
int64_t maxRows;
|
||||
} SSortPhysiNode;
|
||||
|
||||
typedef SSortPhysiNode SGroupSortPhysiNode;
|
||||
|
|
|
@ -77,7 +77,7 @@ PriorityQueueNode* taosPQTop(PriorityQueue* pq);
|
|||
|
||||
size_t taosPQSize(PriorityQueue* pq);
|
||||
|
||||
void taosPQPush(PriorityQueue* pq, const PriorityQueueNode* node);
|
||||
PriorityQueueNode* taosPQPush(PriorityQueue* pq, const PriorityQueueNode* node);
|
||||
|
||||
void taosPQPop(PriorityQueue* pq);
|
||||
|
||||
|
@ -89,7 +89,13 @@ void taosBQSetFn(BoundedQueue* q, pq_comp_fn fn);
|
|||
|
||||
void destroyBoundedQueue(BoundedQueue* q);
|
||||
|
||||
void taosBQPush(BoundedQueue* q, PriorityQueueNode* n);
|
||||
/*
|
||||
* Push one node into BQ
|
||||
* @retval NULL if n is upper than top node in q, and n is not freed
|
||||
* @retval the pushed Node if pushing succeeded
|
||||
* @note if maxSize exceeded, the original highest node is popped and freed with deleteFn
|
||||
* */
|
||||
PriorityQueueNode* taosBQPush(BoundedQueue* q, PriorityQueueNode* n);
|
||||
|
||||
PriorityQueueNode* taosBQTop(BoundedQueue* q);
|
||||
|
||||
|
|
|
@ -108,6 +108,9 @@
|
|||
# time period of keeping log files, in days
|
||||
# logKeepDays 0
|
||||
|
||||
# unit Hour. Latency of data migration
|
||||
# keepTimeOffset 0
|
||||
|
||||
|
||||
############ 3. Debug Flag and levels #############################################
|
||||
|
||||
|
|
|
@ -102,8 +102,6 @@ static const SSysDbTableSchema userDBSchema[] = {
|
|||
{.name = "wal_fsync_period", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
|
||||
{.name = "wal_retention_period", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
|
||||
{.name = "wal_retention_size", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = true},
|
||||
{.name = "wal_roll_period", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
|
||||
{.name = "wal_segment_size", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = true},
|
||||
{.name = "stt_trigger", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT, .sysInfo = true},
|
||||
{.name = "table_prefix", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT, .sysInfo = true},
|
||||
{.name = "table_suffix", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT, .sysInfo = true},
|
||||
|
|
|
@ -186,6 +186,13 @@ bool tsDeployOnSnode = true;
|
|||
* TSDB_TIME_PRECISION_NANO: 60000000000L
|
||||
*/
|
||||
int64_t tsTickPerMin[] = {60000L, 60000000L, 60000000000L};
|
||||
/*
|
||||
* millisecond by default
|
||||
* for TSDB_TIME_PRECISION_MILLI: 3600000L
|
||||
* TSDB_TIME_PRECISION_MICRO: 3600000000L
|
||||
* TSDB_TIME_PRECISION_NANO: 3600000000000L
|
||||
*/
|
||||
int64_t tsTickPerHour[] = {3600000L, 3600000000L, 3600000000000L};
|
||||
|
||||
// lossy compress 6
|
||||
char tsLossyColumns[32] = ""; // "float|double" means all float and double columns can be lossy compressed. set empty
|
||||
|
@ -217,6 +224,7 @@ bool tsDisableStream = false;
|
|||
int64_t tsStreamBufferSize = 128 * 1024 * 1024;
|
||||
int64_t tsCheckpointInterval = 3 * 60 * 60 * 1000;
|
||||
bool tsFilterScalarMode = false;
|
||||
int32_t tsKeepTimeOffset = 0; // latency of data migration
|
||||
|
||||
#ifndef _STORAGE
|
||||
int32_t taosSetTfsCfg(SConfig *pCfg) {
|
||||
|
@ -537,6 +545,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
|
|||
if (cfgAddInt32(pCfg, "cacheLazyLoadThreshold", tsCacheLazyLoadThreshold, 0, 100000, 0) != 0) return -1;
|
||||
|
||||
if (cfgAddBool(pCfg, "filterScalarMode", tsFilterScalarMode, 0) != 0) return -1;
|
||||
if (cfgAddInt32(pCfg, "keepTimeOffset", tsKeepTimeOffset, 0, 23, 0) != 0) return -1;
|
||||
if (cfgAddInt32(pCfg, "maxStreamBackendCache", tsMaxStreamBackendCache, 16, 1024, 0) != 0) return -1;
|
||||
if (cfgAddInt32(pCfg, "pqSortMemThreshold", tsPQSortMemThreshold, 1, 10240, 0) != 0) return -1;
|
||||
|
||||
|
@ -921,6 +930,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
|
|||
tsCheckpointInterval = cfgGetItem(pCfg, "checkpointInterval")->i64;
|
||||
|
||||
tsFilterScalarMode = cfgGetItem(pCfg, "filterScalarMode")->bval;
|
||||
tsKeepTimeOffset = cfgGetItem(pCfg, "keepTimeOffset")->i32;
|
||||
tsMaxStreamBackendCache = cfgGetItem(pCfg, "maxStreamBackendCache")->i32;
|
||||
tsPQSortMemThreshold = cfgGetItem(pCfg, "pqSortMemThreshold")->i32;
|
||||
|
||||
|
@ -1478,6 +1488,19 @@ void taosCfgDynamicOptions(const char *option, const char *value) {
|
|||
return;
|
||||
}
|
||||
|
||||
if (strcasecmp(option, "keepTimeOffset") == 0) {
|
||||
int32_t newKeepTimeOffset = atoi(value);
|
||||
if (newKeepTimeOffset < 0 || newKeepTimeOffset > 23) {
|
||||
uError("failed to set keepTimeOffset from %d to %d. Valid range: [0, 23]", tsKeepTimeOffset, newKeepTimeOffset);
|
||||
return;
|
||||
}
|
||||
|
||||
uInfo("keepTimeOffset set from %d to %d", tsKeepTimeOffset, newKeepTimeOffset);
|
||||
tsKeepTimeOffset = newKeepTimeOffset;
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
const char *options[] = {
|
||||
"dDebugFlag", "vDebugFlag", "mDebugFlag", "wDebugFlag", "sDebugFlag", "tsdbDebugFlag", "tqDebugFlag",
|
||||
"fsDebugFlag", "udfDebugFlag", "smaDebugFlag", "idxDebugFlag", "tdbDebugFlag", "tmrDebugFlag", "uDebugFlag",
|
||||
|
|
|
@ -1840,12 +1840,6 @@ static void mndDumpDbInfoData(SMnode *pMnode, SSDataBlock *pBlock, SDbObj *pDb,
|
|||
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
||||
colDataSetVal(pColInfo, rows, (const char *)&pDb->cfg.walRetentionSize, false);
|
||||
|
||||
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
||||
colDataSetVal(pColInfo, rows, (const char *)&pDb->cfg.walRollPeriod, false);
|
||||
|
||||
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
||||
colDataSetVal(pColInfo, rows, (const char *)&pDb->cfg.walSegmentSize, false);
|
||||
|
||||
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
||||
colDataSetVal(pColInfo, rows, (const char *)&pDb->cfg.sstTrigger, false);
|
||||
|
||||
|
|
|
@ -78,6 +78,7 @@ ESyncRole vnodeGetRole(SVnode *pVnode);
|
|||
int32_t vnodeGetCtbIdList(void *pVnode, int64_t suid, SArray *list);
|
||||
int32_t vnodeGetCtbIdListByFilter(SVnode *pVnode, int64_t suid, SArray *list, bool (*filter)(void *arg), void *arg);
|
||||
int32_t vnodeGetStbIdList(SVnode *pVnode, int64_t suid, SArray *list);
|
||||
int32_t vnodeGetStbIdListByFilter(SVnode *pVnode, int64_t suid, SArray *list, bool (*filter)(void *arg, void* arg1), void *arg);
|
||||
void *vnodeGetIdx(void *pVnode);
|
||||
void *vnodeGetIvtIdx(void *pVnode);
|
||||
|
||||
|
@ -126,6 +127,9 @@ tb_uid_t metaGetTableEntryUidByName(SMeta *pMeta, const char *name);
|
|||
int32_t metaGetCachedTbGroup(void *pVnode, tb_uid_t suid, const uint8_t *pKey, int32_t keyLen, SArray **pList);
|
||||
int32_t metaPutTbGroupToCache(void* pVnode, uint64_t suid, const void *pKey, int32_t keyLen, void *pPayload,
|
||||
int32_t payloadLen);
|
||||
bool metaTbInFilterCache(void *pVnode, tb_uid_t suid, int8_t type);
|
||||
int32_t metaPutTbToFilterCache(void *pVnode, tb_uid_t suid, int8_t type);
|
||||
int32_t metaSizeOfTbFilterCache(void *pVnode, int8_t type);
|
||||
|
||||
int32_t metaGetStbStats(void *pVnode, int64_t uid, int64_t *numOfTables);
|
||||
|
||||
|
|
|
@ -66,6 +66,10 @@ struct SMetaCache {
|
|||
SHashObj* pTableEntry;
|
||||
SLRUCache* pResCache;
|
||||
} STbGroupResCache;
|
||||
|
||||
struct STbFilterCache {
|
||||
SHashObj* pStb;
|
||||
} STbFilterCache;
|
||||
};
|
||||
|
||||
static void entryCacheClose(SMeta* pMeta) {
|
||||
|
@ -168,6 +172,12 @@ int32_t metaCacheOpen(SMeta* pMeta) {
|
|||
taosHashSetFreeFp(pCache->STbGroupResCache.pTableEntry, freeCacheEntryFp);
|
||||
taosThreadMutexInit(&pCache->STbGroupResCache.lock, NULL);
|
||||
|
||||
pCache->STbFilterCache.pStb = taosHashInit(0, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
|
||||
if (pCache->STbFilterCache.pStb == NULL) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto _err2;
|
||||
}
|
||||
|
||||
pMeta->pCache = pCache;
|
||||
return code;
|
||||
|
||||
|
@ -193,6 +203,8 @@ void metaCacheClose(SMeta* pMeta) {
|
|||
taosThreadMutexDestroy(&pMeta->pCache->STbGroupResCache.lock);
|
||||
taosHashCleanup(pMeta->pCache->STbGroupResCache.pTableEntry);
|
||||
|
||||
taosHashCleanup(pMeta->pCache->STbFilterCache.pStb);
|
||||
|
||||
taosMemoryFree(pMeta->pCache);
|
||||
pMeta->pCache = NULL;
|
||||
}
|
||||
|
@ -880,3 +892,31 @@ int32_t metaTbGroupCacheClear(SMeta* pMeta, uint64_t suid) {
|
|||
metaDebug("vgId:%d suid:%" PRId64 " cached related tb group cleared", vgId, suid);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
bool metaTbInFilterCache(void* pVnode, tb_uid_t suid, int8_t type) {
|
||||
SMeta* pMeta = ((SVnode*)pVnode)->pMeta;
|
||||
|
||||
if (type == 0 && taosHashGet(pMeta->pCache->STbFilterCache.pStb, &suid, sizeof(suid))) {
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
int32_t metaPutTbToFilterCache(void* pVnode, tb_uid_t suid, int8_t type) {
|
||||
SMeta* pMeta = ((SVnode*)pVnode)->pMeta;
|
||||
|
||||
if (type == 0) {
|
||||
return taosHashPut(pMeta->pCache->STbFilterCache.pStb, &suid, sizeof(suid), NULL, 0);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t metaSizeOfTbFilterCache(void* pVnode, int8_t type) {
|
||||
SMeta* pMeta = ((SVnode*)pVnode)->pMeta;
|
||||
if (type == 0) {
|
||||
return taosHashGetSize(pMeta->pCache->STbFilterCache.pStb);
|
||||
}
|
||||
return 0;
|
||||
}
|
|
@ -542,6 +542,8 @@ int32_t tsdbFidLevel(int32_t fid, STsdbKeepCfg *pKeepCfg, int64_t nowSec) {
|
|||
ASSERT(0);
|
||||
}
|
||||
|
||||
nowSec = nowSec - tsKeepTimeOffset * tsTickPerHour[pKeepCfg->precision];
|
||||
|
||||
key = nowSec - pKeepCfg->keep0 * tsTickPerMin[pKeepCfg->precision];
|
||||
aFid[0] = tsdbKeyFid(key, pKeepCfg->days, pKeepCfg->precision);
|
||||
key = nowSec - pKeepCfg->keep1 * tsTickPerMin[pKeepCfg->precision];
|
||||
|
|
|
@ -496,6 +496,30 @@ int32_t vnodeGetStbIdList(SVnode *pVnode, int64_t suid, SArray *list) {
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t vnodeGetStbIdListByFilter(SVnode *pVnode, int64_t suid, SArray *list, bool (*filter)(void *arg, void *arg1),
|
||||
void *arg) {
|
||||
SMStbCursor *pCur = metaOpenStbCursor(pVnode->pMeta, suid);
|
||||
if (!pCur) {
|
||||
return TSDB_CODE_FAILED;
|
||||
}
|
||||
|
||||
while (1) {
|
||||
tb_uid_t id = metaStbCursorNext(pCur);
|
||||
if (id == 0) {
|
||||
break;
|
||||
}
|
||||
|
||||
if ((*filter) && (*filter)(arg, &id)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
taosArrayPush(list, &id);
|
||||
}
|
||||
|
||||
metaCloseStbCursor(pCur);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t vnodeGetCtbNum(SVnode *pVnode, int64_t suid, int64_t *num) {
|
||||
SMCtbCursor *pCur = metaOpenCtbCursor(pVnode->pMeta, suid, 0);
|
||||
if (!pCur) {
|
||||
|
@ -531,6 +555,58 @@ static int32_t vnodeGetStbColumnNum(SVnode *pVnode, tb_uid_t suid, int *num) {
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
#ifdef TD_ENTERPRISE
|
||||
#define TK_LOG_STB_NUM 19
|
||||
static const char *tkLogStb[TK_LOG_STB_NUM] = {"cluster_info",
|
||||
"data_dir",
|
||||
"dnodes_info",
|
||||
"d_info",
|
||||
"grants_info",
|
||||
"keeper_monitor",
|
||||
"logs",
|
||||
"log_dir",
|
||||
"log_summary",
|
||||
"m_info",
|
||||
"taosadapter_restful_http_request_fail",
|
||||
"taosadapter_restful_http_request_in_flight",
|
||||
"taosadapter_restful_http_request_summary_milliseconds",
|
||||
"taosadapter_restful_http_request_total",
|
||||
"taosadapter_system_cpu_percent",
|
||||
"taosadapter_system_mem_percent",
|
||||
"temp_dir",
|
||||
"vgroups_info",
|
||||
"vnodes_role"};
|
||||
|
||||
// exclude stbs of taoskeeper log
|
||||
static int32_t vnodeGetTimeSeriesBlackList(SVnode *pVnode) {
|
||||
char *dbName = strchr(pVnode->config.dbname, '.');
|
||||
if (!dbName || 0 != strncmp(++dbName, "log", TSDB_DB_NAME_LEN)) {
|
||||
return 0;
|
||||
}
|
||||
int32_t tbSize = metaSizeOfTbFilterCache(pVnode, 0);
|
||||
if (tbSize < TK_LOG_STB_NUM) {
|
||||
for (int32_t i = 0; i < TK_LOG_STB_NUM; ++i) {
|
||||
tb_uid_t suid = metaGetTableEntryUidByName(pVnode->pMeta, tkLogStb[i]);
|
||||
if (suid != 0) {
|
||||
metaPutTbToFilterCache(pVnode, suid, 0);
|
||||
}
|
||||
}
|
||||
tbSize = metaSizeOfTbFilterCache(pVnode, 0);
|
||||
}
|
||||
|
||||
return tbSize;
|
||||
}
|
||||
#endif
|
||||
|
||||
static bool vnodeTimeSeriesFilter(void *arg1, void *arg2) {
|
||||
SVnode *pVnode = (SVnode *)arg1;
|
||||
|
||||
if (metaTbInFilterCache(pVnode, *(tb_uid_t *)(arg2), 0)) {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
int32_t vnodeGetTimeSeriesNum(SVnode *pVnode, int64_t *num) {
|
||||
SArray *suidList = NULL;
|
||||
|
||||
|
@ -539,7 +615,13 @@ int32_t vnodeGetTimeSeriesNum(SVnode *pVnode, int64_t *num) {
|
|||
return TSDB_CODE_FAILED;
|
||||
}
|
||||
|
||||
if (vnodeGetStbIdList(pVnode, 0, suidList) < 0) {
|
||||
int32_t tbFilterSize = 0;
|
||||
#ifdef TD_ENTERPRISE
|
||||
tbFilterSize = vnodeGetTimeSeriesBlackList(pVnode);
|
||||
#endif
|
||||
|
||||
if ((!tbFilterSize && vnodeGetStbIdList(pVnode, 0, suidList) < 0) ||
|
||||
(tbFilterSize && vnodeGetStbIdListByFilter(pVnode, 0, suidList, vnodeTimeSeriesFilter, pVnode) < 0)) {
|
||||
qError("vgId:%d, failed to get stb id list error: %s", TD_VID(pVnode), terrstr());
|
||||
taosArrayDestroy(suidList);
|
||||
return TSDB_CODE_FAILED;
|
||||
|
|
|
@ -291,12 +291,11 @@ static void setCreateDBResultIntoDataBlock(SSDataBlock* pBlock, char* dbName, ch
|
|||
"CREATE DATABASE `%s` BUFFER %d CACHESIZE %d CACHEMODEL '%s' COMP %d DURATION %dm "
|
||||
"WAL_FSYNC_PERIOD %d MAXROWS %d MINROWS %d STT_TRIGGER %d KEEP %dm,%dm,%dm PAGES %d PAGESIZE %d PRECISION '%s' REPLICA %d "
|
||||
"WAL_LEVEL %d VGROUPS %d SINGLE_STABLE %d TABLE_PREFIX %d TABLE_SUFFIX %d TSDB_PAGESIZE %d "
|
||||
"WAL_RETENTION_PERIOD %d WAL_RETENTION_SIZE %" PRId64 " WAL_ROLL_PERIOD %d WAL_SEGMENT_SIZE %" PRId64,
|
||||
"WAL_RETENTION_PERIOD %d WAL_RETENTION_SIZE %" PRId64,
|
||||
dbName, pCfg->buffer, pCfg->cacheSize, cacheModelStr(pCfg->cacheLast), pCfg->compression, pCfg->daysPerFile,
|
||||
pCfg->walFsyncPeriod, pCfg->maxRows, pCfg->minRows, pCfg->sstTrigger, pCfg->daysToKeep0, pCfg->daysToKeep1, pCfg->daysToKeep2,
|
||||
pCfg->pages, pCfg->pageSize, prec, pCfg->replications, pCfg->walLevel, pCfg->numOfVgroups,
|
||||
1 == pCfg->numOfStables, hashPrefix, pCfg->hashSuffix, pCfg->tsdbPageSize, pCfg->walRetentionPeriod,
|
||||
pCfg->walRetentionSize, pCfg->walRollPeriod, pCfg->walSegmentSize);
|
||||
1 == pCfg->numOfStables, hashPrefix, pCfg->hashSuffix, pCfg->tsdbPageSize, pCfg->walRetentionPeriod, pCfg->walRetentionSize);
|
||||
|
||||
if (retentions) {
|
||||
len += sprintf(buf2 + VARSTR_HEADER_SIZE + len, " RETENTIONS %s", retentions);
|
||||
|
|
|
@ -64,8 +64,8 @@ typedef int32_t (*_sort_merge_compar_fn_t)(const void* p1, const void* p2, void*
|
|||
/**
|
||||
*
|
||||
* @param type
|
||||
* @param maxRows keep maxRows at most
|
||||
* @param maxTupleLength max len of one tuple, for check if heap sort is applicable
|
||||
* @param maxRows keep maxRows at most, if 0, pq sort will not be used
|
||||
* @param maxTupleLength max len of one tuple, for check if pq sort is applicable
|
||||
* @param sortBufSize sort memory buf size, for check if heap sort is applicable
|
||||
* @return
|
||||
*/
|
||||
|
@ -73,6 +73,8 @@ SSortHandle* tsortCreateSortHandle(SArray* pOrderInfo, int32_t type, int32_t pag
|
|||
SSDataBlock* pBlock, const char* idstr, uint64_t maxRows, uint32_t maxTupleLength,
|
||||
uint32_t sortBufSize);
|
||||
|
||||
void tsortSetForceUsePQSort(SSortHandle* pHandle);
|
||||
|
||||
/**
|
||||
*
|
||||
* @param pSortHandle
|
||||
|
|
|
@ -55,7 +55,11 @@ SOperatorInfo* createSortOperatorInfo(SOperatorInfo* downstream, SSortPhysiNode*
|
|||
pOperator->exprSupp.pExprInfo = createExprInfo(pSortNode->pExprs, NULL, &numOfCols);
|
||||
pOperator->exprSupp.numOfExprs = numOfCols;
|
||||
calcSortOperMaxTupleLength(pInfo, pSortNode->pSortKeys);
|
||||
pInfo->maxRows = pSortNode->maxRows;
|
||||
pInfo->maxRows = -1;
|
||||
if (pSortNode->node.pLimit) {
|
||||
SLimitNode* pLimit = (SLimitNode*)pSortNode->node.pLimit;
|
||||
if (pLimit->limit > 0) pInfo->maxRows = pLimit->limit;
|
||||
}
|
||||
|
||||
int32_t numOfOutputCols = 0;
|
||||
int32_t code =
|
||||
|
@ -718,7 +722,7 @@ SSDataBlock* getMultiwaySortedBlockData(SSortHandle* pHandle, SSDataBlock* pData
|
|||
resetLimitInfoForNextGroup(&pInfo->limitInfo);
|
||||
}
|
||||
|
||||
if (p->info.rows > 0) {
|
||||
if (p->info.rows > 0 || limitReached) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -45,6 +45,7 @@ struct SSortHandle {
|
|||
uint64_t maxRows;
|
||||
uint32_t maxTupleLength;
|
||||
uint32_t sortBufSize;
|
||||
bool forceUsePQSort;
|
||||
BoundedQueue* pBoundedQueue;
|
||||
uint32_t tmpRowIdx;
|
||||
|
||||
|
@ -73,7 +74,7 @@ static void* createTuple(uint32_t columnNum, uint32_t tupleLen) {
|
|||
uint32_t totalLen = sizeof(uint32_t) * columnNum + BitmapLen(columnNum) + tupleLen;
|
||||
return taosMemoryCalloc(1, totalLen);
|
||||
}
|
||||
static void destoryTuple(void* t) { taosMemoryFree(t); }
|
||||
static void destoryAllocatedTuple(void* t) { taosMemoryFree(t); }
|
||||
|
||||
#define tupleOffset(tuple, colIdx) ((uint32_t*)(tuple + sizeof(uint32_t) * colIdx))
|
||||
#define tupleSetOffset(tuple, colIdx, offset) (*tupleOffset(tuple, colIdx) = offset)
|
||||
|
@ -107,12 +108,65 @@ static void* tupleGetField(char* t, uint32_t colIdx, uint32_t colNum) {
|
|||
return t + *tupleOffset(t, colIdx);
|
||||
}
|
||||
|
||||
static int32_t colDataComparFn(const void* pLeft, const void* pRight, void* param);
|
||||
|
||||
SSDataBlock* tsortGetSortedDataBlock(const SSortHandle* pSortHandle) {
|
||||
return createOneDataBlock(pSortHandle->pDataBlock, false);
|
||||
}
|
||||
|
||||
#define AllocatedTupleType 0
|
||||
#define ReferencedTupleType 1 // tuple references to one row in pDataBlock
|
||||
typedef struct TupleDesc {
|
||||
uint8_t type;
|
||||
char* data; // if type is AllocatedTuple, then points to the created tuple, otherwise points to the DataBlock
|
||||
} TupleDesc;
|
||||
|
||||
typedef struct ReferencedTuple {
|
||||
TupleDesc desc;
|
||||
size_t rowIndex;
|
||||
} ReferencedTuple;
|
||||
|
||||
static TupleDesc* createAllocatedTuple(SSDataBlock* pBlock, size_t colNum, uint32_t tupleLen, size_t rowIdx) {
|
||||
TupleDesc* t = taosMemoryCalloc(1, sizeof(TupleDesc));
|
||||
void* pTuple = createTuple(colNum, tupleLen);
|
||||
if (!pTuple) {
|
||||
taosMemoryFree(t);
|
||||
return NULL;
|
||||
}
|
||||
size_t colLen = 0;
|
||||
uint32_t offset = tupleGetDataStartOffset(colNum);
|
||||
for (size_t colIdx = 0; colIdx < colNum; ++colIdx) {
|
||||
SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, colIdx);
|
||||
if (colDataIsNull_s(pCol, rowIdx)) {
|
||||
offset = tupleAddField((char**)&pTuple, colNum, offset, colIdx, 0, 0, true, tupleLen);
|
||||
} else {
|
||||
colLen = colDataGetRowLength(pCol, rowIdx);
|
||||
offset =
|
||||
tupleAddField((char**)&pTuple, colNum, offset, colIdx, colDataGetData(pCol, rowIdx), colLen, false, tupleLen);
|
||||
}
|
||||
}
|
||||
t->type = AllocatedTupleType;
|
||||
t->data = pTuple;
|
||||
return t;
|
||||
}
|
||||
|
||||
void* tupleDescGetField(const TupleDesc* pDesc, int32_t colIdx, uint32_t colNum) {
|
||||
if (pDesc->type == ReferencedTupleType) {
|
||||
ReferencedTuple* pRefTuple = (ReferencedTuple*)pDesc;
|
||||
SColumnInfoData* pCol = taosArrayGet(((SSDataBlock*)pDesc->data)->pDataBlock, colIdx);
|
||||
if (colDataIsNull_s(pCol, pRefTuple->rowIndex)) return NULL;
|
||||
return colDataGetData(pCol, pRefTuple->rowIndex);
|
||||
} else {
|
||||
return tupleGetField(pDesc->data, colIdx, colNum);
|
||||
}
|
||||
}
|
||||
|
||||
void destroyTuple(void* t) {
|
||||
TupleDesc* pDesc = t;
|
||||
if (pDesc->type == AllocatedTupleType) {
|
||||
destoryAllocatedTuple(pDesc->data);
|
||||
taosMemoryFree(pDesc);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
* @param type
|
||||
|
@ -130,11 +184,11 @@ SSortHandle* tsortCreateSortHandle(SArray* pSortInfo, int32_t type, int32_t page
|
|||
pSortHandle->loops = 0;
|
||||
|
||||
pSortHandle->maxTupleLength = maxTupleLength;
|
||||
if (maxRows < 0)
|
||||
pSortHandle->sortBufSize = 0;
|
||||
else
|
||||
if (maxRows != 0) {
|
||||
pSortHandle->sortBufSize = sortBufSize;
|
||||
pSortHandle->maxRows = maxRows;
|
||||
pSortHandle->maxRows = maxRows;
|
||||
}
|
||||
pSortHandle->forceUsePQSort = false;
|
||||
|
||||
if (pBlock != NULL) {
|
||||
pSortHandle->pDataBlock = createOneDataBlock(pBlock, false);
|
||||
|
@ -779,7 +833,7 @@ static int32_t createInitialSources(SSortHandle* pHandle) {
|
|||
|
||||
int64_t el = taosGetTimestampUs() - p;
|
||||
pHandle->sortElapsed += el;
|
||||
|
||||
if (pHandle->maxRows > 0) blockDataKeepFirstNRows(pHandle->pDataBlock, pHandle->maxRows);
|
||||
code = doAddToBuf(pHandle->pDataBlock, pHandle);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
|
@ -804,6 +858,7 @@ static int32_t createInitialSources(SSortHandle* pHandle) {
|
|||
return code;
|
||||
}
|
||||
|
||||
if (pHandle->maxRows > 0) blockDataKeepFirstNRows(pHandle->pDataBlock, pHandle->maxRows);
|
||||
int64_t el = taosGetTimestampUs() - p;
|
||||
pHandle->sortElapsed += el;
|
||||
|
||||
|
@ -936,8 +991,17 @@ static STupleHandle* tsortBufMergeSortNextTuple(SSortHandle* pHandle) {
|
|||
return &pHandle->tupleHandle;
|
||||
}
|
||||
|
||||
static bool tsortIsForceUsePQSort(SSortHandle* pHandle) {
|
||||
return pHandle->forceUsePQSort == true;
|
||||
}
|
||||
|
||||
void tsortSetForceUsePQSort(SSortHandle* pHandle) {
|
||||
pHandle->forceUsePQSort = true;
|
||||
}
|
||||
|
||||
static bool tsortIsPQSortApplicable(SSortHandle* pHandle) {
|
||||
if (pHandle->type != SORT_SINGLESOURCE_SORT) return false;
|
||||
if (tsortIsForceUsePQSort(pHandle)) return true;
|
||||
uint64_t maxRowsFitInMemory = pHandle->sortBufSize / (pHandle->maxTupleLength + sizeof(char*));
|
||||
return maxRowsFitInMemory > pHandle->maxRows;
|
||||
}
|
||||
|

@@ -956,16 +1020,17 @@ static bool tsortPQComFnReverse(void*a, void* b, void* param) {
  return 0;
}

static int32_t colDataComparFn(const void* pLeft, const void* pRight, void* param) {
  char* pLTuple = (char*)pLeft;
  char* pRTuple = (char*)pRight;
static int32_t tupleComparFn(const void* pLeft, const void* pRight, void* param) {
  TupleDesc* pLeftDesc = (TupleDesc*)pLeft;
  TupleDesc* pRightDesc = (TupleDesc*)pRight;

  SSortHandle* pHandle = (SSortHandle*)param;
  SArray* orderInfo = (SArray*)pHandle->pSortInfo;
  uint32_t colNum = blockDataGetNumOfCols(pHandle->pDataBlock);
  for (int32_t i = 0; i < orderInfo->size; ++i) {
    SBlockOrderInfo* pOrder = TARRAY_GET_ELEM(orderInfo, i);
    void *lData = tupleGetField(pLTuple, pOrder->slotId, colNum);
    void *rData = tupleGetField(pRTuple, pOrder->slotId, colNum);
    void *lData = tupleDescGetField(pLeftDesc, pOrder->slotId, colNum);
    void *rData = tupleDescGetField(pRightDesc, pOrder->slotId, colNum);
    if (!lData && !rData) continue;
    if (!lData) return pOrder->nullFirst ? -1 : 1;
    if (!rData) return pOrder->nullFirst ? 1 : -1;

@@ -984,9 +1049,9 @@ static int32_t colDataComparFn(const void* pLeft, const void* pRight, void* para
}

static int32_t tsortOpenForPQSort(SSortHandle* pHandle) {
  pHandle->pBoundedQueue = createBoundedQueue(pHandle->maxRows, tsortPQCompFn, destoryTuple, pHandle);
  pHandle->pBoundedQueue = createBoundedQueue(pHandle->maxRows, tsortPQCompFn, destroyTuple, pHandle);
  if (NULL == pHandle->pBoundedQueue) return TSDB_CODE_OUT_OF_MEMORY;
  tsortSetComparFp(pHandle, colDataComparFn);
  tsortSetComparFp(pHandle, tupleComparFn);

  SSortSource** pSource = taosArrayGet(pHandle->pOrderedSource, 0);
  SSortSource* source = *pSource;

@@ -1018,24 +1083,17 @@ static int32_t tsortOpenForPQSort(SSortHandle* pHandle) {
      }
    }
  }
  size_t colLen = 0;
  ReferencedTuple refTuple = {.desc.data = (char*)pBlock, .desc.type = ReferencedTupleType, .rowIndex = 0};
  for (size_t rowIdx = 0; rowIdx < pBlock->info.rows; ++rowIdx) {
    void* pTuple = createTuple(colNum, tupleLen);
    if (pTuple == NULL) return TSDB_CODE_OUT_OF_MEMORY;

    uint32_t offset = tupleGetDataStartOffset(colNum);
    for (size_t colIdx = 0; colIdx < colNum; ++colIdx) {
      SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, colIdx);
      if (colDataIsNull_s(pCol, rowIdx)) {
        offset = tupleAddField((char**)&pTuple, colNum, offset, colIdx, 0, 0, true, tupleLen);
      } else {
        colLen = colDataGetRowLength(pCol, rowIdx);
        offset = tupleAddField((char**)&pTuple, colNum, offset, colIdx, colDataGetData(pCol, rowIdx), colLen, false,
                               tupleLen);
      }
    refTuple.rowIndex = rowIdx;
    pqNode.data = &refTuple;
    PriorityQueueNode* pPushedNode = taosBQPush(pHandle->pBoundedQueue, &pqNode);
    if (!pPushedNode) {
      // do nothing if push failed
    } else {
      pPushedNode->data = createAllocatedTuple(pBlock, colNum, tupleLen, rowIdx);
      if (pPushedNode->data == NULL) return TSDB_CODE_OUT_OF_MEMORY;
    }
    pqNode.data = pTuple;
    taosBQPush(pHandle->pBoundedQueue, &pqNode);
  }
  }
  return TSDB_CODE_SUCCESS;

@@ -1044,7 +1102,7 @@ static int32_t tsortOpenForPQSort(SSortHandle* pHandle) {
static STupleHandle* tsortPQSortNextTuple(SSortHandle* pHandle) {
  blockDataCleanup(pHandle->pDataBlock);
  blockDataEnsureCapacity(pHandle->pDataBlock, 1);
  // abondan the top tuple if queue size bigger than max size
  // abandon the top tuple if queue size bigger than max size
  if (taosBQSize(pHandle->pBoundedQueue) == taosBQMaxSize(pHandle->pBoundedQueue) + 1) {
    taosBQPop(pHandle->pBoundedQueue);
  }

@@ -1056,7 +1114,7 @@ static STupleHandle* tsortPQSortNextTuple(SSortHandle* pHandle) {
  if (taosBQSize(pHandle->pBoundedQueue) > 0) {
    uint32_t colNum = blockDataGetNumOfCols(pHandle->pDataBlock);
    PriorityQueueNode* node = taosBQTop(pHandle->pBoundedQueue);
    char* pTuple = (char*)node->data;
    char* pTuple = ((TupleDesc*)node->data)->data;

    for (uint32_t i = 0; i < colNum; ++i) {
      void* pData = tupleGetField(pTuple, i, colNum);

@@ -502,7 +502,6 @@ static int32_t logicSortCopy(const SSortLogicNode* pSrc, SSortLogicNode* pDst) {
  COPY_BASE_OBJECT_FIELD(node, logicNodeCopy);
  CLONE_NODE_LIST_FIELD(pSortKeys);
  COPY_SCALAR_FIELD(groupSort);
  COPY_SCALAR_FIELD(maxRows);
  return TSDB_CODE_SUCCESS;
}

@@ -2115,9 +2115,6 @@ static int32_t physiSortNodeToJson(const void* pObj, SJson* pJson) {
  if (TSDB_CODE_SUCCESS == code) {
    code = nodeListToJson(pJson, jkSortPhysiPlanTargets, pNode->pTargets);
  }
  if (TSDB_CODE_SUCCESS == code) {
    code = tjsonAddIntegerToObject(pJson, jkSortPhysiPlanMaxRows, pNode->maxRows);
  }

  return code;
}

@@ -2135,9 +2132,6 @@ static int32_t jsonToPhysiSortNode(const SJson* pJson, void* pObj) {
  if (TSDB_CODE_SUCCESS == code) {
    code = jsonToNodeList(pJson, jkSortPhysiPlanTargets, &pNode->pTargets);
  }
  if (TSDB_CODE_SUCCESS == code) {
    code = tjsonGetBigIntValue(pJson, jkSortPhysiPlanMaxRows, &pNode->maxRows);
  }

  return code;
}

@@ -2594,7 +2594,7 @@ static int32_t msgToPhysiMergeNode(STlvDecoder* pDecoder, void* pObj) {
  return code;
}

enum { PHY_SORT_CODE_BASE_NODE = 1, PHY_SORT_CODE_EXPR, PHY_SORT_CODE_SORT_KEYS, PHY_SORT_CODE_TARGETS, PHY_SORT_CODE_MAX_ROWS };
enum { PHY_SORT_CODE_BASE_NODE = 1, PHY_SORT_CODE_EXPR, PHY_SORT_CODE_SORT_KEYS, PHY_SORT_CODE_TARGETS };

static int32_t physiSortNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
  const SSortPhysiNode* pNode = (const SSortPhysiNode*)pObj;

@@ -2609,9 +2609,6 @@ static int32_t physiSortNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
  if (TSDB_CODE_SUCCESS == code) {
    code = tlvEncodeObj(pEncoder, PHY_SORT_CODE_TARGETS, nodeListToMsg, pNode->pTargets);
  }
  if (TSDB_CODE_SUCCESS == code) {
    code = tlvEncodeI64(pEncoder, PHY_SORT_CODE_MAX_ROWS, pNode->maxRows);
  }

  return code;
}

@@ -2635,9 +2632,6 @@ static int32_t msgToPhysiSortNode(STlvDecoder* pDecoder, void* pObj) {
      case PHY_SORT_CODE_TARGETS:
        code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pTargets);
        break;
      case PHY_SORT_CODE_MAX_ROWS:
        code = tlvDecodeI64(pTlv, &pNode->maxRows);
        break;
      default:
        break;
    }

@@ -1027,7 +1027,6 @@ static int32_t createSortLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect
    return TSDB_CODE_OUT_OF_MEMORY;
  }

  pSort->maxRows = -1;
  pSort->groupSort = pSelect->groupSort;
  pSort->node.groupAction = pSort->groupSort ? GROUP_ACTION_KEEP : GROUP_ACTION_CLEAR;
  pSort->node.requireDataOrder = DATA_ORDER_LEVEL_NONE;

@@ -1299,7 +1298,6 @@ static int32_t createSetOpSortLogicNode(SLogicPlanContext* pCxt, SSetOperator* p
    return TSDB_CODE_OUT_OF_MEMORY;
  }

  pSort->maxRows = -1;
  TSWAP(pSort->node.pLimit, pSetOperator->pLimit);

  int32_t code = TSDB_CODE_SUCCESS;

@@ -2635,11 +2635,13 @@ static bool pushDownLimitOptShouldBeOptimized(SLogicNode* pNode) {
  }

  SLogicNode* pChild = (SLogicNode*)nodesListGetNode(pNode->pChildren, 0);
  // push down to sort node
  if (QUERY_NODE_LOGIC_PLAN_SORT == nodeType(pChild)) {
    SLimitNode* pChildLimit = (SLimitNode*)(pChild->pLimit);
    // if we have pushed down, we skip it
    if ((*(SSortLogicNode*)pChild).maxRows != -1) return false;
  } else if (QUERY_NODE_LOGIC_PLAN_SCAN != nodeType(pChild)) {
    if (pChild->pLimit) return false;
  } else if (QUERY_NODE_LOGIC_PLAN_SCAN != nodeType(pChild) || QUERY_NODE_LOGIC_PLAN_SORT == nodeType(pNode)) {
    // push down to table scan node
    // if pNode is sortNode, we skip push down limit info to table scan node
    return false;
  }
  return true;

@@ -2654,13 +2656,10 @@ static int32_t pushDownLimitOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLog
  SLogicNode* pChild = (SLogicNode*)nodesListGetNode(pNode->pChildren, 0);
  nodesDestroyNode(pChild->pLimit);
  if (QUERY_NODE_LOGIC_PLAN_SORT == nodeType(pChild)) {
    SLimitNode* pLimitNode = (SLimitNode*)pNode->pLimit;
    int64_t maxRows = -1;
    if (pLimitNode->limit != -1) {
      maxRows = pLimitNode->limit;
      if (pLimitNode->offset != -1) maxRows += pLimitNode->offset;
    }
    ((SSortLogicNode*)pChild)->maxRows = maxRows;
    pChild->pLimit = nodesCloneNode(pNode->pLimit);
    SLimitNode* pLimit = (SLimitNode*)pChild->pLimit;
    pLimit->limit += pLimit->offset;
    pLimit->offset = 0;
  } else {
    pChild->pLimit = pNode->pLimit;
    pNode->pLimit = NULL;
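In the rewritten pushDownLimitOptimize above, the parent's limit appears to be cloned onto the sort child with the offset folded into the limit: a query ending in LIMIT 10 OFFSET 5 would leave the child sort producing at most 15 rows (limit = 15, offset = 0) while the parent still skips the first 5, replacing the earlier maxRows bookkeeping that computed the same limit-plus-offset value on SSortLogicNode.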

@@ -1374,7 +1374,6 @@ static int32_t createSortPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren
  if (NULL == pSort) {
    return TSDB_CODE_OUT_OF_MEMORY;
  }
  pSort->maxRows = pSortLogicNode->maxRows;

  SNodeList* pPrecalcExprs = NULL;
  SNodeList* pSortKeys = NULL;

@@ -1018,7 +1018,6 @@ static int32_t stbSplCreatePartSortNode(SSortLogicNode* pSort, SLogicNode** pOut
    splSetParent((SLogicNode*)pPartSort);
    pPartSort->pSortKeys = pSortKeys;
    pPartSort->groupSort = pSort->groupSort;
    pPartSort->maxRows = pSort->maxRows;
    code = stbSplCreateMergeKeys(pPartSort->pSortKeys, pPartSort->node.pTargets, &pMergeKeys);
  }

@@ -230,7 +230,7 @@ static void pqSwapPQNode(PriorityQueueNode* a, PriorityQueueNode* b) {

size_t taosPQSize(PriorityQueue* pq) { return pqContainerSize(pq); }

static void pqHeapify(PriorityQueue* pq, size_t from, size_t last) {
static PriorityQueueNode* pqHeapify(PriorityQueue* pq, size_t from, size_t last) {
  size_t largest = from;
  do {
    from = largest;

@@ -246,6 +246,7 @@ static void pqHeapify(PriorityQueue* pq, size_t from, size_t last) {
      pqSwapPQNode(pqContainerGetEle(pq, from), pqContainerGetEle(pq, largest));
    }
  } while (largest != from);
  return pqContainerGetEle(pq, largest);
}

static void pqBuildHeap(PriorityQueue* pq) {

@@ -257,12 +258,13 @@ static void pqBuildHeap(PriorityQueue* pq) {
  }
}

static void pqReverseHeapify(PriorityQueue* pq, size_t i) {
static PriorityQueueNode* pqReverseHeapify(PriorityQueue* pq, size_t i) {
  while (i > 0 && !pq->fn(pqContainerGetEle(pq, i)->data, pqContainerGetEle(pq, pqParent(i))->data, pq->param)) {
    size_t parentIdx = pqParent(i);
    pqSwapPQNode(pqContainerGetEle(pq, i), pqContainerGetEle(pq, parentIdx));
    i = parentIdx;
  }
  return pqContainerGetEle(pq, i);
}

static void pqUpdate(PriorityQueue* pq, size_t i) {

@@ -290,9 +292,9 @@ PriorityQueueNode* taosPQTop(PriorityQueue* pq) {
  return pqContainerGetEle(pq, 0);
}

void taosPQPush(PriorityQueue* pq, const PriorityQueueNode* node) {
PriorityQueueNode* taosPQPush(PriorityQueue* pq, const PriorityQueueNode* node) {
  taosArrayPush(pq->container, node);
  pqReverseHeapify(pq, pqContainerSize(pq) - 1);
  return pqReverseHeapify(pq, pqContainerSize(pq) - 1);
}

void taosPQPop(PriorityQueue* pq) {

@@ -324,16 +326,20 @@ void destroyBoundedQueue(BoundedQueue* q) {
  taosMemoryFree(q);
}

void taosBQPush(BoundedQueue* q, PriorityQueueNode* n) {
PriorityQueueNode* taosBQPush(BoundedQueue* q, PriorityQueueNode* n) {
  if (pqContainerSize(q->queue) == q->maxSize + 1) {
    PriorityQueueNode* top = pqContainerGetEle(q->queue, 0);
    void *p = top->data;
    top->data = n->data;
    n->data = p;
    if (q->queue->deleteFn) q->queue->deleteFn(n->data);
    pqHeapify(q->queue, 0, taosBQSize(q));
    if (q->queue->fn(top->data, n->data, q->queue->param)) {
      return NULL;
    } else {
      void* p = top->data;
      top->data = n->data;
      n->data = p;
      if (q->queue->deleteFn) q->queue->deleteFn(n->data);
    }
    return pqHeapify(q->queue, 0, taosBQSize(q));
  } else {
    taosPQPush(q->queue, n);
    return taosPQPush(q->queue, n);
  }
}
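The reworked taosBQPush above no longer blindly overwrites the heap top when the queue is full: the candidate is first compared against the current top and rejected (the function returns NULL) when it would not improve the kept set; otherwise it swaps into the top slot, the displaced tuple is released via deleteFn, and the node that now owns the slot is returned so the caller can attach heavier per-row data only after a successful push, as tsortOpenForPQSort does with createAllocatedTuple. Below is a minimal, self-contained sketch of the same bounded top-k idea; it is illustrative only, and the topk_* names are hypothetical rather than TDengine APIs.

```c
/* Bounded max-heap that keeps the k smallest integers (top-k sketch). */
#include <stdio.h>
#include <stdlib.h>

typedef struct {
  int   *heap;  /* heap[0] is the largest of the kept elements */
  size_t size;
  size_t cap;   /* k */
} TopK;

static void topkSiftDown(TopK *t, size_t i) {
  for (;;) {
    size_t l = 2 * i + 1, r = 2 * i + 2, largest = i;
    if (l < t->size && t->heap[l] > t->heap[largest]) largest = l;
    if (r < t->size && t->heap[r] > t->heap[largest]) largest = r;
    if (largest == i) break;
    int tmp = t->heap[i]; t->heap[i] = t->heap[largest]; t->heap[largest] = tmp;
    i = largest;
  }
}

static void topkSiftUp(TopK *t, size_t i) {
  while (i > 0 && t->heap[(i - 1) / 2] < t->heap[i]) {
    size_t p = (i - 1) / 2;
    int tmp = t->heap[i]; t->heap[i] = t->heap[p]; t->heap[p] = tmp;
    i = p;
  }
}

/* Returns 1 if v was kept, 0 if rejected (not among the k smallest so far). */
static int topkPush(TopK *t, int v) {
  if (t->size < t->cap) {        /* queue not full: always accept */
    t->heap[t->size++] = v;
    topkSiftUp(t, t->size - 1);
    return 1;
  }
  if (v >= t->heap[0]) return 0; /* does not beat the current worst: reject */
  t->heap[0] = v;                /* replace the worst, then restore heap order */
  topkSiftDown(t, 0);
  return 1;
}

int main(void) {
  int  data[] = {9, 3, 7, 1, 8, 2, 6};
  TopK t = {.heap = malloc(3 * sizeof(int)), .size = 0, .cap = 3};
  for (size_t i = 0; i < sizeof(data) / sizeof(data[0]); ++i) topkPush(&t, data[i]);
  for (size_t i = 0; i < t.size; ++i) printf("%d ", t.heap[i]); /* the 3 smallest, heap order */
  printf("\n");
  free(t.heap);
  return 0;
}
```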

@@ -1,4 +1,4 @@
import sys
import sys
from util.log import *
from util.cases import *
from util.sql import *

@@ -8,15 +8,15 @@ from math import inf
class TDTestCase:
    def caseDescription(self):
        '''
        case1<shenglian zhou>: [TD-11204]Difference improvement that can ignore negative
        '''
        case1<shenglian zhou>: [TD-11204]Difference improvement that can ignore negative
        '''
        return

    def init(self, conn, logSql, replicaVer=1):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), False)
        self._conn = conn

    def restartTaosd(self, index=1, dbname="db"):
        tdDnodes.stop(index)
        tdDnodes.startWithoutSleep(index)

@@ -42,17 +42,17 @@ class TDTestCase:
        tdSql.query('show create database scd;')
        tdSql.checkRows(1)
        tdSql.checkData(0, 0, 'scd')
        tdSql.checkData(0, 1, "CREATE DATABASE `scd` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 14400m WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 1 KEEP 5256000m,5256000m,5256000m PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0 TABLE_PREFIX 0 TABLE_SUFFIX 0 TSDB_PAGESIZE 4 WAL_RETENTION_PERIOD 0 WAL_RETENTION_SIZE 0 WAL_ROLL_PERIOD 0 WAL_SEGMENT_SIZE 0")
        tdSql.checkData(0, 1, "CREATE DATABASE `scd` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 14400m WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 1 KEEP 5256000m,5256000m,5256000m PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0 TABLE_PREFIX 0 TABLE_SUFFIX 0 TSDB_PAGESIZE 4 WAL_RETENTION_PERIOD 0 WAL_RETENTION_SIZE 0")

        tdSql.query('show create database scd2;')
        tdSql.checkRows(1)
        tdSql.checkData(0, 0, 'scd2')
        tdSql.checkData(0, 1, "CREATE DATABASE `scd2` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 14400m WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 3 KEEP 5256000m,5256000m,5256000m PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0 TABLE_PREFIX 0 TABLE_SUFFIX 0 TSDB_PAGESIZE 4 WAL_RETENTION_PERIOD 0 WAL_RETENTION_SIZE 0 WAL_ROLL_PERIOD 0 WAL_SEGMENT_SIZE 0")
        tdSql.checkData(0, 1, "CREATE DATABASE `scd2` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 14400m WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 3 KEEP 5256000m,5256000m,5256000m PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0 TABLE_PREFIX 0 TABLE_SUFFIX 0 TSDB_PAGESIZE 4 WAL_RETENTION_PERIOD 0 WAL_RETENTION_SIZE 0")

        tdSql.query('show create database scd4')
        tdSql.checkRows(1)
        tdSql.checkData(0, 0, 'scd4')
        tdSql.checkData(0, 1, "CREATE DATABASE `scd4` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 14400m WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 13 KEEP 5256000m,5256000m,5256000m PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0 TABLE_PREFIX 0 TABLE_SUFFIX 0 TSDB_PAGESIZE 4 WAL_RETENTION_PERIOD 0 WAL_RETENTION_SIZE 0 WAL_ROLL_PERIOD 0 WAL_SEGMENT_SIZE 0")
        tdSql.checkData(0, 1, "CREATE DATABASE `scd4` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 14400m WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 13 KEEP 5256000m,5256000m,5256000m PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0 TABLE_PREFIX 0 TABLE_SUFFIX 0 TSDB_PAGESIZE 4 WAL_RETENTION_PERIOD 0 WAL_RETENTION_SIZE 0")

        self.restartTaosd(1, dbname='scd')

@@ -60,17 +60,17 @@ class TDTestCase:
        tdSql.query('show create database scd;')
        tdSql.checkRows(1)
        tdSql.checkData(0, 0, 'scd')
        tdSql.checkData(0, 1, "CREATE DATABASE `scd` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 14400m WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 1 KEEP 5256000m,5256000m,5256000m PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0 TABLE_PREFIX 0 TABLE_SUFFIX 0 TSDB_PAGESIZE 4 WAL_RETENTION_PERIOD 0 WAL_RETENTION_SIZE 0 WAL_ROLL_PERIOD 0 WAL_SEGMENT_SIZE 0")
        tdSql.checkData(0, 1, "CREATE DATABASE `scd` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 14400m WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 1 KEEP 5256000m,5256000m,5256000m PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0 TABLE_PREFIX 0 TABLE_SUFFIX 0 TSDB_PAGESIZE 4 WAL_RETENTION_PERIOD 0 WAL_RETENTION_SIZE 0")

        tdSql.query('show create database scd2;')
        tdSql.checkRows(1)
        tdSql.checkData(0, 0, 'scd2')
        tdSql.checkData(0, 1, "CREATE DATABASE `scd2` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 14400m WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 3 KEEP 5256000m,5256000m,5256000m PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0 TABLE_PREFIX 0 TABLE_SUFFIX 0 TSDB_PAGESIZE 4 WAL_RETENTION_PERIOD 0 WAL_RETENTION_SIZE 0 WAL_ROLL_PERIOD 0 WAL_SEGMENT_SIZE 0")
        tdSql.checkData(0, 1, "CREATE DATABASE `scd2` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 14400m WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 3 KEEP 5256000m,5256000m,5256000m PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0 TABLE_PREFIX 0 TABLE_SUFFIX 0 TSDB_PAGESIZE 4 WAL_RETENTION_PERIOD 0 WAL_RETENTION_SIZE 0")

        tdSql.query('show create database scd4')
        tdSql.checkRows(1)
        tdSql.checkData(0, 0, 'scd4')
        tdSql.checkData(0, 1, "CREATE DATABASE `scd4` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 14400m WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 13 KEEP 5256000m,5256000m,5256000m PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0 TABLE_PREFIX 0 TABLE_SUFFIX 0 TSDB_PAGESIZE 4 WAL_RETENTION_PERIOD 0 WAL_RETENTION_SIZE 0 WAL_ROLL_PERIOD 0 WAL_SEGMENT_SIZE 0")
        tdSql.checkData(0, 1, "CREATE DATABASE `scd4` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 14400m WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 13 KEEP 5256000m,5256000m,5256000m PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0 TABLE_PREFIX 0 TABLE_SUFFIX 0 TSDB_PAGESIZE 4 WAL_RETENTION_PERIOD 0 WAL_RETENTION_SIZE 0")

        tdSql.execute('drop database scd')

@@ -117,12 +117,6 @@ endi
if $data23_db != 0 then # wal_retention_size
  return -1
endi
if $data24_db != 0 then # wal_roll_period
  return -1
endi
if $data25_db != 0 then # wal_segment_size
  return -1
endi

#sql show db.vgroups
#if $data[0][4] == leader then

@@ -7,11 +7,11 @@ sql connect
#sql create database d1 vgroups 2
sql create database d1 vgroups 2 table_prefix 3 table_suffix 2
sql select * from information_schema.ins_databases
print $data(d1)[27] $data(d1)[28]
if $data(d1)[27] != 3 then
print $data(d1)[25] $data(d1)[26]
if $data(d1)[25] != 3 then
  return -1
endi
if $data(d1)[28] != 2 then
if $data(d1)[26] != 2 then
  return -1
endi

@@ -45,8 +45,6 @@ class TDTestCase:
            "replica":1,
            "wal_level":1,
            "wal_fsync_period":6000,
            "wal_roll_period":0,
            "wal_segment_size":1024,
            "vgroups":self.vgroups,
            "stt_trigger":1,
            "tsdb_pagesize":16

@@ -1,4 +1,3 @@

import sys
import time
import threading

@@ -64,7 +63,7 @@ class TDTestCase:
        queryString = "select ts, log(c1), ceil(pow(c1,3)) from %s.%s where c1 %% 7 == 0" %(paraDict['dbName'], paraDict['stbName'])
        sqlString = "create topic %s as %s" %(topic_name, queryString)
        tdSql.query(f'select * from information_schema.ins_databases')
        db_wal_retention_period_list = list(map(lambda x:x[-8] if x[0] == paraDict['dbName'] else None, tdSql.queryResult))
        db_wal_retention_period_list = list(map(lambda x:x[-6] if x[0] == paraDict['dbName'] else None, tdSql.queryResult))
        for i in range(len(db_wal_retention_period_list)):
            if db_wal_retention_period_list[0] is None or db_wal_retention_period_list[-1] is None:
                db_wal_retention_period_list.remove(None)

@@ -177,4 +176,4 @@ class TDTestCase:
    event = threading.Event()

tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())