Merge branch 'enh/triggerCheckPoint2' of https://github.com/taosdata/TDengine into enh/triggerCheckPoint2
This commit is contained in:
commit
e1de4984a6
|
@ -81,10 +81,6 @@ Set<String> subscription() throws SQLException;
|
||||||
|
|
||||||
ConsumerRecords<V> poll(Duration timeout) throws SQLException;
|
ConsumerRecords<V> poll(Duration timeout) throws SQLException;
|
||||||
|
|
||||||
void commitAsync();
|
|
||||||
|
|
||||||
void commitAsync(OffsetCommitCallback callback);
|
|
||||||
|
|
||||||
void commitSync() throws SQLException;
|
void commitSync() throws SQLException;
|
||||||
|
|
||||||
void close() throws SQLException;
|
void close() throws SQLException;
|
||||||
|
|
|
@ -174,7 +174,7 @@ Use curl to verify that the TDengine REST API is working on port 6041:
|
||||||
```
|
```
|
||||||
$ curl -u root:taosdata -d "show databases" 127.0.0.1:6041/rest/sql
|
$ curl -u root:taosdata -d "show databases" 127.0.0.1:6041/rest/sql
|
||||||
Handling connection for 6041
|
Handling connection for 6041
|
||||||
{"code":0,"column_meta":[["name","VARCHAR",64],["create_time","TIMESTAMP",8],["vgroups","SMALLINT",2],["ntables","BIGINT",8],["replica","TINYINT",1],["strict","VARCHAR",4],["duration","VARCHAR",10],["keep","VARCHAR",32],["buffer","INT",4],["pagesize","INT",4],["pages","INT",4],["minrows","INT",4],["maxrows","INT",4],["comp","TINYINT",1],["precision","VARCHAR",2],["status","VARCHAR",10],["retention","VARCHAR",60],["single_stable","BOOL",1],["cachemodel","VARCHAR",11],["cachesize","INT",4],["wal_level","TINYINT",1],["wal_fsync_period","INT",4],["wal_retention_period","INT",4],["wal_retention_size","BIGINT",8],["wal_roll_period","INT",4],["wal_segment_size","BIGINT",8]],"data":[["information_schema",null,null,16,null,null,null,null,null,null,null,null,null,null,null,"ready",null,null,null,null,null,null,null,null,null,null],["performance_schema",null,null,10,null,null,null,null,null,null,null,null,null,null,null,"ready",null,null,null,null,null,null,null,null,null,null]],"rows":2}
|
{"code":0,"column_meta":[["name","VARCHAR",64],["create_time","TIMESTAMP",8],["vgroups","SMALLINT",2],["ntables","BIGINT",8],["replica","TINYINT",1],["strict","VARCHAR",4],["duration","VARCHAR",10],["keep","VARCHAR",32],["buffer","INT",4],["pagesize","INT",4],["pages","INT",4],["minrows","INT",4],["maxrows","INT",4],["comp","TINYINT",1],["precision","VARCHAR",2],["status","VARCHAR",10],["retention","VARCHAR",60],["single_stable","BOOL",1],["cachemodel","VARCHAR",11],["cachesize","INT",4],["wal_level","TINYINT",1],["wal_fsync_period","INT",4],["wal_retention_period","INT",4],["wal_retention_size","BIGINT",8]],"data":[["information_schema",null,null,16,null,null,null,null,null,null,null,null,null,null,null,"ready",null,null,null,null,null,null,null,null,null,null],["performance_schema",null,null,10,null,null,null,null,null,null,null,null,null,null,null,"ready",null,null,null,null,null,null,null,null,null,null]],"rows":2}
|
||||||
```
|
```
|
||||||
|
|
||||||
## Enable the dashboard for visualization
|
## Enable the dashboard for visualization
|
||||||
|
|
|
@ -36,8 +36,6 @@ database_option: {
|
||||||
| TSDB_PAGESIZE value
|
| TSDB_PAGESIZE value
|
||||||
| WAL_RETENTION_PERIOD value
|
| WAL_RETENTION_PERIOD value
|
||||||
| WAL_RETENTION_SIZE value
|
| WAL_RETENTION_SIZE value
|
||||||
| WAL_ROLL_PERIOD value
|
|
||||||
| WAL_SEGMENT_SIZE value
|
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -77,8 +75,6 @@ database_option: {
|
||||||
- TSDB_PAGESIZE: The page size of the data storage engine in a vnode. The unit is KB. The default is 4 KB. The range is 1 to 16384, that is, 1 KB to 16 MB.
|
- TSDB_PAGESIZE: The page size of the data storage engine in a vnode. The unit is KB. The default is 4 KB. The range is 1 to 16384, that is, 1 KB to 16 MB.
|
||||||
- WAL_RETENTION_PERIOD: specifies the maximum time of which WAL files are to be kept for consumption. This parameter is used for data subscription. Enter a time in seconds. The default value 0. A value of 0 indicates that WAL files are not required to keep for consumption. Alter it with a proper value at first to create topics.
|
- WAL_RETENTION_PERIOD: specifies the maximum time of which WAL files are to be kept for consumption. This parameter is used for data subscription. Enter a time in seconds. The default value 0. A value of 0 indicates that WAL files are not required to keep for consumption. Alter it with a proper value at first to create topics.
|
||||||
- WAL_RETENTION_SIZE: specifies the maximum total size of which WAL files are to be kept for consumption. This parameter is used for data subscription. Enter a size in KB. The default value is 0. A value of 0 indicates that the total size of WAL files to keep for consumption has no upper limit.
|
- WAL_RETENTION_SIZE: specifies the maximum total size of which WAL files are to be kept for consumption. This parameter is used for data subscription. Enter a size in KB. The default value is 0. A value of 0 indicates that the total size of WAL files to keep for consumption has no upper limit.
|
||||||
- WAL_ROLL_PERIOD: specifies the time after which WAL files are rotated. After this period elapses, a new WAL file is created. The default value is 0. A value of 0 indicates that a new WAL file is created only after TSDB data in memory are flushed to disk.
|
|
||||||
- WAL_SEGMENT_SIZE: specifies the maximum size of a WAL file. After the current WAL file reaches this size, a new WAL file is created. The default value is 0. A value of 0 indicates that a new WAL file is created only after TSDB data in memory are flushed to disk.
|
|
||||||
### Example Statement
|
### Example Statement
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
|
|
|
@ -334,8 +334,6 @@ The following list shows all reserved keywords:
|
||||||
- WAL_LEVEL
|
- WAL_LEVEL
|
||||||
- WAL_RETENTION_PERIOD
|
- WAL_RETENTION_PERIOD
|
||||||
- WAL_RETENTION_SIZE
|
- WAL_RETENTION_SIZE
|
||||||
- WAL_ROLL_PERIOD
|
|
||||||
- WAL_SEGMENT_SIZE
|
|
||||||
- WATERMARK
|
- WATERMARK
|
||||||
- WHERE
|
- WHERE
|
||||||
- WINDOW_CLOSE
|
- WINDOW_CLOSE
|
||||||
|
|
|
@ -100,12 +100,10 @@ Provides information about user-created databases. Similar to SHOW DATABASES.
|
||||||
| 23 | wal_fsync_period | INT | Interval at which WAL is written to disk. It should be noted that `wal_fsync_period` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
| 23 | wal_fsync_period | INT | Interval at which WAL is written to disk. It should be noted that `wal_fsync_period` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||||
| 24 | wal_retention_period | INT | WAL retention period. It should be noted that `wal_retention_period` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
| 24 | wal_retention_period | INT | WAL retention period. It should be noted that `wal_retention_period` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||||
| 25 | wal_retention_size | INT | Maximum WAL size. It should be noted that `wal_retention_size` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
| 25 | wal_retention_size | INT | Maximum WAL size. It should be noted that `wal_retention_size` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||||
| 26 | wal_roll_period | INT | WAL rotation period. It should be noted that `wal_roll_period` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
| 26 | stt_trigger | SMALLINT | The threshold for number of files to trigger file merging. It should be noted that `stt_trigger` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||||
| 27 | wal_segment_size | BIGINT | WAL file size. It should be noted that `wal_segment_size` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
| 27 | table_prefix | SMALLINT | The prefix length in the table name that is ignored when distributing table to vnode based on table name. It should be noted that `table_prefix` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||||
| 28 | stt_trigger | SMALLINT | The threshold for number of files to trigger file merging. It should be noted that `stt_trigger` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
| 28 | table_suffix | SMALLINT | The suffix length in the table name that is ignored when distributing table to vnode based on table name. It should be noted that `table_suffix` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||||
| 29 | table_prefix | SMALLINT | The prefix length in the table name that is ignored when distributing table to vnode based on table name. It should be noted that `table_prefix` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
| 29 | tsdb_pagesize | INT | The page size for internal storage engine, its unit is KB. It should be noted that `tsdb_pagesize` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||||
| 30 | table_suffix | SMALLINT | The suffix length in the table name that is ignored when distributing table to vnode based on table name. It should be noted that `table_suffix` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
|
||||||
| 31 | tsdb_pagesize | INT | The page size for internal storage engine, its unit is KB. It should be noted that `tsdb_pagesize` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
|
||||||
|
|
||||||
## INS_FUNCTIONS
|
## INS_FUNCTIONS
|
||||||
|
|
||||||
|
@ -283,6 +281,8 @@ Provides dnode configuration information.
|
||||||
| 2 | consumer_group | BINARY(193) | Subscribed consumer group |
|
| 2 | consumer_group | BINARY(193) | Subscribed consumer group |
|
||||||
| 3 | vgroup_id | INT | Vgroup ID for the consumer |
|
| 3 | vgroup_id | INT | Vgroup ID for the consumer |
|
||||||
| 4 | consumer_id | BIGINT | Consumer ID |
|
| 4 | consumer_id | BIGINT | Consumer ID |
|
||||||
|
| 5 | offset | BINARY(64) | Consumption progress |
|
||||||
|
| 6 | rows | BIGINT | Number of consumption items |
|
||||||
|
|
||||||
## INS_STREAMS
|
## INS_STREAMS
|
||||||
|
|
||||||
|
|
|
@ -33,7 +33,7 @@ The following data types can be used in the schema for standard tables.
|
||||||
| 6 | ALTER USER | Modified | Deprecated<ul><li>PRIVILEGE: Specified user permissions. Replaced by GRANT and REVOKE. <br/>Added</li><li>ENABLE: Enables or disables a user. </li><li>SYSINFO: Specifies whether a user can query system information. </li></ul>
|
| 6 | ALTER USER | Modified | Deprecated<ul><li>PRIVILEGE: Specified user permissions. Replaced by GRANT and REVOKE. <br/>Added</li><li>ENABLE: Enables or disables a user. </li><li>SYSINFO: Specifies whether a user can query system information. </li></ul>
|
||||||
| 7 | COMPACT VNODES | Not supported | Compacted the data on a vnode. Not supported.
|
| 7 | COMPACT VNODES | Not supported | Compacted the data on a vnode. Not supported.
|
||||||
| 8 | CREATE ACCOUNT | Deprecated| This Enterprise Edition-only statement has been removed. It returns the error "This statement is no longer supported."
|
| 8 | CREATE ACCOUNT | Deprecated| This Enterprise Edition-only statement has been removed. It returns the error "This statement is no longer supported."
|
||||||
| 9 | CREATE DATABASE | Modified | Deprecated<ul><li>BLOCKS: Specified the number of blocks for each vnode. BUFFER is now used to specify the size of the write cache pool for each vnode. </li><li>CACHE: Specified the size of the memory blocks used by each vnode. BUFFER is now used to specify the size of the write cache pool for each vnode. </li><li>CACHELAST: Specified how to cache the newest row of data. CACHEMODEL now replaces CACHELAST. </li><li>DAYS: The length of time to store in a single file. Replaced by DURATION. </li><li>FSYNC: Specified the fsync interval when WAL was set to 2. Replaced by WAL_FSYNC_PERIOD. </li><li>QUORUM: Specified the number of confirmations required. STRICT is now used to specify strong or weak consistency. </li><li>UPDATE: Specified whether update operations were supported. All databases now support updating data in certain columns. </li><li>WAL: Specified the WAL level. Replaced by WAL_LEVEL. <br/>Added</li><li>BUFFER: Specifies the size of the write cache pool for each vnode. </li><li>CACHEMODEL: Specifies whether to cache the latest subtable data. </li><li>CACHESIZE: Specifies the size of the cache for the newest subtable data. </li><li>DURATION: Replaces DAYS. Now supports units. </li><li>PAGES: Specifies the number of pages in the metadata storage engine cache on each vnode. </li><li>PAGESIZE: specifies the size (in KB) of each page in the metadata storage engine cache on each vnode. </li><li>RETENTIONS: Specifies the aggregation interval and retention period </li><li>STRICT: Specifies whether strong data consistency is enabled. </li><li>SINGLE_STABLE: Specifies whether a database can contain multiple supertables. </li><li>VGROUPS: Specifies the initial number of vgroups when a database is created. </li><li>WAL_FSYNC_PERIOD: Replaces the FSYNC parameter. </li><li>WAL_LEVEL: Replaces the WAL parameter. </li><li>WAL_RETENTION_PERIOD: specifies the time after which WAL files are deleted. 
This parameter is used for data subscription. </li><li>WAL_RETENTION_SIZE: specifies the size at which WAL files are deleted. This parameter is used for data subscription. </li><li>WAL_ROLL_PERIOD: Specifies the WAL rotation period. </li><li>WAL_SEGMENT_SIZE: specifies the maximum size of a WAL file. <br/>Modified</li><li>KEEP: Now supports units. </li></ul>
|
| 9 | CREATE DATABASE | Modified | Deprecated<ul><li>BLOCKS: Specified the number of blocks for each vnode. BUFFER is now used to specify the size of the write cache pool for each vnode. </li><li>CACHE: Specified the size of the memory blocks used by each vnode. BUFFER is now used to specify the size of the write cache pool for each vnode. </li><li>CACHELAST: Specified how to cache the newest row of data. CACHEMODEL now replaces CACHELAST. </li><li>DAYS: The length of time to store in a single file. Replaced by DURATION. </li><li>FSYNC: Specified the fsync interval when WAL was set to 2. Replaced by WAL_FSYNC_PERIOD. </li><li>QUORUM: Specified the number of confirmations required. STRICT is now used to specify strong or weak consistency. </li><li>UPDATE: Specified whether update operations were supported. All databases now support updating data in certain columns. </li><li>WAL: Specified the WAL level. Replaced by WAL_LEVEL. <br/>Added</li><li>BUFFER: Specifies the size of the write cache pool for each vnode. </li><li>CACHEMODEL: Specifies whether to cache the latest subtable data. </li><li>CACHESIZE: Specifies the size of the cache for the newest subtable data. </li><li>DURATION: Replaces DAYS. Now supports units. </li><li>PAGES: Specifies the number of pages in the metadata storage engine cache on each vnode. </li><li>PAGESIZE: specifies the size (in KB) of each page in the metadata storage engine cache on each vnode. </li><li>RETENTIONS: Specifies the aggregation interval and retention period </li><li>STRICT: Specifies whether strong data consistency is enabled. </li><li>SINGLE_STABLE: Specifies whether a database can contain multiple supertables. </li><li>VGROUPS: Specifies the initial number of vgroups when a database is created. </li><li>WAL_FSYNC_PERIOD: Replaces the FSYNC parameter. </li><li>WAL_LEVEL: Replaces the WAL parameter. </li><li>WAL_RETENTION_PERIOD: specifies the time after which WAL files are deleted. 
This parameter is used for data subscription. </li><li>WAL_RETENTION_SIZE: specifies the size at which WAL files are deleted. This parameter is used for data subscription. <br/>Modified</li><li>KEEP: Now supports units. </li></ul>
|
||||||
| 10 | CREATE DNODE | Modified | Now supports specifying hostname and port separately<ul><li>CREATE DNODE dnode_host_name PORT port_val</li></ul>
|
| 10 | CREATE DNODE | Modified | Now supports specifying hostname and port separately<ul><li>CREATE DNODE dnode_host_name PORT port_val</li></ul>
|
||||||
| 11 | CREATE INDEX | Added | Creates an SMA index.
|
| 11 | CREATE INDEX | Added | Creates an SMA index.
|
||||||
| 12 | CREATE MNODE | Added | Creates an mnode.
|
| 12 | CREATE MNODE | Added | Creates an mnode.
|
||||||
|
|
|
@ -36,15 +36,16 @@ REST connection supports all platforms that can run Java.
|
||||||
|
|
||||||
| taos-jdbcdriver version | major changes | TDengine version |
|
| taos-jdbcdriver version | major changes | TDengine version |
|
||||||
| :---------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------: | :--------------: |
|
| :---------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------: | :--------------: |
|
||||||
|
| 3.2.4 | Subscription add the enable.auto.commit parameter and the unsubscribe() method in the WebSocket connection | 3.0.5.0 or later |
|
||||||
| 3.2.3 | Fixed resultSet data parsing failure in some cases | 3.0.5.0 or later |
|
| 3.2.3 | Fixed resultSet data parsing failure in some cases | 3.0.5.0 or later |
|
||||||
| 3.2.2 | subscription add seek function | 3.0.5.0 or later |
|
| 3.2.2 | Subscription add seek function | 3.0.5.0 or later |
|
||||||
| 3.2.1 | JDBC REST connection supports schemaless/prepareStatement over WebSocket | 3.0.3.0 or later |
|
| 3.2.1 | JDBC REST connection supports schemaless/prepareStatement over WebSocket | 3.0.3.0 or later |
|
||||||
| 3.2.0 | This version has been deprecated | - |
|
| 3.2.0 | This version has been deprecated | - |
|
||||||
| 3.1.0 | JDBC REST connection supports subscription over WebSocket | - |
|
| 3.1.0 | JDBC REST connection supports subscription over WebSocket | - |
|
||||||
| 3.0.1 - 3.0.4 | fix the resultSet data is parsed incorrectly sometimes. 3.0.1 is compiled on JDK 11, you are advised to use other version in the JDK 8 environment | - |
|
| 3.0.1 - 3.0.4 | fix the resultSet data is parsed incorrectly sometimes. 3.0.1 is compiled on JDK 11, you are advised to use other version in the JDK 8 environment | - |
|
||||||
| 3.0.0 | Support for TDengine 3.0 | 3.0.0.0 or later |
|
| 3.0.0 | Support for TDengine 3.0 | 3.0.0.0 or later |
|
||||||
| 2.0.42 | fix wasNull interface return value in WebSocket connection | - |
|
| 2.0.42 | Fix wasNull interface return value in WebSocket connection | - |
|
||||||
| 2.0.41 | fix decode method of username and password in REST connection | - |
|
| 2.0.41 | Fix decode method of username and password in REST connection | - |
|
||||||
| 2.0.39 - 2.0.40 | Add REST connection/request timeout parameters | - |
|
| 2.0.39 - 2.0.40 | Add REST connection/request timeout parameters | - |
|
||||||
| 2.0.38 | JDBC REST connections add bulk pull function | - |
|
| 2.0.38 | JDBC REST connections add bulk pull function | - |
|
||||||
| 2.0.37 | Support json tags | - |
|
| 2.0.37 | Support json tags | - |
|
||||||
|
|
|
@ -102,7 +102,7 @@ Ensure that your firewall rules do not block TCP port 6042 on any host in the c
|
||||||
| Value Range | 10-50000000 |
|
| Value Range | 10-50000000 |
|
||||||
| Default Value | 5000 |
|
| Default Value | 5000 |
|
||||||
|
|
||||||
### numOfRpcSessions
|
### numOfRpcSessions
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| ------------- | ------------------------------------------ |
|
| ------------- | ------------------------------------------ |
|
||||||
|
@ -202,7 +202,7 @@ Please note the `taoskeeper` needs to be installed and running to create the `lo
|
||||||
| Default Value | 0 |
|
| Default Value | 0 |
|
||||||
| Notes | 0: Disable SMA indexing and perform all queries on non-indexed data; 1: Enable SMA indexing and perform queries from suitable statements on precomputation results. |
|
| Notes | 0: Disable SMA indexing and perform all queries on non-indexed data; 1: Enable SMA indexing and perform queries from suitable statements on precomputation results. |
|
||||||
|
|
||||||
### countAlwaysReturnValue
|
### countAlwaysReturnValue
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| ---------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
| ---------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||||
|
@ -713,6 +713,24 @@ The charset that takes effect is UTF-8.
|
||||||
| Value Range | 0: disable UDF; 1: enabled UDF |
|
| Value Range | 0: disable UDF; 1: enabled UDF |
|
||||||
| Default Value | 1 |
|
| Default Value | 1 |
|
||||||
|
|
||||||
|
### ttlChangeOnWrite
|
||||||
|
|
||||||
|
| Attribute | Description |
|
||||||
|
| ------------- | ----------------------------------------------------------------------------- |
|
||||||
|
| Applicable | Server Only |
|
||||||
|
| Meaning | Whether the ttl expiration time changes with the table modification operation |
|
||||||
|
| Value Range | 0: not change; 1: change by modification |
|
||||||
|
| Default Value | 0 |
|
||||||
|
|
||||||
|
### keepTimeOffset
|
||||||
|
|
||||||
|
| Attribute | Description |
|
||||||
|
| ------------- | ------------------------- |
|
||||||
|
| Applicable | Server Only |
|
||||||
|
| Meaning | Latency of data migration |
|
||||||
|
| Unit | hour |
|
||||||
|
| Value Range | 0-23 |
|
||||||
|
| Default Value | 0 |
|
||||||
|
|
||||||
## 3.0 Parameters
|
## 3.0 Parameters
|
||||||
|
|
||||||
|
@ -770,3 +788,5 @@ The charset that takes effect is UTF-8.
|
||||||
| 52 | charset | Yes | Yes | |
|
| 52 | charset | Yes | Yes | |
|
||||||
| 53 | udf | Yes | Yes | |
|
| 53 | udf | Yes | Yes | |
|
||||||
| 54 | enableCoreFile | Yes | Yes | |
|
| 54 | enableCoreFile | Yes | Yes | |
|
||||||
|
| 55 | ttlChangeOnWrite | No | Yes | |
|
||||||
|
| 56 | keepTimeOffset | Yes | Yes | |
|
||||||
|
|
|
@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://w
|
||||||
|
|
||||||
import Release from "/components/ReleaseV3";
|
import Release from "/components/ReleaseV3";
|
||||||
|
|
||||||
|
## 3.0.7.0
|
||||||
|
|
||||||
|
<Release type="tdengine" version="3.0.7.0" />
|
||||||
|
|
||||||
## 3.0.6.0
|
## 3.0.6.0
|
||||||
|
|
||||||
<Release type="tdengine" version="3.0.6.0" />
|
<Release type="tdengine" version="3.0.6.0" />
|
||||||
|
|
|
@ -81,10 +81,6 @@ Set<String> subscription() throws SQLException;
|
||||||
|
|
||||||
ConsumerRecords<V> poll(Duration timeout) throws SQLException;
|
ConsumerRecords<V> poll(Duration timeout) throws SQLException;
|
||||||
|
|
||||||
void commitAsync();
|
|
||||||
|
|
||||||
void commitAsync(OffsetCommitCallback callback);
|
|
||||||
|
|
||||||
void commitSync() throws SQLException;
|
void commitSync() throws SQLException;
|
||||||
|
|
||||||
void close() throws SQLException;
|
void close() throws SQLException;
|
||||||
|
|
|
@ -36,6 +36,7 @@ REST 连接支持所有能运行 Java 的平台。
|
||||||
|
|
||||||
| taos-jdbcdriver 版本 | 主要变化 | TDengine 版本 |
|
| taos-jdbcdriver 版本 | 主要变化 | TDengine 版本 |
|
||||||
| :------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------: |
|
| :------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------: |
|
||||||
|
| 3.2.4 | 数据订阅在 WebSocket 连接下增加 enable.auto.commit 参数,以及 unsubscribe() 方法。 | - |
|
||||||
| 3.2.3 | 修复 ResultSet 在一些情况数据解析失败 | - |
|
| 3.2.3 | 修复 ResultSet 在一些情况数据解析失败 | - |
|
||||||
| 3.2.2 | 新增功能:数据订阅支持 seek 功能。 | 3.0.5.0 及更高版本 |
|
| 3.2.2 | 新增功能:数据订阅支持 seek 功能。 | 3.0.5.0 及更高版本 |
|
||||||
| 3.2.1 | 新增功能:WebSocket 连接支持 schemaless 与 prepareStatement 写入。变更:consumer poll 返回结果集为 ConsumerRecord,可通过 value() 获取指定结果集数据。 | 3.0.3.0 及更高版本 |
|
| 3.2.1 | 新增功能:WebSocket 连接支持 schemaless 与 prepareStatement 写入。变更:consumer poll 返回结果集为 ConsumerRecord,可通过 value() 获取指定结果集数据。 | 3.0.3.0 及更高版本 |
|
||||||
|
|
|
@ -2,10 +2,10 @@
|
||||||
|
|
||||||
```text
|
```text
|
||||||
taos> show databases;
|
taos> show databases;
|
||||||
name | create_time | vgroups | ntables | replica | strict | duration | keep | buffer | pagesize | pages | minrows | maxrows | comp | precision | status | retention | single_stable | cachemodel | cachesize | wal_level | wal_fsync_period | wal_retention_period | wal_retention_size | wal_roll_period | wal_seg_size |
|
name | create_time | vgroups | ntables | replica | strict | duration | keep | buffer | pagesize | pages | minrows | maxrows | comp | precision | status | retention | single_stable | cachemodel | cachesize | wal_level | wal_fsync_period | wal_retention_period | wal_retention_size |
|
||||||
=========================================================================================================================================================================================================================================================================================================================================================================================================================================================================
|
===============================================================================================================================================================================================================================================================================================================================================================================================================================
|
||||||
information_schema | NULL | NULL | 14 | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | ready | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL |
|
information_schema | NULL | NULL | 14 | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | ready | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL |
|
||||||
performance_schema | NULL | NULL | 3 | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | ready | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL |
|
performance_schema | NULL | NULL | 3 | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | ready | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL |
|
||||||
test | 2022-08-04 16:46:40.506 | 2 | 0 | 1 | off | 14400m | 5256000m,5256000m,5256000m | 96 | 4 | 256 |
|
test | 2022-08-04 16:46:40.506 | 2 | 0 | 1 | off | 14400m | 5256000m,5256000m,5256000m | 96 | 4 | 256 |
|
||||||
100 | 4096 | 2 | ms | ready | NULL | false | none | 1 | 1 | 3000 | 0 | 0 | 0 | 0 |
|
100 | 4096 | 2 | ms | ready | NULL | false | none | 1 | 1 | 3000 | 0 | 0 | 0 | 0 |
|
||||||
Query OK, 3 rows in database (0.123000s)
|
Query OK, 3 rows in database (0.123000s)
|
||||||
|
|
|
@ -174,7 +174,7 @@ kubectl port-forward tdengine-0 6041:6041 &
|
||||||
```
|
```
|
||||||
$ curl -u root:taosdata -d "show databases" 127.0.0.1:6041/rest/sql
|
$ curl -u root:taosdata -d "show databases" 127.0.0.1:6041/rest/sql
|
||||||
Handling connection for 6041
|
Handling connection for 6041
|
||||||
{"code":0,"column_meta":[["name","VARCHAR",64],["create_time","TIMESTAMP",8],["vgroups","SMALLINT",2],["ntables","BIGINT",8],["replica","TINYINT",1],["strict","VARCHAR",4],["duration","VARCHAR",10],["keep","VARCHAR",32],["buffer","INT",4],["pagesize","INT",4],["pages","INT",4],["minrows","INT",4],["maxrows","INT",4],["comp","TINYINT",1],["precision","VARCHAR",2],["status","VARCHAR",10],["retention","VARCHAR",60],["single_stable","BOOL",1],["cachemodel","VARCHAR",11],["cachesize","INT",4],["wal_level","TINYINT",1],["wal_fsync_period","INT",4],["wal_retention_period","INT",4],["wal_retention_size","BIGINT",8],["wal_roll_period","INT",4],["wal_segment_size","BIGINT",8]],"data":[["information_schema",null,null,16,null,null,null,null,null,null,null,null,null,null,null,"ready",null,null,null,null,null,null,null,null,null,null],["performance_schema",null,null,10,null,null,null,null,null,null,null,null,null,null,null,"ready",null,null,null,null,null,null,null,null,null,null]],"rows":2}
|
{"code":0,"column_meta":[["name","VARCHAR",64],["create_time","TIMESTAMP",8],["vgroups","SMALLINT",2],["ntables","BIGINT",8],["replica","TINYINT",1],["strict","VARCHAR",4],["duration","VARCHAR",10],["keep","VARCHAR",32],["buffer","INT",4],["pagesize","INT",4],["pages","INT",4],["minrows","INT",4],["maxrows","INT",4],["comp","TINYINT",1],["precision","VARCHAR",2],["status","VARCHAR",10],["retention","VARCHAR",60],["single_stable","BOOL",1],["cachemodel","VARCHAR",11],["cachesize","INT",4],["wal_level","TINYINT",1],["wal_fsync_period","INT",4],["wal_retention_period","INT",4],["wal_retention_size","BIGINT",8]],"data":[["information_schema",null,null,16,null,null,null,null,null,null,null,null,null,null,null,"ready",null,null,null,null,null,null,null,null,null,null],["performance_schema",null,null,10,null,null,null,null,null,null,null,null,null,null,null,"ready",null,null,null,null,null,null,null,null,null,null]],"rows":2}
|
||||||
```
|
```
|
||||||
|
|
||||||
## 使用 dashboard 进行图形化管理
|
## 使用 dashboard 进行图形化管理
|
||||||
|
|
|
@ -36,7 +36,6 @@ database_option: {
|
||||||
| TSDB_PAGESIZE value
|
| TSDB_PAGESIZE value
|
||||||
| WAL_RETENTION_PERIOD value
|
| WAL_RETENTION_PERIOD value
|
||||||
| WAL_RETENTION_SIZE value
|
| WAL_RETENTION_SIZE value
|
||||||
| WAL_SEGMENT_SIZE value
|
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -76,8 +75,6 @@ database_option: {
|
||||||
- TSDB_PAGESIZE:一个 VNODE 中时序数据存储引擎的页大小,单位为 KB,默认为 4 KB。范围为 1 到 16384,即 1 KB到 16 MB。
|
- TSDB_PAGESIZE:一个 VNODE 中时序数据存储引擎的页大小,单位为 KB,默认为 4 KB。范围为 1 到 16384,即 1 KB到 16 MB。
|
||||||
- WAL_RETENTION_PERIOD: 为了数据订阅消费,需要WAL日志文件额外保留的最大时长策略。WAL日志清理,不受订阅客户端消费状态影响。单位为 s。默认为 0,表示无需为订阅保留。新建订阅,应先设置恰当的时长策略。
|
- WAL_RETENTION_PERIOD: 为了数据订阅消费,需要WAL日志文件额外保留的最大时长策略。WAL日志清理,不受订阅客户端消费状态影响。单位为 s。默认为 0,表示无需为订阅保留。新建订阅,应先设置恰当的时长策略。
|
||||||
- WAL_RETENTION_SIZE:为了数据订阅消费,需要WAL日志文件额外保留的最大累计大小策略。单位为 KB。默认为 0,表示累计大小无上限。
|
- WAL_RETENTION_SIZE:为了数据订阅消费,需要WAL日志文件额外保留的最大累计大小策略。单位为 KB。默认为 0,表示累计大小无上限。
|
||||||
- WAL_ROLL_PERIOD:wal 文件切换时长,单位为 s。当WAL文件创建并写入后,经过该时间,会自动创建一个新的WAL文件。默认为 0,即仅在TSDB落盘时创建新文件。
|
|
||||||
- WAL_SEGMENT_SIZE:wal 单个文件大小,单位为 KB。当前写入文件大小超过上限后会自动创建一个新的WAL文件。默认为 0,即仅在TSDB落盘时创建新文件。
|
|
||||||
### 创建数据库示例
|
### 创建数据库示例
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
|
|
|
@ -334,8 +334,6 @@ description: TDengine 保留关键字的详细列表
|
||||||
- WAL_LEVEL
|
- WAL_LEVEL
|
||||||
- WAL_RETENTION_PERIOD
|
- WAL_RETENTION_PERIOD
|
||||||
- WAL_RETENTION_SIZE
|
- WAL_RETENTION_SIZE
|
||||||
- WAL_ROLL_PERIOD
|
|
||||||
- WAL_SEGMENT_SIZE
|
|
||||||
- WATERMARK
|
- WATERMARK
|
||||||
- WHERE
|
- WHERE
|
||||||
- WINDOW_CLOSE
|
- WINDOW_CLOSE
|
||||||
|
|
|
@ -100,12 +100,10 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
||||||
| 23 | wal_fsync_period | INT | 数据落盘周期。需要注意,`wal_fsync_period` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
| 23 | wal_fsync_period | INT | 数据落盘周期。需要注意,`wal_fsync_period` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||||
| 24 | wal_retention_period | INT | WAL 的保存时长。需要注意,`wal_retention_period` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
| 24 | wal_retention_period | INT | WAL 的保存时长。需要注意,`wal_retention_period` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||||
| 25 | wal_retention_size | INT | WAL 的保存上限。需要注意,`wal_retention_size` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
| 25 | wal_retention_size | INT | WAL 的保存上限。需要注意,`wal_retention_size` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||||
| 26 | wal_roll_period | INT | wal 文件切换时长。需要注意,`wal_roll_period` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
| 26 | stt_trigger | SMALLINT | 触发文件合并的落盘文件的个数。需要注意,`stt_trigger` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||||
| 27 | wal_segment_size | BIGINT | wal 单个文件大小。需要注意,`wal_segment_size` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
| 27 | table_prefix | SMALLINT | 内部存储引擎根据表名分配存储该表数据的 VNODE 时要忽略的前缀的长度。需要注意,`table_prefix` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||||
| 28 | stt_trigger | SMALLINT | 触发文件合并的落盘文件的个数。需要注意,`stt_trigger` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
| 28 | table_suffix | SMALLINT | 内部存储引擎根据表名分配存储该表数据的 VNODE 时要忽略的后缀的长度。需要注意,`table_suffix` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||||
| 29 | table_prefix | SMALLINT | 内部存储引擎根据表名分配存储该表数据的 VNODE 时要忽略的前缀的长度。需要注意,`table_prefix` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
| 29 | tsdb_pagesize | INT | 时序数据存储引擎中的页大小。需要注意,`tsdb_pagesize` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||||
| 30 | table_suffix | SMALLINT | 内部存储引擎根据表名分配存储该表数据的 VNODE 时要忽略的后缀的长度。需要注意,`table_suffix` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
|
||||||
| 31 | tsdb_pagesize | INT | 时序数据存储引擎中的页大小。需要注意,`tsdb_pagesize` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
|
||||||
|
|
||||||
## INS_FUNCTIONS
|
## INS_FUNCTIONS
|
||||||
|
|
||||||
|
@ -284,6 +282,8 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
||||||
| 2 | consumer_group | BINARY(193) | 订阅者的消费者组 |
|
| 2 | consumer_group | BINARY(193) | 订阅者的消费者组 |
|
||||||
| 3 | vgroup_id | INT | 消费者被分配的 vgroup id |
|
| 3 | vgroup_id | INT | 消费者被分配的 vgroup id |
|
||||||
| 4 | consumer_id | BIGINT | 消费者的唯一 id |
|
| 4 | consumer_id | BIGINT | 消费者的唯一 id |
|
||||||
|
| 5 | offset | BINARY(64) | 消费者的消费进度 |
|
||||||
|
| 6 | rows | BIGINT | 消费者的消费的数据条数 |
|
||||||
|
|
||||||
## INS_STREAMS
|
## INS_STREAMS
|
||||||
|
|
||||||
|
|
|
@ -33,7 +33,7 @@ description: "TDengine 3.0 版本的语法变更说明"
|
||||||
| 6 | ALTER USER | 调整 | 废除<ul><li>PRIVILEGE:修改用户权限。3.0版本使用GRANT和REVOKE来授予和回收权限。<br/>新增</li><li>ENABLE:启用或停用此用户。</li><li>SYSINFO:修改用户是否可查看系统信息。</li></ul>
|
| 6 | ALTER USER | 调整 | 废除<ul><li>PRIVILEGE:修改用户权限。3.0版本使用GRANT和REVOKE来授予和回收权限。<br/>新增</li><li>ENABLE:启用或停用此用户。</li><li>SYSINFO:修改用户是否可查看系统信息。</li></ul>
|
||||||
| 7 | COMPACT VNODES | 暂不支持 | 整理指定VNODE的数据。3.0.0版本暂不支持。
|
| 7 | COMPACT VNODES | 暂不支持 | 整理指定VNODE的数据。3.0.0版本暂不支持。
|
||||||
| 8 | CREATE ACCOUNT | 废除 | 2.x中为企业版功能,3.0不再支持。语法暂时保留了,执行报“This statement is no longer supported”错误。
|
| 8 | CREATE ACCOUNT | 废除 | 2.x中为企业版功能,3.0不再支持。语法暂时保留了,执行报“This statement is no longer supported”错误。
|
||||||
| 9 | CREATE DATABASE | 调整 | <p>废除</p><ul><li>BLOCKS:VNODE使用的内存块数。3.0版本使用BUFFER来表示VNODE写入内存池的大小。</li><li>CACHE:VNODE使用的内存块的大小。3.0版本使用BUFFER来表示VNODE写入内存池的大小。</li><li>CACHELAST:缓存最新一行数据的模式。3.0版本用CACHEMODEL代替。</li><li>DAYS:数据文件存储数据的时间跨度。3.0版本使用DURATION代替。</li><li>FSYNC:当 WAL 设置为 2 时,执行 fsync 的周期。3.0版本使用WAL_FSYNC_PERIOD代替。</li><li>QUORUM:写入需要的副本确认数。3.0版本使用STRICT来指定强一致还是弱一致。</li><li>UPDATE:更新操作的支持模式。3.0版本所有数据库都支持部分列更新。</li><li>WAL:WAL 级别。3.0版本使用WAL_LEVEL代替。</li></ul><p>新增</p><ul><li>BUFFER:一个 VNODE 写入内存池大小。</li><li>CACHEMODEL:表示是否在内存中缓存子表的最近数据。</li><li>CACHESIZE:表示缓存子表最近数据的内存大小。</li><li>DURATION:代替原DAYS参数。新增支持带单位的设置方式。</li><li>PAGES:一个 VNODE 中元数据存储引擎的缓存页个数。</li><li>PAGESIZE:一个 VNODE 中元数据存储引擎的页大小。</li><li>RETENTIONS:表示数据的聚合周期和保存时长。</li><li>STRICT:表示数据同步的一致性要求。</li><li>SINGLE_STABLE:表示此数据库中是否只可以创建一个超级表。</li><li>VGROUPS:数据库中初始VGROUP的数目。</li><li>WAL_FSYNC_PERIOD:代替原FSYNC参数。</li><li>WAL_LEVEL:代替原WAL参数。</li><li>WAL_RETENTION_PERIOD:wal文件的额外保留策略,用于数据订阅。</li><li>WAL_RETENTION_SIZE:wal文件的额外保留策略,用于数据订阅。</li><li>WAL_ROLL_PERIOD:wal文件切换时长。</li><li>WAL_SEGMENT_SIZE:wal单个文件大小。</li></ul><p>调整</p><ul><li>KEEP:3.0版本新增支持带单位的设置方式。</li></ul>
|
| 9 | CREATE DATABASE | 调整 | <p>废除</p><ul><li>BLOCKS:VNODE使用的内存块数。3.0版本使用BUFFER来表示VNODE写入内存池的大小。</li><li>CACHE:VNODE使用的内存块的大小。3.0版本使用BUFFER来表示VNODE写入内存池的大小。</li><li>CACHELAST:缓存最新一行数据的模式。3.0版本用CACHEMODEL代替。</li><li>DAYS:数据文件存储数据的时间跨度。3.0版本使用DURATION代替。</li><li>FSYNC:当 WAL 设置为 2 时,执行 fsync 的周期。3.0版本使用WAL_FSYNC_PERIOD代替。</li><li>QUORUM:写入需要的副本确认数。3.0版本使用STRICT来指定强一致还是弱一致。</li><li>UPDATE:更新操作的支持模式。3.0版本所有数据库都支持部分列更新。</li><li>WAL:WAL 级别。3.0版本使用WAL_LEVEL代替。</li></ul><p>新增</p><ul><li>BUFFER:一个 VNODE 写入内存池大小。</li><li>CACHEMODEL:表示是否在内存中缓存子表的最近数据。</li><li>CACHESIZE:表示缓存子表最近数据的内存大小。</li><li>DURATION:代替原DAYS参数。新增支持带单位的设置方式。</li><li>PAGES:一个 VNODE 中元数据存储引擎的缓存页个数。</li><li>PAGESIZE:一个 VNODE 中元数据存储引擎的页大小。</li><li>RETENTIONS:表示数据的聚合周期和保存时长。</li><li>STRICT:表示数据同步的一致性要求。</li><li>SINGLE_STABLE:表示此数据库中是否只可以创建一个超级表。</li><li>VGROUPS:数据库中初始VGROUP的数目。</li><li>WAL_FSYNC_PERIOD:代替原FSYNC参数。</li><li>WAL_LEVEL:代替原WAL参数。</li><li>WAL_RETENTION_PERIOD:wal文件的额外保留策略,用于数据订阅。</li><li>WAL_RETENTION_SIZE:wal文件的额外保留策略,用于数据订阅。</li></ul><p>调整</p><ul><li>KEEP:3.0版本新增支持带单位的设置方式。</li></ul>
|
||||||
| 10 | CREATE DNODE | 调整 | 新增主机名和端口号分开指定语法<ul><li>CREATE DNODE dnode_host_name PORT port_val</li></ul>
|
| 10 | CREATE DNODE | 调整 | 新增主机名和端口号分开指定语法<ul><li>CREATE DNODE dnode_host_name PORT port_val</li></ul>
|
||||||
| 11 | CREATE INDEX | 新增 | 创建SMA索引。
|
| 11 | CREATE INDEX | 新增 | 创建SMA索引。
|
||||||
| 12 | CREATE MNODE | 新增 | 创建管理节点。
|
| 12 | CREATE MNODE | 新增 | 创建管理节点。
|
||||||
|
|
|
@ -101,7 +101,7 @@ taos -C
|
||||||
| 取值范围 | 10-50000000 |
|
| 取值范围 | 10-50000000 |
|
||||||
| 缺省值 | 5000 |
|
| 缺省值 | 5000 |
|
||||||
|
|
||||||
### numOfRpcSessions
|
### numOfRpcSessions
|
||||||
|
|
||||||
| 属性 | 说明 |
|
| 属性 | 说明 |
|
||||||
| --------| ---------------------- |
|
| --------| ---------------------- |
|
||||||
|
@ -120,7 +120,7 @@ taos -C
|
||||||
| 缺省值 | 500000 |
|
| 缺省值 | 500000 |
|
||||||
|
|
||||||
|
|
||||||
### numOfRpcSessions
|
### numOfRpcSessions
|
||||||
|
|
||||||
| 属性 | 说明 |
|
| 属性 | 说明 |
|
||||||
| -------- | ---------------------------- |
|
| -------- | ---------------------------- |
|
||||||
|
@ -717,6 +717,25 @@ charset 的有效值是 UTF-8。
|
||||||
| 取值范围 | 0: 不启动;1:启动 |
|
| 取值范围 | 0: 不启动;1:启动 |
|
||||||
| 缺省值 | 1 |
|
| 缺省值 | 1 |
|
||||||
|
|
||||||
|
### ttlChangeOnWrite
|
||||||
|
|
||||||
|
| 属性 | 说明 |
|
||||||
|
| -------- | ------------------ |
|
||||||
|
| 适用范围 | 仅服务端适用 |
|
||||||
|
| 含义 | ttl 到期时间是否伴随表的修改操作改变 |
|
||||||
|
| 取值范围 | 0: 不改变;1:改变 |
|
||||||
|
| 缺省值 | 0 |
|
||||||
|
|
||||||
|
### keepTimeOffset
|
||||||
|
|
||||||
|
| 属性 | 说明 |
|
||||||
|
| -------- | ------------------ |
|
||||||
|
| 适用范围 | 仅服务端适用 |
|
||||||
|
| 含义 | 迁移操作的延时 |
|
||||||
|
| 单位 | 小时 |
|
||||||
|
| 取值范围 | 0-23 |
|
||||||
|
| 缺省值 | 0 |
|
||||||
|
|
||||||
## 压缩参数
|
## 压缩参数
|
||||||
|
|
||||||
### compressMsgSize
|
### compressMsgSize
|
||||||
|
@ -784,6 +803,8 @@ charset 的有效值是 UTF-8。
|
||||||
| 52 | charset | 是 | 是 | |
|
| 52 | charset | 是 | 是 | |
|
||||||
| 53 | udf | 是 | 是 | |
|
| 53 | udf | 是 | 是 | |
|
||||||
| 54 | enableCoreFile | 是 | 是 | |
|
| 54 | enableCoreFile | 是 | 是 | |
|
||||||
|
| 55 | ttlChangeOnWrite | 否 | 是 | |
|
||||||
|
| 56 | keepTimeOffset | 是 | 是 | |
|
||||||
|
|
||||||
## 2.x->3.0 的废弃参数
|
## 2.x->3.0 的废弃参数
|
||||||
|
|
||||||
|
@ -798,76 +819,74 @@ charset 的有效值是 UTF-8。
|
||||||
| 7 | offlineThreshold | 是 | 否 | 3.0 行为未知 |
|
| 7 | offlineThreshold | 是 | 否 | 3.0 行为未知 |
|
||||||
| 8 | role | 是 | 否 | 由 supportVnode 决定是否能够创建 |
|
| 8 | role | 是 | 否 | 由 supportVnode 决定是否能够创建 |
|
||||||
| 9 | dnodeNopLoop | 是 | 否 | 2.6 文档中未找到此参数 |
|
| 9 | dnodeNopLoop | 是 | 否 | 2.6 文档中未找到此参数 |
|
||||||
| 10 | keepTimeOffset | 是 | 否 | 2.6 文档中未找到此参数 |
|
| 10 | rpcTimer | 是 | 否 | 3.0 行为未知 |
|
||||||
| 11 | rpcTimer | 是 | 否 | 3.0 行为未知 |
|
| 11 | rpcMaxTime | 是 | 否 | 3.0 行为未知 |
|
||||||
| 12 | rpcMaxTime | 是 | 否 | 3.0 行为未知 |
|
| 12 | rpcForceTcp | 是 | 否 | 默认为 TCP |
|
||||||
| 13 | rpcForceTcp | 是 | 否 | 默认为 TCP |
|
| 13 | tcpConnTimeout | 是 | 否 | 3.0 行为未知 |
|
||||||
| 14 | tcpConnTimeout | 是 | 否 | 3.0 行为未知 |
|
| 14 | syncCheckInterval | 是 | 否 | 3.0 行为未知 |
|
||||||
| 15 | syncCheckInterval | 是 | 否 | 3.0 行为未知 |
|
| 15 | maxTmrCtrl | 是 | 否 | 3.0 行为未知 |
|
||||||
| 16 | maxTmrCtrl | 是 | 否 | 3.0 行为未知 |
|
| 16 | monitorReplica | 是 | 否 | 由 RAFT 协议管理多副本 |
|
||||||
| 17 | monitorReplica | 是 | 否 | 由 RAFT 协议管理多副本 |
|
| 17 | smlTagNullName | 是 | 否 | 3.0 行为未知 |
|
||||||
| 18 | smlTagNullName | 是 | 否 | 3.0 行为未知 |
|
| 18 | ratioOfQueryCores | 是 | 否 | 由 线程池 相关配置参数决定 |
|
||||||
| 20 | ratioOfQueryCores | 是 | 否 | 由 线程池 相关配置参数决定 |
|
| 19 | maxStreamCompDelay | 是 | 否 | 3.0 行为未知 |
|
||||||
| 21 | maxStreamCompDelay | 是 | 否 | 3.0 行为未知 |
|
| 20 | maxFirstStreamCompDelay | 是 | 否 | 3.0 行为未知 |
|
||||||
| 22 | maxFirstStreamCompDelay | 是 | 否 | 3.0 行为未知 |
|
| 21 | retryStreamCompDelay | 是 | 否 | 3.0 行为未知 |
|
||||||
| 23 | retryStreamCompDelay | 是 | 否 | 3.0 行为未知 |
|
| 22 | streamCompDelayRatio | 是 | 否 | 3.0 行为未知 |
|
||||||
| 24 | streamCompDelayRatio | 是 | 否 | 3.0 行为未知 |
|
| 23 | maxVgroupsPerDb | 是 | 否 | 由 create db 的参数 vgroups 指定实际 vgroups 数量 |
|
||||||
| 25 | maxVgroupsPerDb | 是 | 否 | 由 create db 的参数 vgroups 指定实际 vgroups 数量 |
|
| 24 | maxTablesPerVnode | 是 | 否 | DB 中的所有表近似平均分配到各个 vgroup |
|
||||||
| 26 | maxTablesPerVnode | 是 | 否 | DB 中的所有表近似平均分配到各个 vgroup |
|
| 25 | minTablesPerVnode | 是 | 否 | DB 中的所有表近似平均分配到各个 vgroup |
|
||||||
| 27 | minTablesPerVnode | 是 | 否 | DB 中的所有表近似平均分配到各个 vgroup |
|
| 26 | tableIncStepPerVnode | 是 | 否 | DB 中的所有表近似平均分配到各个 vgroup |
|
||||||
| 28 | tableIncStepPerVnode | 是 | 否 | DB 中的所有表近似平均分配到各个 vgroup |
|
| 27 | cache | 是 | 否 | 由 buffer 代替 cache\*blocks |
|
||||||
| 29 | cache | 是 | 否 | 由 buffer 代替 cache\*blocks |
|
| 28 | blocks | 是 | 否 | 由 buffer 代替 cache\*blocks |
|
||||||
| 30 | blocks | 是 | 否 | 由 buffer 代替 cache\*blocks |
|
| 29 | days | 是 | 否 | 由 create db 的参数 duration 取代 |
|
||||||
| 31 | days | 是 | 否 | 由 create db 的参数 duration 取代 |
|
| 30 | keep | 是 | 否 | 由 create db 的参数 keep 取代 |
|
||||||
| 32 | keep | 是 | 否 | 由 create db 的参数 keep 取代 |
|
| 31 | minRows | 是 | 否 | 由 create db 的参数 minRows 取代 |
|
||||||
| 33 | minRows | 是 | 否 | 由 create db 的参数 minRows 取代 |
|
| 32 | maxRows | 是 | 否 | 由 create db 的参数 maxRows 取代 |
|
||||||
| 34 | maxRows | 是 | 否 | 由 create db 的参数 maxRows 取代 |
|
| 33 | quorum | 是 | 否 | 由 RAFT 协议决定 |
|
||||||
| 35 | quorum | 是 | 否 | 由 RAFT 协议决定 |
|
| 34 | comp | 是 | 否 | 由 create db 的参数 comp 取代 |
|
||||||
| 36 | comp | 是 | 否 | 由 create db 的参数 comp 取代 |
|
| 35 | walLevel | 是 | 否 | 由 create db 的参数 wal_level 取代 |
|
||||||
| 37 | walLevel | 是 | 否 | 由 create db 的参数 wal_level 取代 |
|
| 36 | fsync | 是 | 否 | 由 create db 的参数 wal_fsync_period 取代 |
|
||||||
| 38 | fsync | 是 | 否 | 由 create db 的参数 wal_fsync_period 取代 |
|
| 37 | replica | 是 | 否 | 由 create db 的参数 replica 取代 |
|
||||||
| 39 | replica | 是 | 否 | 由 create db 的参数 replica 取代 |
|
| 38 | partitions | 是 | 否 | 3.0 行为未知 |
|
||||||
| 40 | partitions | 是 | 否 | 3.0 行为未知 |
|
| 39 | update | 是 | 否 | 允许更新部分列 |
|
||||||
| 41 | update | 是 | 否 | 允许更新部分列 |
|
| 40 | cachelast | 是 | 否 | 由 create db 的参数 cacheModel 取代 |
|
||||||
| 42 | cachelast | 是 | 否 | 由 create db 的参数 cacheModel 取代 |
|
| 41 | maxSQLLength | 是 | 否 | SQL 上限为 1MB,无需参数控制 |
|
||||||
| 43 | maxSQLLength | 是 | 否 | SQL 上限为 1MB,无需参数控制 |
|
| 42 | maxWildCardsLength | 是 | 否 | 3.0 行为未知 |
|
||||||
| 44 | maxWildCardsLength | 是 | 否 | 3.0 行为未知 |
|
| 43 | maxRegexStringLen | 是 | 否 | 3.0 行为未知 |
|
||||||
| 45 | maxRegexStringLen | 是 | 否 | 3.0 行为未知 |
|
| 44 | maxNumOfOrderedRes | 是 | 否 | 3.0 行为未知 |
|
||||||
| 46 | maxNumOfOrderedRes | 是 | 否 | 3.0 行为未知 |
|
| 45 | maxConnections | 是 | 否 | 取决于系统配置和系统处理能力,详见后面的 Note |
|
||||||
| 47 | maxConnections | 是 | 否 | 取决于系统配置和系统处理能力,详见后面的 Note |
|
| 46 | mnodeEqualVnodeNum | 是 | 否 | 3.0 行为未知 |
|
||||||
| 48 | mnodeEqualVnodeNum | 是 | 否 | 3.0 行为未知 |
|
| 47 | http | 是 | 否 | http 服务由 taosAdapter 提供 |
|
||||||
| 49 | http | 是 | 否 | http 服务由 taosAdapter 提供 |
|
| 48 | httpEnableRecordSql | 是 | 否 | taosd 不提供 http 服务 |
|
||||||
| 50 | httpEnableRecordSql | 是 | 否 | taosd 不提供 http 服务 |
|
| 49 | httpMaxThreads | 是 | 否 | taosd 不提供 http 服务 |
|
||||||
| 51 | httpMaxThreads | 是 | 否 | taosd 不提供 http 服务 |
|
| 50 | restfulRowLimit | 是 | 否 | taosd 不提供 http 服务 |
|
||||||
| 52 | restfulRowLimit | 是 | 否 | taosd 不提供 http 服务 |
|
| 51 | httpDbNameMandatory | 是 | 否 | taosd 不提供 http 服务 |
|
||||||
| 53 | httpDbNameMandatory | 是 | 否 | taosd 不提供 http 服务 |
|
| 52 | httpKeepAlive | 是 | 否 | taosd 不提供 http 服务 |
|
||||||
| 54 | httpKeepAlive | 是 | 否 | taosd 不提供 http 服务 |
|
| 53 | enableRecordSql | 是 | 否 | 3.0 行为未知 |
|
||||||
| 55 | enableRecordSql | 是 | 否 | 3.0 行为未知 |
|
| 54 | maxBinaryDisplayWidth | 是 | 否 | 3.0 行为未知 |
|
||||||
| 56 | maxBinaryDisplayWidth | 是 | 否 | 3.0 行为未知 |
|
| 55 | stream | 是 | 否 | 默认启用连续查询 |
|
||||||
| 57 | stream | 是 | 否 | 默认启用连续查询 |
|
| 56 | retrieveBlockingModel | 是 | 否 | 3.0 行为未知 |
|
||||||
| 58 | retrieveBlockingModel | 是 | 否 | 3.0 行为未知 |
|
| 57 | tsdbMetaCompactRatio | 是 | 否 | 3.0 行为未知 |
|
||||||
| 59 | tsdbMetaCompactRatio | 是 | 否 | 3.0 行为未知 |
|
| 58 | defaultJSONStrType | 是 | 否 | 3.0 行为未知 |
|
||||||
| 60 | defaultJSONStrType | 是 | 否 | 3.0 行为未知 |
|
| 59 | walFlushSize | 是 | 否 | 3.0 行为未知 |
|
||||||
| 61 | walFlushSize | 是 | 否 | 3.0 行为未知 |
|
| 60 | flowctrl | 是 | 否 | 3.0 行为未知 |
|
||||||
| 62 | keepTimeOffset | 是 | 否 | 3.0 行为未知 |
|
| 61 | slaveQuery | 是 | 否 | 3.0 行为未知: slave vnode 是否能够处理查询? |
|
||||||
| 63 | flowctrl | 是 | 否 | 3.0 行为未知 |
|
| 62 | adjustMaster | 是 | 否 | 3.0 行为未知 |
|
||||||
| 64 | slaveQuery | 是 | 否 | 3.0 行为未知: slave vnode 是否能够处理查询? |
|
| 63 | topicBinaryLen | 是 | 否 | 3.0 行为未知 |
|
||||||
| 65 | adjustMaster | 是 | 否 | 3.0 行为未知 |
|
| 64 | telegrafUseFieldNum | 是 | 否 | 3.0 行为未知 |
|
||||||
| 66 | topicBinaryLen | 是 | 否 | 3.0 行为未知 |
|
| 65 | deadLockKillQuery | 是 | 否 | 3.0 行为未知 |
|
||||||
| 67 | telegrafUseFieldNum | 是 | 否 | 3.0 行为未知 |
|
| 66 | clientMerge | 是 | 否 | 3.0 行为未知 |
|
||||||
| 68 | deadLockKillQuery | 是 | 否 | 3.0 行为未知 |
|
| 67 | sdbDebugFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
|
||||||
| 69 | clientMerge | 是 | 否 | 3.0 行为未知 |
|
| 68 | odbcDebugFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
|
||||||
| 70 | sdbDebugFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
|
| 69 | httpDebugFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
|
||||||
| 71 | odbcDebugFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
|
| 70 | monDebugFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
|
||||||
| 72 | httpDebugFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
|
| 71 | cqDebugFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
|
||||||
| 73 | monDebugFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
|
| 72 | shortcutFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
|
||||||
| 74 | cqDebugFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
|
| 73 | probeSeconds | 是 | 否 | 3.0 行为未知 |
|
||||||
| 75 | shortcutFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
|
| 74 | probeKillSeconds | 是 | 否 | 3.0 行为未知 |
|
||||||
| 76 | probeSeconds | 是 | 否 | 3.0 行为未知 |
|
| 75 | probeInterval | 是 | 否 | 3.0 行为未知 |
|
||||||
| 77 | probeKillSeconds | 是 | 否 | 3.0 行为未知 |
|
| 76 | lossyColumns | 是 | 否 | 3.0 行为未知 |
|
||||||
| 78 | probeInterval | 是 | 否 | 3.0 行为未知 |
|
| 77 | fPrecision | 是 | 否 | 3.0 行为未知 |
|
||||||
| 79 | lossyColumns | 是 | 否 | 3.0 行为未知 |
|
| 78 | dPrecision | 是 | 否 | 3.0 行为未知 |
|
||||||
| 80 | fPrecision | 是 | 否 | 3.0 行为未知 |
|
| 79 | maxRange | 是 | 否 | 3.0 行为未知 |
|
||||||
| 81 | dPrecision | 是 | 否 | 3.0 行为未知 |
|
| 80 | range | 是 | 否 | 3.0 行为未知 |
|
||||||
| 82 | maxRange | 是 | 否 | 3.0 行为未知 |
|
|
||||||
| 83 | range | 是 | 否 | 3.0 行为未知 |
|
|
||||||
|
|
|
@ -10,6 +10,10 @@ TDengine 2.x 各版本安装包请访问[这里](https://www.taosdata.com/all-do
|
||||||
|
|
||||||
import Release from "/components/ReleaseV3";
|
import Release from "/components/ReleaseV3";
|
||||||
|
|
||||||
|
## 3.0.7.0
|
||||||
|
|
||||||
|
<Release type="tdengine" version="3.0.7.0" />
|
||||||
|
|
||||||
## 3.0.6.0
|
## 3.0.6.0
|
||||||
|
|
||||||
<Release type="tdengine" version="3.0.6.0" />
|
<Release type="tdengine" version="3.0.6.0" />
|
||||||
|
|
|
@ -48,6 +48,7 @@ extern int32_t tsMaxNumOfDistinctResults;
|
||||||
extern int32_t tsCompatibleModel;
|
extern int32_t tsCompatibleModel;
|
||||||
extern bool tsPrintAuth;
|
extern bool tsPrintAuth;
|
||||||
extern int64_t tsTickPerMin[3];
|
extern int64_t tsTickPerMin[3];
|
||||||
|
extern int64_t tsTickPerHour[3];
|
||||||
extern int32_t tsCountAlwaysReturnValue;
|
extern int32_t tsCountAlwaysReturnValue;
|
||||||
extern float tsSelectivityRatio;
|
extern float tsSelectivityRatio;
|
||||||
extern int32_t tsTagFilterResCacheSize;
|
extern int32_t tsTagFilterResCacheSize;
|
||||||
|
@ -184,6 +185,7 @@ extern int32_t tsRpcRetryInterval;
|
||||||
extern bool tsDisableStream;
|
extern bool tsDisableStream;
|
||||||
extern int64_t tsStreamBufferSize;
|
extern int64_t tsStreamBufferSize;
|
||||||
extern bool tsFilterScalarMode;
|
extern bool tsFilterScalarMode;
|
||||||
|
extern int32_t tsKeepTimeOffset;
|
||||||
extern int32_t tsMaxStreamBackendCache;
|
extern int32_t tsMaxStreamBackendCache;
|
||||||
extern int32_t tsPQSortMemThreshold;
|
extern int32_t tsPQSortMemThreshold;
|
||||||
|
|
||||||
|
|
|
@ -3025,6 +3025,7 @@ typedef struct {
|
||||||
char* sql;
|
char* sql;
|
||||||
char* ast;
|
char* ast;
|
||||||
int64_t deleteMark;
|
int64_t deleteMark;
|
||||||
|
int64_t lastTs;
|
||||||
} SMCreateSmaReq;
|
} SMCreateSmaReq;
|
||||||
|
|
||||||
int32_t tSerializeSMCreateSmaReq(void* buf, int32_t bufLen, SMCreateSmaReq* pReq);
|
int32_t tSerializeSMCreateSmaReq(void* buf, int32_t bufLen, SMCreateSmaReq* pReq);
|
||||||
|
|
|
@ -41,23 +41,22 @@ typedef struct {
|
||||||
} SLocalFetch;
|
} SLocalFetch;
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
void* tqReader;
|
void* tqReader;
|
||||||
void* config;
|
void* config;
|
||||||
void* vnode;
|
void* vnode;
|
||||||
void* mnd;
|
void* mnd;
|
||||||
SMsgCb* pMsgCb;
|
SMsgCb* pMsgCb;
|
||||||
int64_t version;
|
int64_t version;
|
||||||
bool initMetaReader;
|
bool initMetaReader;
|
||||||
bool initTableReader;
|
bool initTableReader;
|
||||||
bool initTqReader;
|
bool initTqReader;
|
||||||
int32_t numOfVgroups;
|
int32_t numOfVgroups;
|
||||||
void* sContext; // SSnapContext*
|
void* sContext; // SSnapContext*
|
||||||
|
void* pStateBackend;
|
||||||
|
int8_t fillHistory;
|
||||||
|
STimeWindow winRange;
|
||||||
|
|
||||||
void* pStateBackend;
|
|
||||||
struct SStorageAPI api;
|
struct SStorageAPI api;
|
||||||
|
|
||||||
int8_t fillHistory;
|
|
||||||
STimeWindow winRange;
|
|
||||||
} SReadHandle;
|
} SReadHandle;
|
||||||
|
|
||||||
// in queue mode, data streams are seperated by msg
|
// in queue mode, data streams are seperated by msg
|
||||||
|
@ -99,9 +98,6 @@ void qSetTaskId(qTaskInfo_t tinfo, uint64_t taskId, uint64_t queryId);
|
||||||
|
|
||||||
int32_t qSetStreamOpOpen(qTaskInfo_t tinfo);
|
int32_t qSetStreamOpOpen(qTaskInfo_t tinfo);
|
||||||
|
|
||||||
// todo refactor
|
|
||||||
void qGetCheckpointVersion(qTaskInfo_t tinfo, int64_t* dataVer, int64_t* ckId);
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Set multiple input data blocks for the stream scan.
|
* Set multiple input data blocks for the stream scan.
|
||||||
* @param tinfo
|
* @param tinfo
|
||||||
|
|
|
@ -380,7 +380,8 @@ typedef struct SStateStore {
|
||||||
SStreamStateCur* (*streamStateSessionSeekKeyCurrentNext)(SStreamState* pState, const SSessionKey* key);
|
SStreamStateCur* (*streamStateSessionSeekKeyCurrentNext)(SStreamState* pState, const SSessionKey* key);
|
||||||
|
|
||||||
struct SStreamFileState* (*streamFileStateInit)(int64_t memSize, uint32_t keySize, uint32_t rowSize,
|
struct SStreamFileState* (*streamFileStateInit)(int64_t memSize, uint32_t keySize, uint32_t rowSize,
|
||||||
uint32_t selectRowSize, GetTsFun fp, void* pFile, TSKEY delMark, const char*id);
|
uint32_t selectRowSize, GetTsFun fp, void* pFile, TSKEY delMark,
|
||||||
|
const char* id, int64_t ckId);
|
||||||
|
|
||||||
void (*streamFileStateDestroy)(struct SStreamFileState* pFileState);
|
void (*streamFileStateDestroy)(struct SStreamFileState* pFileState);
|
||||||
void (*streamFileStateClear)(struct SStreamFileState* pFileState);
|
void (*streamFileStateClear)(struct SStreamFileState* pFileState);
|
||||||
|
|
|
@ -319,19 +319,22 @@ typedef struct SIndexOptions {
|
||||||
SNode* pInterval;
|
SNode* pInterval;
|
||||||
SNode* pOffset;
|
SNode* pOffset;
|
||||||
SNode* pSliding;
|
SNode* pSliding;
|
||||||
|
int8_t tsPrecision;
|
||||||
SNode* pStreamOptions;
|
SNode* pStreamOptions;
|
||||||
} SIndexOptions;
|
} SIndexOptions;
|
||||||
|
|
||||||
typedef struct SCreateIndexStmt {
|
typedef struct SCreateIndexStmt {
|
||||||
ENodeType type;
|
ENodeType type;
|
||||||
EIndexType indexType;
|
EIndexType indexType;
|
||||||
bool ignoreExists;
|
bool ignoreExists;
|
||||||
char indexDbName[TSDB_DB_NAME_LEN];
|
char indexDbName[TSDB_DB_NAME_LEN];
|
||||||
char indexName[TSDB_INDEX_NAME_LEN];
|
char indexName[TSDB_INDEX_NAME_LEN];
|
||||||
char dbName[TSDB_DB_NAME_LEN];
|
char dbName[TSDB_DB_NAME_LEN];
|
||||||
char tableName[TSDB_TABLE_NAME_LEN];
|
char tableName[TSDB_TABLE_NAME_LEN];
|
||||||
SNodeList* pCols;
|
SNodeList* pCols;
|
||||||
SIndexOptions* pOptions;
|
SIndexOptions* pOptions;
|
||||||
|
SNode* pPrevQuery;
|
||||||
|
SMCreateSmaReq* pReq;
|
||||||
} SCreateIndexStmt;
|
} SCreateIndexStmt;
|
||||||
|
|
||||||
typedef struct SDropIndexStmt {
|
typedef struct SDropIndexStmt {
|
||||||
|
|
|
@ -246,7 +246,6 @@ typedef struct SSortLogicNode {
|
||||||
SLogicNode node;
|
SLogicNode node;
|
||||||
SNodeList* pSortKeys;
|
SNodeList* pSortKeys;
|
||||||
bool groupSort;
|
bool groupSort;
|
||||||
int64_t maxRows;
|
|
||||||
} SSortLogicNode;
|
} SSortLogicNode;
|
||||||
|
|
||||||
typedef struct SPartitionLogicNode {
|
typedef struct SPartitionLogicNode {
|
||||||
|
@ -524,7 +523,6 @@ typedef struct SSortPhysiNode {
|
||||||
SNodeList* pExprs; // these are expression list of order_by_clause and parameter expression of aggregate function
|
SNodeList* pExprs; // these are expression list of order_by_clause and parameter expression of aggregate function
|
||||||
SNodeList* pSortKeys; // element is SOrderByExprNode, and SOrderByExprNode::pExpr is SColumnNode
|
SNodeList* pSortKeys; // element is SOrderByExprNode, and SOrderByExprNode::pExpr is SColumnNode
|
||||||
SNodeList* pTargets;
|
SNodeList* pTargets;
|
||||||
int64_t maxRows;
|
|
||||||
} SSortPhysiNode;
|
} SSortPhysiNode;
|
||||||
|
|
||||||
typedef SSortPhysiNode SGroupSortPhysiNode;
|
typedef SSortPhysiNode SGroupSortPhysiNode;
|
||||||
|
|
|
@ -41,7 +41,7 @@ typedef struct SFilterColumnParam {
|
||||||
} SFilterColumnParam;
|
} SFilterColumnParam;
|
||||||
|
|
||||||
extern int32_t filterInitFromNode(SNode *pNode, SFilterInfo **pinfo, uint32_t options);
|
extern int32_t filterInitFromNode(SNode *pNode, SFilterInfo **pinfo, uint32_t options);
|
||||||
extern bool filterExecute(SFilterInfo *info, SSDataBlock *pSrc, SColumnInfoData **p, SColumnDataAgg *statis,
|
extern int32_t filterExecute(SFilterInfo *info, SSDataBlock *pSrc, SColumnInfoData **p, SColumnDataAgg *statis,
|
||||||
int16_t numOfCols, int32_t *pFilterResStatus);
|
int16_t numOfCols, int32_t *pFilterResStatus);
|
||||||
extern int32_t filterSetDataFromSlotId(SFilterInfo *info, void *param);
|
extern int32_t filterSetDataFromSlotId(SFilterInfo *info, void *param);
|
||||||
extern int32_t filterSetDataFromColId(SFilterInfo *info, void *param);
|
extern int32_t filterSetDataFromColId(SFilterInfo *info, void *param);
|
||||||
|
|
|
@ -31,7 +31,8 @@ typedef struct SStreamFileState SStreamFileState;
|
||||||
typedef SList SStreamSnapshot;
|
typedef SList SStreamSnapshot;
|
||||||
|
|
||||||
SStreamFileState* streamFileStateInit(int64_t memSize, uint32_t keySize, uint32_t rowSize, uint32_t selectRowSize,
|
SStreamFileState* streamFileStateInit(int64_t memSize, uint32_t keySize, uint32_t rowSize, uint32_t selectRowSize,
|
||||||
GetTsFun fp, void* pFile, TSKEY delMark, const char* id);
|
GetTsFun fp, void* pFile, TSKEY delMark, const char* taskId,
|
||||||
|
int64_t checkpointId);
|
||||||
void streamFileStateDestroy(SStreamFileState* pFileState);
|
void streamFileStateDestroy(SStreamFileState* pFileState);
|
||||||
void streamFileStateClear(SStreamFileState* pFileState);
|
void streamFileStateClear(SStreamFileState* pFileState);
|
||||||
bool needClearDiskBuff(SStreamFileState* pFileState);
|
bool needClearDiskBuff(SStreamFileState* pFileState);
|
||||||
|
@ -44,7 +45,7 @@ bool hasRowBuff(SStreamFileState* pFileState, void* pKey, int32_t keyLen);
|
||||||
|
|
||||||
SStreamSnapshot* getSnapshot(SStreamFileState* pFileState);
|
SStreamSnapshot* getSnapshot(SStreamFileState* pFileState);
|
||||||
int32_t flushSnapshot(SStreamFileState* pFileState, SStreamSnapshot* pSnapshot, bool flushState);
|
int32_t flushSnapshot(SStreamFileState* pFileState, SStreamSnapshot* pSnapshot, bool flushState);
|
||||||
int32_t recoverSnapshot(SStreamFileState* pFileState);
|
int32_t recoverSnapshot(SStreamFileState* pFileState, int64_t ckId);
|
||||||
|
|
||||||
int32_t getSnapshotIdList(SStreamFileState* pFileState, SArray* list);
|
int32_t getSnapshotIdList(SStreamFileState* pFileState, SArray* list);
|
||||||
int32_t deleteExpiredCheckPoint(SStreamFileState* pFileState, TSKEY mark);
|
int32_t deleteExpiredCheckPoint(SStreamFileState* pFileState, TSKEY mark);
|
||||||
|
|
|
@ -706,6 +706,7 @@ int32_t* taosGetErrno();
|
||||||
#define TSDB_CODE_PAR_INVALID_TIMELINE_QUERY TAOS_DEF_ERROR_CODE(0, 0x2666)
|
#define TSDB_CODE_PAR_INVALID_TIMELINE_QUERY TAOS_DEF_ERROR_CODE(0, 0x2666)
|
||||||
#define TSDB_CODE_PAR_INVALID_OPTR_USAGE TAOS_DEF_ERROR_CODE(0, 0x2667)
|
#define TSDB_CODE_PAR_INVALID_OPTR_USAGE TAOS_DEF_ERROR_CODE(0, 0x2667)
|
||||||
#define TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED_FUNC TAOS_DEF_ERROR_CODE(0, 0x2668)
|
#define TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED_FUNC TAOS_DEF_ERROR_CODE(0, 0x2668)
|
||||||
|
#define TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED TAOS_DEF_ERROR_CODE(0, 0x2669)
|
||||||
#define TSDB_CODE_PAR_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x26FF)
|
#define TSDB_CODE_PAR_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x26FF)
|
||||||
|
|
||||||
//planner
|
//planner
|
||||||
|
@ -764,6 +765,9 @@ int32_t* taosGetErrno();
|
||||||
#define TSDB_CODE_INDEX_REBUILDING TAOS_DEF_ERROR_CODE(0, 0x3200)
|
#define TSDB_CODE_INDEX_REBUILDING TAOS_DEF_ERROR_CODE(0, 0x3200)
|
||||||
#define TSDB_CODE_INDEX_INVALID_FILE TAOS_DEF_ERROR_CODE(0, 0x3201)
|
#define TSDB_CODE_INDEX_INVALID_FILE TAOS_DEF_ERROR_CODE(0, 0x3201)
|
||||||
|
|
||||||
|
//scalar
|
||||||
|
#define TSDB_CODE_SCALAR_CONVERT_ERROR TAOS_DEF_ERROR_CODE(0, 0x3250)
|
||||||
|
|
||||||
//tmq
|
//tmq
|
||||||
#define TSDB_CODE_TMQ_INVALID_MSG TAOS_DEF_ERROR_CODE(0, 0x4000)
|
#define TSDB_CODE_TMQ_INVALID_MSG TAOS_DEF_ERROR_CODE(0, 0x4000)
|
||||||
#define TSDB_CODE_TMQ_CONSUMER_MISMATCH TAOS_DEF_ERROR_CODE(0, 0x4001)
|
#define TSDB_CODE_TMQ_CONSUMER_MISMATCH TAOS_DEF_ERROR_CODE(0, 0x4001)
|
||||||
|
|
|
@ -77,7 +77,7 @@ PriorityQueueNode* taosPQTop(PriorityQueue* pq);
|
||||||
|
|
||||||
size_t taosPQSize(PriorityQueue* pq);
|
size_t taosPQSize(PriorityQueue* pq);
|
||||||
|
|
||||||
void taosPQPush(PriorityQueue* pq, const PriorityQueueNode* node);
|
PriorityQueueNode* taosPQPush(PriorityQueue* pq, const PriorityQueueNode* node);
|
||||||
|
|
||||||
void taosPQPop(PriorityQueue* pq);
|
void taosPQPop(PriorityQueue* pq);
|
||||||
|
|
||||||
|
@ -89,7 +89,13 @@ void taosBQSetFn(BoundedQueue* q, pq_comp_fn fn);
|
||||||
|
|
||||||
void destroyBoundedQueue(BoundedQueue* q);
|
void destroyBoundedQueue(BoundedQueue* q);
|
||||||
|
|
||||||
void taosBQPush(BoundedQueue* q, PriorityQueueNode* n);
|
/*
|
||||||
|
* Push one node into BQ
|
||||||
|
* @retval NULL if n is upper than top node in q, and n is not freed
|
||||||
|
* @retval the pushed Node if pushing succeeded
|
||||||
|
* @note if maxSize exceeded, the original highest node is popped and freed with deleteFn
|
||||||
|
* */
|
||||||
|
PriorityQueueNode* taosBQPush(BoundedQueue* q, PriorityQueueNode* n);
|
||||||
|
|
||||||
PriorityQueueNode* taosBQTop(BoundedQueue* q);
|
PriorityQueueNode* taosBQTop(BoundedQueue* q);
|
||||||
|
|
||||||
|
|
|
@ -108,6 +108,9 @@
|
||||||
# time period of keeping log files, in days
|
# time period of keeping log files, in days
|
||||||
# logKeepDays 0
|
# logKeepDays 0
|
||||||
|
|
||||||
|
# unit Hour. Latency of data migration
|
||||||
|
# keepTimeOffset 0
|
||||||
|
|
||||||
|
|
||||||
############ 3. Debug Flag and levels #############################################
|
############ 3. Debug Flag and levels #############################################
|
||||||
|
|
||||||
|
|
|
@ -102,8 +102,6 @@ static const SSysDbTableSchema userDBSchema[] = {
|
||||||
{.name = "wal_fsync_period", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
|
{.name = "wal_fsync_period", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
|
||||||
{.name = "wal_retention_period", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
|
{.name = "wal_retention_period", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
|
||||||
{.name = "wal_retention_size", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = true},
|
{.name = "wal_retention_size", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = true},
|
||||||
{.name = "wal_roll_period", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
|
|
||||||
{.name = "wal_segment_size", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = true},
|
|
||||||
{.name = "stt_trigger", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT, .sysInfo = true},
|
{.name = "stt_trigger", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT, .sysInfo = true},
|
||||||
{.name = "table_prefix", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT, .sysInfo = true},
|
{.name = "table_prefix", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT, .sysInfo = true},
|
||||||
{.name = "table_suffix", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT, .sysInfo = true},
|
{.name = "table_suffix", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT, .sysInfo = true},
|
||||||
|
|
|
@ -186,6 +186,13 @@ bool tsDeployOnSnode = true;
|
||||||
* TSDB_TIME_PRECISION_NANO: 60000000000L
|
* TSDB_TIME_PRECISION_NANO: 60000000000L
|
||||||
*/
|
*/
|
||||||
int64_t tsTickPerMin[] = {60000L, 60000000L, 60000000000L};
|
int64_t tsTickPerMin[] = {60000L, 60000000L, 60000000000L};
|
||||||
|
/*
|
||||||
|
* millisecond by default
|
||||||
|
* for TSDB_TIME_PRECISION_MILLI: 3600000L
|
||||||
|
* TSDB_TIME_PRECISION_MICRO: 3600000000L
|
||||||
|
* TSDB_TIME_PRECISION_NANO: 3600000000000L
|
||||||
|
*/
|
||||||
|
int64_t tsTickPerHour[] = {3600000L, 3600000000L, 3600000000000L};
|
||||||
|
|
||||||
// lossy compress 6
|
// lossy compress 6
|
||||||
char tsLossyColumns[32] = ""; // "float|double" means all float and double columns can be lossy compressed. set empty
|
char tsLossyColumns[32] = ""; // "float|double" means all float and double columns can be lossy compressed. set empty
|
||||||
|
@ -216,6 +223,7 @@ char tsUdfdLdLibPath[512] = "";
|
||||||
bool tsDisableStream = false;
|
bool tsDisableStream = false;
|
||||||
int64_t tsStreamBufferSize = 128 * 1024 * 1024;
|
int64_t tsStreamBufferSize = 128 * 1024 * 1024;
|
||||||
bool tsFilterScalarMode = false;
|
bool tsFilterScalarMode = false;
|
||||||
|
int32_t tsKeepTimeOffset = 0; // latency of data migration
|
||||||
|
|
||||||
#ifndef _STORAGE
|
#ifndef _STORAGE
|
||||||
int32_t taosSetTfsCfg(SConfig *pCfg) {
|
int32_t taosSetTfsCfg(SConfig *pCfg) {
|
||||||
|
@ -535,6 +543,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
|
||||||
if (cfgAddInt32(pCfg, "cacheLazyLoadThreshold", tsCacheLazyLoadThreshold, 0, 100000, 0) != 0) return -1;
|
if (cfgAddInt32(pCfg, "cacheLazyLoadThreshold", tsCacheLazyLoadThreshold, 0, 100000, 0) != 0) return -1;
|
||||||
|
|
||||||
if (cfgAddBool(pCfg, "filterScalarMode", tsFilterScalarMode, 0) != 0) return -1;
|
if (cfgAddBool(pCfg, "filterScalarMode", tsFilterScalarMode, 0) != 0) return -1;
|
||||||
|
if (cfgAddInt32(pCfg, "keepTimeOffset", tsKeepTimeOffset, 0, 23, 0) != 0) return -1;
|
||||||
if (cfgAddInt32(pCfg, "maxStreamBackendCache", tsMaxStreamBackendCache, 16, 1024, 0) != 0) return -1;
|
if (cfgAddInt32(pCfg, "maxStreamBackendCache", tsMaxStreamBackendCache, 16, 1024, 0) != 0) return -1;
|
||||||
if (cfgAddInt32(pCfg, "pqSortMemThreshold", tsPQSortMemThreshold, 1, 10240, 0) != 0) return -1;
|
if (cfgAddInt32(pCfg, "pqSortMemThreshold", tsPQSortMemThreshold, 1, 10240, 0) != 0) return -1;
|
||||||
|
|
||||||
|
@ -918,6 +927,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
|
||||||
tsStreamBufferSize = cfgGetItem(pCfg, "streamBufferSize")->i64;
|
tsStreamBufferSize = cfgGetItem(pCfg, "streamBufferSize")->i64;
|
||||||
|
|
||||||
tsFilterScalarMode = cfgGetItem(pCfg, "filterScalarMode")->bval;
|
tsFilterScalarMode = cfgGetItem(pCfg, "filterScalarMode")->bval;
|
||||||
|
tsKeepTimeOffset = cfgGetItem(pCfg, "keepTimeOffset")->i32;
|
||||||
tsMaxStreamBackendCache = cfgGetItem(pCfg, "maxStreamBackendCache")->i32;
|
tsMaxStreamBackendCache = cfgGetItem(pCfg, "maxStreamBackendCache")->i32;
|
||||||
tsPQSortMemThreshold = cfgGetItem(pCfg, "pqSortMemThreshold")->i32;
|
tsPQSortMemThreshold = cfgGetItem(pCfg, "pqSortMemThreshold")->i32;
|
||||||
|
|
||||||
|
@ -1475,6 +1485,19 @@ void taosCfgDynamicOptions(const char *option, const char *value) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (strcasecmp(option, "keepTimeOffset") == 0) {
|
||||||
|
int32_t newKeepTimeOffset = atoi(value);
|
||||||
|
if (newKeepTimeOffset < 0 || newKeepTimeOffset > 23) {
|
||||||
|
uError("failed to set keepTimeOffset from %d to %d. Valid range: [0, 23]", tsKeepTimeOffset, newKeepTimeOffset);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
uInfo("keepTimeOffset set from %d to %d", tsKeepTimeOffset, newKeepTimeOffset);
|
||||||
|
tsKeepTimeOffset = newKeepTimeOffset;
|
||||||
|
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
const char *options[] = {
|
const char *options[] = {
|
||||||
"dDebugFlag", "vDebugFlag", "mDebugFlag", "wDebugFlag", "sDebugFlag", "tsdbDebugFlag", "tqDebugFlag",
|
"dDebugFlag", "vDebugFlag", "mDebugFlag", "wDebugFlag", "sDebugFlag", "tsdbDebugFlag", "tqDebugFlag",
|
||||||
"fsDebugFlag", "udfDebugFlag", "smaDebugFlag", "idxDebugFlag", "tdbDebugFlag", "tmrDebugFlag", "uDebugFlag",
|
"fsDebugFlag", "udfDebugFlag", "smaDebugFlag", "idxDebugFlag", "tdbDebugFlag", "tmrDebugFlag", "uDebugFlag",
|
||||||
|
|
|
@ -835,6 +835,7 @@ int32_t tSerializeSMCreateSmaReq(void *buf, int32_t bufLen, SMCreateSmaReq *pReq
|
||||||
if (tEncodeBinary(&encoder, pReq->ast, pReq->astLen) < 0) return -1;
|
if (tEncodeBinary(&encoder, pReq->ast, pReq->astLen) < 0) return -1;
|
||||||
}
|
}
|
||||||
if (tEncodeI64(&encoder, pReq->deleteMark) < 0) return -1;
|
if (tEncodeI64(&encoder, pReq->deleteMark) < 0) return -1;
|
||||||
|
if (tEncodeI64(&encoder, pReq->lastTs) < 0) return -1;
|
||||||
tEndEncode(&encoder);
|
tEndEncode(&encoder);
|
||||||
|
|
||||||
int32_t tlen = encoder.pos;
|
int32_t tlen = encoder.pos;
|
||||||
|
@ -884,6 +885,7 @@ int32_t tDeserializeSMCreateSmaReq(void *buf, int32_t bufLen, SMCreateSmaReq *pR
|
||||||
if (tDecodeCStrTo(&decoder, pReq->ast) < 0) return -1;
|
if (tDecodeCStrTo(&decoder, pReq->ast) < 0) return -1;
|
||||||
}
|
}
|
||||||
if (tDecodeI64(&decoder, &pReq->deleteMark) < 0) return -1;
|
if (tDecodeI64(&decoder, &pReq->deleteMark) < 0) return -1;
|
||||||
|
if (tDecodeI64(&decoder, &pReq->lastTs) < 0) return -1;
|
||||||
tEndDecode(&decoder);
|
tEndDecode(&decoder);
|
||||||
tDecoderClear(&decoder);
|
tDecoderClear(&decoder);
|
||||||
return 0;
|
return 0;
|
||||||
|
|
|
@ -1840,12 +1840,6 @@ static void mndDumpDbInfoData(SMnode *pMnode, SSDataBlock *pBlock, SDbObj *pDb,
|
||||||
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
||||||
colDataSetVal(pColInfo, rows, (const char *)&pDb->cfg.walRetentionSize, false);
|
colDataSetVal(pColInfo, rows, (const char *)&pDb->cfg.walRetentionSize, false);
|
||||||
|
|
||||||
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
|
||||||
colDataSetVal(pColInfo, rows, (const char *)&pDb->cfg.walRollPeriod, false);
|
|
||||||
|
|
||||||
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
|
||||||
colDataSetVal(pColInfo, rows, (const char *)&pDb->cfg.walSegmentSize, false);
|
|
||||||
|
|
||||||
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
||||||
colDataSetVal(pColInfo, rows, (const char *)&pDb->cfg.sstTrigger, false);
|
colDataSetVal(pColInfo, rows, (const char *)&pDb->cfg.sstTrigger, false);
|
||||||
|
|
||||||
|
|
|
@ -78,6 +78,7 @@ ESyncRole vnodeGetRole(SVnode *pVnode);
|
||||||
int32_t vnodeGetCtbIdList(void *pVnode, int64_t suid, SArray *list);
|
int32_t vnodeGetCtbIdList(void *pVnode, int64_t suid, SArray *list);
|
||||||
int32_t vnodeGetCtbIdListByFilter(SVnode *pVnode, int64_t suid, SArray *list, bool (*filter)(void *arg), void *arg);
|
int32_t vnodeGetCtbIdListByFilter(SVnode *pVnode, int64_t suid, SArray *list, bool (*filter)(void *arg), void *arg);
|
||||||
int32_t vnodeGetStbIdList(SVnode *pVnode, int64_t suid, SArray *list);
|
int32_t vnodeGetStbIdList(SVnode *pVnode, int64_t suid, SArray *list);
|
||||||
|
int32_t vnodeGetStbIdListByFilter(SVnode *pVnode, int64_t suid, SArray *list, bool (*filter)(void *arg, void* arg1), void *arg);
|
||||||
void *vnodeGetIdx(void *pVnode);
|
void *vnodeGetIdx(void *pVnode);
|
||||||
void *vnodeGetIvtIdx(void *pVnode);
|
void *vnodeGetIvtIdx(void *pVnode);
|
||||||
|
|
||||||
|
@ -126,6 +127,9 @@ tb_uid_t metaGetTableEntryUidByName(SMeta *pMeta, const char *name);
|
||||||
int32_t metaGetCachedTbGroup(void *pVnode, tb_uid_t suid, const uint8_t *pKey, int32_t keyLen, SArray **pList);
|
int32_t metaGetCachedTbGroup(void *pVnode, tb_uid_t suid, const uint8_t *pKey, int32_t keyLen, SArray **pList);
|
||||||
int32_t metaPutTbGroupToCache(void* pVnode, uint64_t suid, const void *pKey, int32_t keyLen, void *pPayload,
|
int32_t metaPutTbGroupToCache(void* pVnode, uint64_t suid, const void *pKey, int32_t keyLen, void *pPayload,
|
||||||
int32_t payloadLen);
|
int32_t payloadLen);
|
||||||
|
bool metaTbInFilterCache(void *pVnode, tb_uid_t suid, int8_t type);
|
||||||
|
int32_t metaPutTbToFilterCache(void *pVnode, tb_uid_t suid, int8_t type);
|
||||||
|
int32_t metaSizeOfTbFilterCache(void *pVnode, int8_t type);
|
||||||
|
|
||||||
int32_t metaGetStbStats(void *pVnode, int64_t uid, int64_t *numOfTables);
|
int32_t metaGetStbStats(void *pVnode, int64_t uid, int64_t *numOfTables);
|
||||||
|
|
||||||
|
|
|
@ -79,16 +79,18 @@ typedef struct {
|
||||||
TXN* pTxn;
|
TXN* pTxn;
|
||||||
} STtlDelTtlCtx;
|
} STtlDelTtlCtx;
|
||||||
|
|
||||||
int ttlMgrOpen(STtlManger** ppTtlMgr, TDB* pEnv, int8_t rollback);
|
int ttlMgrOpen(STtlManger** ppTtlMgr, TDB* pEnv, int8_t rollback);
|
||||||
int ttlMgrClose(STtlManger* pTtlMgr);
|
void ttlMgrClose(STtlManger* pTtlMgr);
|
||||||
int ttlMgrBegin(STtlManger* pTtlMgr, void* pMeta);
|
int ttlMgrPostOpen(STtlManger* pTtlMgr, void* pMeta);
|
||||||
|
|
||||||
int ttlMgrConvert(TTB* pOldTtlIdx, TTB* pNewTtlIdx, void* pMeta);
|
bool ttlMgrNeedUpgrade(TDB* pEnv);
|
||||||
int ttlMgrFlush(STtlManger* pTtlMgr, TXN* pTxn);
|
int ttlMgrUpgrade(STtlManger* pTtlMgr, void* pMeta);
|
||||||
|
|
||||||
int ttlMgrInsertTtl(STtlManger* pTtlMgr, const STtlUpdTtlCtx* pUpdCtx);
|
int ttlMgrInsertTtl(STtlManger* pTtlMgr, const STtlUpdTtlCtx* pUpdCtx);
|
||||||
int ttlMgrDeleteTtl(STtlManger* pTtlMgr, const STtlDelTtlCtx* pDelCtx);
|
int ttlMgrDeleteTtl(STtlManger* pTtlMgr, const STtlDelTtlCtx* pDelCtx);
|
||||||
int ttlMgrUpdateChangeTime(STtlManger* pTtlMgr, const STtlUpdCtimeCtx* pUpdCtimeCtx);
|
int ttlMgrUpdateChangeTime(STtlManger* pTtlMgr, const STtlUpdCtimeCtx* pUpdCtimeCtx);
|
||||||
|
|
||||||
|
int ttlMgrFlush(STtlManger* pTtlMgr, TXN* pTxn);
|
||||||
int ttlMgrFindExpired(STtlManger* pTtlMgr, int64_t timePointMs, SArray* pTbUids);
|
int ttlMgrFindExpired(STtlManger* pTtlMgr, int64_t timePointMs, SArray* pTbUids);
|
||||||
|
|
||||||
#ifdef __cplusplus
|
#ifdef __cplusplus
|
||||||
|
|
|
@ -137,6 +137,7 @@ typedef struct STbUidStore STbUidStore;
|
||||||
#define META_BEGIN_HEAP_NIL 2
|
#define META_BEGIN_HEAP_NIL 2
|
||||||
|
|
||||||
int metaOpen(SVnode* pVnode, SMeta** ppMeta, int8_t rollback);
|
int metaOpen(SVnode* pVnode, SMeta** ppMeta, int8_t rollback);
|
||||||
|
int metaUpgrade(SVnode* pVnode, SMeta** ppMeta);
|
||||||
int metaClose(SMeta** pMeta);
|
int metaClose(SMeta** pMeta);
|
||||||
int metaBegin(SMeta* pMeta, int8_t fromSys);
|
int metaBegin(SMeta* pMeta, int8_t fromSys);
|
||||||
TXN* metaGetTxn(SMeta* pMeta);
|
TXN* metaGetTxn(SMeta* pMeta);
|
||||||
|
|
|
@ -66,6 +66,10 @@ struct SMetaCache {
|
||||||
SHashObj* pTableEntry;
|
SHashObj* pTableEntry;
|
||||||
SLRUCache* pResCache;
|
SLRUCache* pResCache;
|
||||||
} STbGroupResCache;
|
} STbGroupResCache;
|
||||||
|
|
||||||
|
struct STbFilterCache {
|
||||||
|
SHashObj* pStb;
|
||||||
|
} STbFilterCache;
|
||||||
};
|
};
|
||||||
|
|
||||||
static void entryCacheClose(SMeta* pMeta) {
|
static void entryCacheClose(SMeta* pMeta) {
|
||||||
|
@ -168,6 +172,12 @@ int32_t metaCacheOpen(SMeta* pMeta) {
|
||||||
taosHashSetFreeFp(pCache->STbGroupResCache.pTableEntry, freeCacheEntryFp);
|
taosHashSetFreeFp(pCache->STbGroupResCache.pTableEntry, freeCacheEntryFp);
|
||||||
taosThreadMutexInit(&pCache->STbGroupResCache.lock, NULL);
|
taosThreadMutexInit(&pCache->STbGroupResCache.lock, NULL);
|
||||||
|
|
||||||
|
pCache->STbFilterCache.pStb = taosHashInit(0, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
|
||||||
|
if (pCache->STbFilterCache.pStb == NULL) {
|
||||||
|
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||||
|
goto _err2;
|
||||||
|
}
|
||||||
|
|
||||||
pMeta->pCache = pCache;
|
pMeta->pCache = pCache;
|
||||||
return code;
|
return code;
|
||||||
|
|
||||||
|
@ -193,6 +203,8 @@ void metaCacheClose(SMeta* pMeta) {
|
||||||
taosThreadMutexDestroy(&pMeta->pCache->STbGroupResCache.lock);
|
taosThreadMutexDestroy(&pMeta->pCache->STbGroupResCache.lock);
|
||||||
taosHashCleanup(pMeta->pCache->STbGroupResCache.pTableEntry);
|
taosHashCleanup(pMeta->pCache->STbGroupResCache.pTableEntry);
|
||||||
|
|
||||||
|
taosHashCleanup(pMeta->pCache->STbFilterCache.pStb);
|
||||||
|
|
||||||
taosMemoryFree(pMeta->pCache);
|
taosMemoryFree(pMeta->pCache);
|
||||||
pMeta->pCache = NULL;
|
pMeta->pCache = NULL;
|
||||||
}
|
}
|
||||||
|
@ -880,3 +892,31 @@ int32_t metaTbGroupCacheClear(SMeta* pMeta, uint64_t suid) {
|
||||||
metaDebug("vgId:%d suid:%" PRId64 " cached related tb group cleared", vgId, suid);
|
metaDebug("vgId:%d suid:%" PRId64 " cached related tb group cleared", vgId, suid);
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
bool metaTbInFilterCache(void* pVnode, tb_uid_t suid, int8_t type) {
|
||||||
|
SMeta* pMeta = ((SVnode*)pVnode)->pMeta;
|
||||||
|
|
||||||
|
if (type == 0 && taosHashGet(pMeta->pCache->STbFilterCache.pStb, &suid, sizeof(suid))) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t metaPutTbToFilterCache(void* pVnode, tb_uid_t suid, int8_t type) {
|
||||||
|
SMeta* pMeta = ((SVnode*)pVnode)->pMeta;
|
||||||
|
|
||||||
|
if (type == 0) {
|
||||||
|
return taosHashPut(pMeta->pCache->STbFilterCache.pStb, &suid, sizeof(suid), NULL, 0);
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t metaSizeOfTbFilterCache(void* pVnode, int8_t type) {
|
||||||
|
SMeta* pMeta = ((SVnode*)pVnode)->pMeta;
|
||||||
|
if (type == 0) {
|
||||||
|
return taosHashGetSize(pMeta->pCache->STbFilterCache.pStb);
|
||||||
|
}
|
||||||
|
return 0;
|
||||||
|
}
|
|
@ -40,10 +40,6 @@ int metaBegin(SMeta *pMeta, int8_t heap) {
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (ttlMgrBegin(pMeta->pTtlMgr, pMeta) < 0) {
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
|
|
||||||
tdbCommit(pMeta->pEnv, pMeta->txn);
|
tdbCommit(pMeta->pEnv, pMeta->txn);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
|
|
@ -29,6 +29,8 @@ static int ncolIdxCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen
|
||||||
static int32_t metaInitLock(SMeta *pMeta) { return taosThreadRwlockInit(&pMeta->lock, NULL); }
|
static int32_t metaInitLock(SMeta *pMeta) { return taosThreadRwlockInit(&pMeta->lock, NULL); }
|
||||||
static int32_t metaDestroyLock(SMeta *pMeta) { return taosThreadRwlockDestroy(&pMeta->lock); }
|
static int32_t metaDestroyLock(SMeta *pMeta) { return taosThreadRwlockDestroy(&pMeta->lock); }
|
||||||
|
|
||||||
|
static void metaCleanup(SMeta **ppMeta);
|
||||||
|
|
||||||
int metaOpen(SVnode *pVnode, SMeta **ppMeta, int8_t rollback) {
|
int metaOpen(SVnode *pVnode, SMeta **ppMeta, int8_t rollback) {
|
||||||
SMeta *pMeta = NULL;
|
SMeta *pMeta = NULL;
|
||||||
int ret;
|
int ret;
|
||||||
|
@ -180,51 +182,43 @@ int metaOpen(SVnode *pVnode, SMeta **ppMeta, int8_t rollback) {
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
_err:
|
_err:
|
||||||
if (pMeta->pIdx) metaCloseIdx(pMeta);
|
metaCleanup(&pMeta);
|
||||||
if (pMeta->pStreamDb) tdbTbClose(pMeta->pStreamDb);
|
|
||||||
if (pMeta->pNcolIdx) tdbTbClose(pMeta->pNcolIdx);
|
|
||||||
if (pMeta->pBtimeIdx) tdbTbClose(pMeta->pBtimeIdx);
|
|
||||||
if (pMeta->pSmaIdx) tdbTbClose(pMeta->pSmaIdx);
|
|
||||||
if (pMeta->pTtlMgr) ttlMgrClose(pMeta->pTtlMgr);
|
|
||||||
if (pMeta->pTagIvtIdx) indexClose(pMeta->pTagIvtIdx);
|
|
||||||
if (pMeta->pTagIdx) tdbTbClose(pMeta->pTagIdx);
|
|
||||||
if (pMeta->pCtbIdx) tdbTbClose(pMeta->pCtbIdx);
|
|
||||||
if (pMeta->pSuidIdx) tdbTbClose(pMeta->pSuidIdx);
|
|
||||||
if (pMeta->pNameIdx) tdbTbClose(pMeta->pNameIdx);
|
|
||||||
if (pMeta->pUidIdx) tdbTbClose(pMeta->pUidIdx);
|
|
||||||
if (pMeta->pSkmDb) tdbTbClose(pMeta->pSkmDb);
|
|
||||||
if (pMeta->pTbDb) tdbTbClose(pMeta->pTbDb);
|
|
||||||
if (pMeta->pEnv) tdbClose(pMeta->pEnv);
|
|
||||||
metaDestroyLock(pMeta);
|
|
||||||
taosMemoryFree(pMeta);
|
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
int metaClose(SMeta **ppMeta) {
|
int metaUpgrade(SVnode *pVnode, SMeta **ppMeta) {
|
||||||
|
int code = TSDB_CODE_SUCCESS;
|
||||||
SMeta *pMeta = *ppMeta;
|
SMeta *pMeta = *ppMeta;
|
||||||
if (pMeta) {
|
|
||||||
if (pMeta->pEnv) metaAbort(pMeta);
|
|
||||||
if (pMeta->pCache) metaCacheClose(pMeta);
|
|
||||||
if (pMeta->pIdx) metaCloseIdx(pMeta);
|
|
||||||
if (pMeta->pStreamDb) tdbTbClose(pMeta->pStreamDb);
|
|
||||||
if (pMeta->pNcolIdx) tdbTbClose(pMeta->pNcolIdx);
|
|
||||||
if (pMeta->pBtimeIdx) tdbTbClose(pMeta->pBtimeIdx);
|
|
||||||
if (pMeta->pSmaIdx) tdbTbClose(pMeta->pSmaIdx);
|
|
||||||
if (pMeta->pTtlMgr) ttlMgrClose(pMeta->pTtlMgr);
|
|
||||||
if (pMeta->pTagIvtIdx) indexClose(pMeta->pTagIvtIdx);
|
|
||||||
if (pMeta->pTagIdx) tdbTbClose(pMeta->pTagIdx);
|
|
||||||
if (pMeta->pCtbIdx) tdbTbClose(pMeta->pCtbIdx);
|
|
||||||
if (pMeta->pSuidIdx) tdbTbClose(pMeta->pSuidIdx);
|
|
||||||
if (pMeta->pNameIdx) tdbTbClose(pMeta->pNameIdx);
|
|
||||||
if (pMeta->pUidIdx) tdbTbClose(pMeta->pUidIdx);
|
|
||||||
if (pMeta->pSkmDb) tdbTbClose(pMeta->pSkmDb);
|
|
||||||
if (pMeta->pTbDb) tdbTbClose(pMeta->pTbDb);
|
|
||||||
if (pMeta->pEnv) tdbClose(pMeta->pEnv);
|
|
||||||
metaDestroyLock(pMeta);
|
|
||||||
|
|
||||||
taosMemoryFreeClear(*ppMeta);
|
if (ttlMgrNeedUpgrade(pMeta->pEnv)) {
|
||||||
|
code = metaBegin(pMeta, META_BEGIN_HEAP_OS);
|
||||||
|
if (code < 0) {
|
||||||
|
metaError("vgId:%d, failed to upgrade meta, meta begin failed since %s", TD_VID(pVnode), tstrerror(terrno));
|
||||||
|
goto _err;
|
||||||
|
}
|
||||||
|
|
||||||
|
code = ttlMgrUpgrade(pMeta->pTtlMgr, pMeta);
|
||||||
|
if (code < 0) {
|
||||||
|
metaError("vgId:%d, failed to upgrade meta ttl since %s", TD_VID(pVnode), tstrerror(terrno));
|
||||||
|
goto _err;
|
||||||
|
}
|
||||||
|
|
||||||
|
code = metaCommit(pMeta, pMeta->txn);
|
||||||
|
if (code < 0) {
|
||||||
|
metaError("vgId:%d, failed to upgrade meta ttl, meta commit failed since %s", TD_VID(pVnode), tstrerror(terrno));
|
||||||
|
goto _err;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return TSDB_CODE_SUCCESS;
|
||||||
|
|
||||||
|
_err:
|
||||||
|
metaCleanup(ppMeta);
|
||||||
|
return code;
|
||||||
|
}
|
||||||
|
|
||||||
|
int metaClose(SMeta **ppMeta) {
|
||||||
|
metaCleanup(ppMeta);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -270,6 +264,32 @@ int32_t metaULock(SMeta *pMeta) {
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void metaCleanup(SMeta **ppMeta) {
|
||||||
|
SMeta *pMeta = *ppMeta;
|
||||||
|
if (pMeta) {
|
||||||
|
if (pMeta->pEnv) metaAbort(pMeta);
|
||||||
|
if (pMeta->pCache) metaCacheClose(pMeta);
|
||||||
|
if (pMeta->pIdx) metaCloseIdx(pMeta);
|
||||||
|
if (pMeta->pStreamDb) tdbTbClose(pMeta->pStreamDb);
|
||||||
|
if (pMeta->pNcolIdx) tdbTbClose(pMeta->pNcolIdx);
|
||||||
|
if (pMeta->pBtimeIdx) tdbTbClose(pMeta->pBtimeIdx);
|
||||||
|
if (pMeta->pSmaIdx) tdbTbClose(pMeta->pSmaIdx);
|
||||||
|
if (pMeta->pTtlMgr) ttlMgrClose(pMeta->pTtlMgr);
|
||||||
|
if (pMeta->pTagIvtIdx) indexClose(pMeta->pTagIvtIdx);
|
||||||
|
if (pMeta->pTagIdx) tdbTbClose(pMeta->pTagIdx);
|
||||||
|
if (pMeta->pCtbIdx) tdbTbClose(pMeta->pCtbIdx);
|
||||||
|
if (pMeta->pSuidIdx) tdbTbClose(pMeta->pSuidIdx);
|
||||||
|
if (pMeta->pNameIdx) tdbTbClose(pMeta->pNameIdx);
|
||||||
|
if (pMeta->pUidIdx) tdbTbClose(pMeta->pUidIdx);
|
||||||
|
if (pMeta->pSkmDb) tdbTbClose(pMeta->pSkmDb);
|
||||||
|
if (pMeta->pTbDb) tdbTbClose(pMeta->pTbDb);
|
||||||
|
if (pMeta->pEnv) tdbClose(pMeta->pEnv);
|
||||||
|
metaDestroyLock(pMeta);
|
||||||
|
|
||||||
|
taosMemoryFreeClear(*ppMeta);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
static int tbDbKeyCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2) {
|
static int tbDbKeyCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2) {
|
||||||
STbDbKey *pTbDbKey1 = (STbDbKey *)pKey1;
|
STbDbKey *pTbDbKey1 = (STbDbKey *)pKey1;
|
||||||
STbDbKey *pTbDbKey2 = (STbDbKey *)pKey2;
|
STbDbKey *pTbDbKey2 = (STbDbKey *)pKey2;
|
||||||
|
|
|
@ -21,6 +21,10 @@ typedef struct {
|
||||||
SMeta *pMeta;
|
SMeta *pMeta;
|
||||||
} SConvertData;
|
} SConvertData;
|
||||||
|
|
||||||
|
static void ttlMgrCleanup(STtlManger *pTtlMgr);
|
||||||
|
|
||||||
|
static int ttlMgrConvert(TTB *pOldTtlIdx, TTB *pNewTtlIdx, void *pMeta);
|
||||||
|
|
||||||
static void ttlMgrBuildKey(STtlIdxKeyV1 *pTtlKey, int64_t ttlDays, int64_t changeTimeMs, tb_uid_t uid);
|
static void ttlMgrBuildKey(STtlIdxKeyV1 *pTtlKey, int64_t ttlDays, int64_t changeTimeMs, tb_uid_t uid);
|
||||||
static int ttlIdxKeyCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2);
|
static int ttlIdxKeyCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2);
|
||||||
static int ttlIdxKeyV1Cmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2);
|
static int ttlIdxKeyV1Cmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2);
|
||||||
|
@ -36,27 +40,17 @@ const char *ttlTbname = "ttl.idx";
|
||||||
const char *ttlV1Tbname = "ttlv1.idx";
|
const char *ttlV1Tbname = "ttlv1.idx";
|
||||||
|
|
||||||
int ttlMgrOpen(STtlManger **ppTtlMgr, TDB *pEnv, int8_t rollback) {
|
int ttlMgrOpen(STtlManger **ppTtlMgr, TDB *pEnv, int8_t rollback) {
|
||||||
int ret;
|
int ret = TSDB_CODE_SUCCESS;
|
||||||
|
int64_t startNs = taosGetTimestampNs();
|
||||||
|
|
||||||
*ppTtlMgr = NULL;
|
*ppTtlMgr = NULL;
|
||||||
|
|
||||||
STtlManger *pTtlMgr = (STtlManger *)tdbOsCalloc(1, sizeof(*pTtlMgr));
|
STtlManger *pTtlMgr = (STtlManger *)tdbOsCalloc(1, sizeof(*pTtlMgr));
|
||||||
if (pTtlMgr == NULL) {
|
if (pTtlMgr == NULL) return TSDB_CODE_OUT_OF_MEMORY;
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (tdbTbExist(ttlTbname, pEnv)) {
|
|
||||||
ret = tdbTbOpen(ttlTbname, sizeof(STtlIdxKey), 0, ttlIdxKeyCmpr, pEnv, &pTtlMgr->pOldTtlIdx, rollback);
|
|
||||||
if (ret < 0) {
|
|
||||||
metaError("failed to open %s index since %s", ttlTbname, tstrerror(terrno));
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
ret = tdbTbOpen(ttlV1Tbname, TDB_VARIANT_LEN, TDB_VARIANT_LEN, ttlIdxKeyV1Cmpr, pEnv, &pTtlMgr->pTtlIdx, rollback);
|
ret = tdbTbOpen(ttlV1Tbname, TDB_VARIANT_LEN, TDB_VARIANT_LEN, ttlIdxKeyV1Cmpr, pEnv, &pTtlMgr->pTtlIdx, rollback);
|
||||||
if (ret < 0) {
|
if (ret < 0) {
|
||||||
metaError("failed to open %s since %s", ttlV1Tbname, tstrerror(terrno));
|
metaError("failed to open %s since %s", ttlV1Tbname, tstrerror(terrno));
|
||||||
|
|
||||||
tdbOsFree(pTtlMgr);
|
tdbOsFree(pTtlMgr);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
@ -66,42 +60,57 @@ int ttlMgrOpen(STtlManger **ppTtlMgr, TDB *pEnv, int8_t rollback) {
|
||||||
|
|
||||||
taosThreadRwlockInit(&pTtlMgr->lock, NULL);
|
taosThreadRwlockInit(&pTtlMgr->lock, NULL);
|
||||||
|
|
||||||
|
ret = ttlMgrFillCache(pTtlMgr);
|
||||||
|
if (ret < 0) {
|
||||||
|
metaError("failed to fill hash since %s", tstrerror(terrno));
|
||||||
|
ttlMgrCleanup(pTtlMgr);
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
int64_t endNs = taosGetTimestampNs();
|
||||||
|
metaInfo("ttl mgr open end, hash size: %d, time consumed: %" PRId64 " ns", taosHashGetSize(pTtlMgr->pTtlCache),
|
||||||
|
endNs - startNs);
|
||||||
|
|
||||||
*ppTtlMgr = pTtlMgr;
|
*ppTtlMgr = pTtlMgr;
|
||||||
return 0;
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
|
|
||||||
int ttlMgrClose(STtlManger *pTtlMgr) {
|
void ttlMgrClose(STtlManger *pTtlMgr) { ttlMgrCleanup(pTtlMgr); }
|
||||||
taosHashCleanup(pTtlMgr->pTtlCache);
|
|
||||||
taosHashCleanup(pTtlMgr->pDirtyUids);
|
bool ttlMgrNeedUpgrade(TDB *pEnv) {
|
||||||
tdbTbClose(pTtlMgr->pTtlIdx);
|
bool needUpgrade = tdbTbExist(ttlTbname, pEnv);
|
||||||
taosThreadRwlockDestroy(&pTtlMgr->lock);
|
if (needUpgrade) {
|
||||||
tdbOsFree(pTtlMgr);
|
metaInfo("find ttl idx in old version , will convert");
|
||||||
return 0;
|
}
|
||||||
|
return needUpgrade;
|
||||||
}
|
}
|
||||||
|
|
||||||
int ttlMgrBegin(STtlManger *pTtlMgr, void *pMeta) {
|
int ttlMgrUpgrade(STtlManger *pTtlMgr, void *pMeta) {
|
||||||
metaInfo("ttl mgr start open");
|
SMeta *meta = (SMeta *)pMeta;
|
||||||
int ret;
|
int ret = TSDB_CODE_SUCCESS;
|
||||||
|
|
||||||
|
if (!tdbTbExist(ttlTbname, meta->pEnv)) return TSDB_CODE_SUCCESS;
|
||||||
|
|
||||||
|
metaInfo("ttl mgr start upgrade");
|
||||||
|
|
||||||
int64_t startNs = taosGetTimestampNs();
|
int64_t startNs = taosGetTimestampNs();
|
||||||
|
|
||||||
SMeta *meta = (SMeta *)pMeta;
|
ret = tdbTbOpen(ttlTbname, sizeof(STtlIdxKey), 0, ttlIdxKeyCmpr, meta->pEnv, &pTtlMgr->pOldTtlIdx, 0);
|
||||||
|
if (ret < 0) {
|
||||||
|
metaError("failed to open %s index since %s", ttlTbname, tstrerror(terrno));
|
||||||
|
goto _out;
|
||||||
|
}
|
||||||
|
|
||||||
if (pTtlMgr->pOldTtlIdx) {
|
ret = ttlMgrConvert(pTtlMgr->pOldTtlIdx, pTtlMgr->pTtlIdx, pMeta);
|
||||||
ret = ttlMgrConvert(pTtlMgr->pOldTtlIdx, pTtlMgr->pTtlIdx, pMeta);
|
if (ret < 0) {
|
||||||
if (ret < 0) {
|
metaError("failed to convert ttl index since %s", tstrerror(terrno));
|
||||||
metaError("failed to convert ttl index since %s", tstrerror(terrno));
|
goto _out;
|
||||||
goto _out;
|
}
|
||||||
}
|
|
||||||
|
|
||||||
ret = tdbTbDropByName(ttlTbname, meta->pEnv, meta->txn);
|
ret = tdbTbDropByName(ttlTbname, meta->pEnv, meta->txn);
|
||||||
if (ret < 0) {
|
if (ret < 0) {
|
||||||
metaError("failed to drop old ttl index since %s", tstrerror(terrno));
|
metaError("failed to drop old ttl index since %s", tstrerror(terrno));
|
||||||
goto _out;
|
goto _out;
|
||||||
}
|
|
||||||
|
|
||||||
tdbTbClose(pTtlMgr->pOldTtlIdx);
|
|
||||||
pTtlMgr->pOldTtlIdx = NULL;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = ttlMgrFillCache(pTtlMgr);
|
ret = ttlMgrFillCache(pTtlMgr);
|
||||||
|
@ -111,13 +120,23 @@ int ttlMgrBegin(STtlManger *pTtlMgr, void *pMeta) {
|
||||||
}
|
}
|
||||||
|
|
||||||
int64_t endNs = taosGetTimestampNs();
|
int64_t endNs = taosGetTimestampNs();
|
||||||
|
metaInfo("ttl mgr upgrade end, hash size: %d, time consumed: %" PRId64 " ns", taosHashGetSize(pTtlMgr->pTtlCache),
|
||||||
metaInfo("ttl mgr open end, hash size: %d, time consumed: %" PRId64 " ns", taosHashGetSize(pTtlMgr->pTtlCache),
|
|
||||||
endNs - startNs);
|
endNs - startNs);
|
||||||
_out:
|
_out:
|
||||||
|
tdbTbClose(pTtlMgr->pOldTtlIdx);
|
||||||
|
pTtlMgr->pOldTtlIdx = NULL;
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void ttlMgrCleanup(STtlManger *pTtlMgr) {
|
||||||
|
taosHashCleanup(pTtlMgr->pTtlCache);
|
||||||
|
taosHashCleanup(pTtlMgr->pDirtyUids);
|
||||||
|
tdbTbClose(pTtlMgr->pTtlIdx);
|
||||||
|
taosThreadRwlockDestroy(&pTtlMgr->lock);
|
||||||
|
tdbOsFree(pTtlMgr);
|
||||||
|
}
|
||||||
|
|
||||||
static void ttlMgrBuildKey(STtlIdxKeyV1 *pTtlKey, int64_t ttlDays, int64_t changeTimeMs, tb_uid_t uid) {
|
static void ttlMgrBuildKey(STtlIdxKeyV1 *pTtlKey, int64_t ttlDays, int64_t changeTimeMs, tb_uid_t uid) {
|
||||||
if (ttlDays <= 0) return;
|
if (ttlDays <= 0) return;
|
||||||
|
|
||||||
|
@ -205,7 +224,7 @@ _out:
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
int ttlMgrConvert(TTB *pOldTtlIdx, TTB *pNewTtlIdx, void *pMeta) {
|
static int ttlMgrConvert(TTB *pOldTtlIdx, TTB *pNewTtlIdx, void *pMeta) {
|
||||||
SMeta *meta = pMeta;
|
SMeta *meta = pMeta;
|
||||||
|
|
||||||
metaInfo("ttlMgr convert ttl start.");
|
metaInfo("ttlMgr convert ttl start.");
|
||||||
|
|
|
@ -757,14 +757,22 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver) {
|
||||||
pTask->pMsgCb = &pTq->pVnode->msgCb;
|
pTask->pMsgCb = &pTq->pVnode->msgCb;
|
||||||
pTask->pMeta = pTq->pStreamMeta;
|
pTask->pMeta = pTq->pStreamMeta;
|
||||||
|
|
||||||
pTask->chkInfo.currentVer = ver;
|
// checkpoint exists, restore from the last checkpoint
|
||||||
|
if (pTask->chkInfo.keptCheckpointId != 0) {
|
||||||
pTask->dataRange.range.maxVer = ver;
|
ASSERT(pTask->chkInfo.version > 0);
|
||||||
pTask->dataRange.range.minVer = ver;
|
pTask->chkInfo.currentVer = pTask->chkInfo.version;
|
||||||
|
pTask->dataRange.range.maxVer = pTask->chkInfo.version;
|
||||||
|
pTask->dataRange.range.minVer = pTask->chkInfo.version;
|
||||||
|
pTask->chkInfo.currentVer = pTask->chkInfo.version;
|
||||||
|
} else {
|
||||||
|
pTask->chkInfo.currentVer = ver;
|
||||||
|
pTask->dataRange.range.maxVer = ver;
|
||||||
|
pTask->dataRange.range.minVer = ver;
|
||||||
|
}
|
||||||
|
|
||||||
if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) {
|
if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) {
|
||||||
SStreamTask* pSateTask = pTask;
|
SStreamTask* pSateTask = pTask;
|
||||||
SStreamTask task = {0};
|
SStreamTask task = {0};
|
||||||
if (pTask->info.fillHistory) {
|
if (pTask->info.fillHistory) {
|
||||||
task.id = pTask->streamTaskId;
|
task.id = pTask->streamTaskId;
|
||||||
task.pMeta = pTask->pMeta;
|
task.pMeta = pTask->pMeta;
|
||||||
|
@ -777,12 +785,14 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver) {
|
||||||
}
|
}
|
||||||
|
|
||||||
SReadHandle handle = {
|
SReadHandle handle = {
|
||||||
|
.version = pTask->chkInfo.currentVer,
|
||||||
.vnode = pTq->pVnode,
|
.vnode = pTq->pVnode,
|
||||||
.initTqReader = 1,
|
.initTqReader = 1,
|
||||||
.pStateBackend = pTask->pState,
|
.pStateBackend = pTask->pState,
|
||||||
.fillHistory = pTask->info.fillHistory,
|
.fillHistory = pTask->info.fillHistory,
|
||||||
.winRange = pTask->dataRange.window,
|
.winRange = pTask->dataRange.window,
|
||||||
};
|
};
|
||||||
|
|
||||||
initStorageAPI(&handle.api);
|
initStorageAPI(&handle.api);
|
||||||
|
|
||||||
pTask->exec.pExecutor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &handle, vgId);
|
pTask->exec.pExecutor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &handle, vgId);
|
||||||
|
@ -793,12 +803,13 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver) {
|
||||||
qSetTaskId(pTask->exec.pExecutor, pTask->id.taskId, pTask->id.streamId);
|
qSetTaskId(pTask->exec.pExecutor, pTask->id.taskId, pTask->id.streamId);
|
||||||
} else if (pTask->info.taskLevel == TASK_LEVEL__AGG) {
|
} else if (pTask->info.taskLevel == TASK_LEVEL__AGG) {
|
||||||
SStreamTask* pSateTask = pTask;
|
SStreamTask* pSateTask = pTask;
|
||||||
SStreamTask task = {0};
|
SStreamTask task = {0};
|
||||||
if (pTask->info.fillHistory) {
|
if (pTask->info.fillHistory) {
|
||||||
task.id = pTask->streamTaskId;
|
task.id = pTask->streamTaskId;
|
||||||
task.pMeta = pTask->pMeta;
|
task.pMeta = pTask->pMeta;
|
||||||
pSateTask = &task;
|
pSateTask = &task;
|
||||||
}
|
}
|
||||||
|
|
||||||
pTask->pState = streamStateOpen(pTq->pStreamMeta->path, pSateTask, false, -1, -1);
|
pTask->pState = streamStateOpen(pTq->pStreamMeta->path, pSateTask, false, -1, -1);
|
||||||
if (pTask->pState == NULL) {
|
if (pTask->pState == NULL) {
|
||||||
return -1;
|
return -1;
|
||||||
|
@ -806,6 +817,7 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver) {
|
||||||
|
|
||||||
int32_t numOfVgroups = (int32_t)taosArrayGetSize(pTask->pUpstreamEpInfoList);
|
int32_t numOfVgroups = (int32_t)taosArrayGetSize(pTask->pUpstreamEpInfoList);
|
||||||
SReadHandle handle = {
|
SReadHandle handle = {
|
||||||
|
.version = pTask->chkInfo.currentVer,
|
||||||
.vnode = NULL,
|
.vnode = NULL,
|
||||||
.numOfVgroups = numOfVgroups,
|
.numOfVgroups = numOfVgroups,
|
||||||
.pStateBackend = pTask->pState,
|
.pStateBackend = pTask->pState,
|
||||||
|
@ -844,6 +856,7 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver) {
|
||||||
if (pTask->tbSink.pTSchema == NULL) {
|
if (pTask->tbSink.pTSchema == NULL) {
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
pTask->tbSink.pTblInfo = tSimpleHashInit(10240, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT));
|
pTask->tbSink.pTblInfo = tSimpleHashInit(10240, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT));
|
||||||
tSimpleHashSetFreeFp(pTask->tbSink.pTblInfo, freePtr);
|
tSimpleHashSetFreeFp(pTask->tbSink.pTblInfo, freePtr);
|
||||||
}
|
}
|
||||||
|
@ -861,6 +874,11 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver) {
|
||||||
vgId, pTask->id.idStr, pChkInfo->keptCheckpointId, pChkInfo->version, pChkInfo->currentVer,
|
vgId, pTask->id.idStr, pChkInfo->keptCheckpointId, pChkInfo->version, pChkInfo->currentVer,
|
||||||
pTask->info.selfChildId, pTask->info.taskLevel, pTask->info.fillHistory, pTask->triggerParam);
|
pTask->info.selfChildId, pTask->info.taskLevel, pTask->info.fillHistory, pTask->triggerParam);
|
||||||
|
|
||||||
|
if (pTask->chkInfo.keptCheckpointId != 0) {
|
||||||
|
tqInfo("s-task:%s restore from the checkpointId:%" PRId64 " ver:%" PRId64 " currentVer:%" PRId64, pTask->id.idStr,
|
||||||
|
pChkInfo->keptCheckpointId, pChkInfo->version, pChkInfo->currentVer);
|
||||||
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1283,14 +1301,17 @@ int32_t tqProcessTaskDispatchReq(STQ* pTq, SRpcMsg* pMsg, bool exec) {
|
||||||
SDecoder decoder;
|
SDecoder decoder;
|
||||||
tDecoderInit(&decoder, (uint8_t*)msgBody, msgLen);
|
tDecoderInit(&decoder, (uint8_t*)msgBody, msgLen);
|
||||||
tDecodeStreamDispatchReq(&decoder, &req);
|
tDecodeStreamDispatchReq(&decoder, &req);
|
||||||
|
tDecoderClear(&decoder);
|
||||||
|
|
||||||
SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, req.taskId);
|
SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, req.taskId);
|
||||||
if (pTask) {
|
if (pTask != NULL) {
|
||||||
SRpcMsg rsp = {.info = pMsg->info, .code = 0};
|
SRpcMsg rsp = {.info = pMsg->info, .code = 0};
|
||||||
streamProcessDispatchMsg(pTask, &req, &rsp, exec);
|
streamProcessDispatchMsg(pTask, &req, &rsp, exec);
|
||||||
streamMetaReleaseTask(pTq->pStreamMeta, pTask);
|
streamMetaReleaseTask(pTq->pStreamMeta, pTask);
|
||||||
return 0;
|
return 0;
|
||||||
} else {
|
} else {
|
||||||
|
tqError("vgId:%d failed to find task:0x%x to handle the dispatch req, it may have been destroyed already",
|
||||||
|
pTq->pStreamMeta->vgId, req.taskId);
|
||||||
tDeleteStreamDispatchReq(&req);
|
tDeleteStreamDispatchReq(&req);
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
@ -1565,27 +1586,25 @@ int32_t tqProcessStreamCheckPointReq(STQ* pTq, SRpcMsg* pMsg) {
|
||||||
if (tDecodeStreamCheckpointReq(&decoder, &req) < 0) {
|
if (tDecodeStreamCheckpointReq(&decoder, &req) < 0) {
|
||||||
code = TSDB_CODE_MSG_DECODE_ERROR;
|
code = TSDB_CODE_MSG_DECODE_ERROR;
|
||||||
tDecoderClear(&decoder);
|
tDecoderClear(&decoder);
|
||||||
goto FAIL;
|
return code;
|
||||||
}
|
}
|
||||||
tDecoderClear(&decoder);
|
tDecoderClear(&decoder);
|
||||||
|
|
||||||
SStreamTask* pTask = streamMetaAcquireTask(pMeta, req.downstreamTaskId);
|
SStreamTask* pTask = streamMetaAcquireTask(pMeta, req.downstreamTaskId);
|
||||||
if (pTask == NULL) {
|
if (pTask == NULL) {
|
||||||
tqError("vgId:%d failed to find s-task:0x%x , it may have been destroyed already", vgId, req.downstreamTaskId);
|
tqError("vgId:%d failed to find s-task:0x%x , it may have been destroyed already", vgId, req.downstreamTaskId);
|
||||||
goto FAIL;
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
|
|
||||||
code = streamAddCheckpointRspMsg(&req, &pMsg->info, pTask);
|
code = streamAddCheckpointRspMsg(&req, &pMsg->info, pTask);
|
||||||
if (code != TSDB_CODE_SUCCESS) {
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
goto FAIL;
|
streamMetaReleaseTask(pMeta, pTask);
|
||||||
|
return code;
|
||||||
}
|
}
|
||||||
|
|
||||||
streamProcessCheckpointReq(pTask, &req);
|
streamProcessCheckpointReq(pTask, &req);
|
||||||
streamMetaReleaseTask(pMeta, pTask);
|
streamMetaReleaseTask(pMeta, pTask);
|
||||||
return code;
|
return code;
|
||||||
|
|
||||||
FAIL:
|
|
||||||
return code;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// downstream task has complete the stream task checkpoint procedure
|
// downstream task has complete the stream task checkpoint procedure
|
||||||
|
@ -1605,14 +1624,14 @@ int32_t tqProcessStreamCheckPointRsp(STQ* pTq, SRpcMsg* pMsg) {
|
||||||
if (tDecodeStreamCheckpointRsp(&decoder, &req) < 0) {
|
if (tDecodeStreamCheckpointRsp(&decoder, &req) < 0) {
|
||||||
code = TSDB_CODE_MSG_DECODE_ERROR;
|
code = TSDB_CODE_MSG_DECODE_ERROR;
|
||||||
tDecoderClear(&decoder);
|
tDecoderClear(&decoder);
|
||||||
goto FAIL;
|
return code;
|
||||||
}
|
}
|
||||||
tDecoderClear(&decoder);
|
tDecoderClear(&decoder);
|
||||||
|
|
||||||
SStreamTask* pTask = streamMetaAcquireTask(pMeta, req.upstreamTaskId);
|
SStreamTask* pTask = streamMetaAcquireTask(pMeta, req.upstreamTaskId);
|
||||||
if (pTask == NULL) {
|
if (pTask == NULL) {
|
||||||
tqError("vgId:%d failed to find s-task:0x%x , it may have been destroyed already", vgId, req.downstreamTaskId);
|
tqError("vgId:%d failed to find s-task:0x%x , it may have been destroyed already", vgId, req.downstreamTaskId);
|
||||||
goto FAIL;
|
return code;
|
||||||
}
|
}
|
||||||
|
|
||||||
tqDebug("vgId:%d s-task:%s received the checkpoint rsp, handle it", vgId, pTask->id.idStr);
|
tqDebug("vgId:%d s-task:%s received the checkpoint rsp, handle it", vgId, pTask->id.idStr);
|
||||||
|
@ -1620,7 +1639,4 @@ int32_t tqProcessStreamCheckPointRsp(STQ* pTq, SRpcMsg* pMsg) {
|
||||||
streamProcessCheckpointRsp(pMeta, pTask);
|
streamProcessCheckpointRsp(pMeta, pTask);
|
||||||
streamMetaReleaseTask(pMeta, pTask);
|
streamMetaReleaseTask(pMeta, pTask);
|
||||||
return code;
|
return code;
|
||||||
|
|
||||||
FAIL:
|
|
||||||
return code;
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -542,6 +542,8 @@ int32_t tsdbFidLevel(int32_t fid, STsdbKeepCfg *pKeepCfg, int64_t nowSec) {
|
||||||
ASSERT(0);
|
ASSERT(0);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
nowSec = nowSec - tsKeepTimeOffset * tsTickPerHour[pKeepCfg->precision];
|
||||||
|
|
||||||
key = nowSec - pKeepCfg->keep0 * tsTickPerMin[pKeepCfg->precision];
|
key = nowSec - pKeepCfg->keep0 * tsTickPerMin[pKeepCfg->precision];
|
||||||
aFid[0] = tsdbKeyFid(key, pKeepCfg->days, pKeepCfg->precision);
|
aFid[0] = tsdbKeyFid(key, pKeepCfg->days, pKeepCfg->precision);
|
||||||
key = nowSec - pKeepCfg->keep1 * tsTickPerMin[pKeepCfg->precision];
|
key = nowSec - pKeepCfg->keep1 * tsTickPerMin[pKeepCfg->precision];
|
||||||
|
|
|
@ -76,7 +76,7 @@ int32_t vnodeAlterReplica(const char *path, SAlterVnodeReplicaReq *pReq, STfs *p
|
||||||
}
|
}
|
||||||
|
|
||||||
SSyncCfg *pCfg = &info.config.syncCfg;
|
SSyncCfg *pCfg = &info.config.syncCfg;
|
||||||
|
|
||||||
pCfg->replicaNum = 0;
|
pCfg->replicaNum = 0;
|
||||||
pCfg->totalReplicaNum = 0;
|
pCfg->totalReplicaNum = 0;
|
||||||
memset(&pCfg->nodeInfo, 0, sizeof(pCfg->nodeInfo));
|
memset(&pCfg->nodeInfo, 0, sizeof(pCfg->nodeInfo));
|
||||||
|
@ -109,7 +109,7 @@ int32_t vnodeAlterReplica(const char *path, SAlterVnodeReplicaReq *pReq, STfs *p
|
||||||
pCfg->myIndex = pReq->replica + pReq->learnerSelfIndex;
|
pCfg->myIndex = pReq->replica + pReq->learnerSelfIndex;
|
||||||
}
|
}
|
||||||
|
|
||||||
vInfo("vgId:%d, save config while alter, replicas:%d totalReplicas:%d selfIndex:%d",
|
vInfo("vgId:%d, save config while alter, replicas:%d totalReplicas:%d selfIndex:%d",
|
||||||
pReq->vgId, pCfg->replicaNum, pCfg->totalReplicaNum, pCfg->myIndex);
|
pReq->vgId, pCfg->replicaNum, pCfg->totalReplicaNum, pCfg->myIndex);
|
||||||
|
|
||||||
info.config.syncCfg = *pCfg;
|
info.config.syncCfg = *pCfg;
|
||||||
|
@ -372,6 +372,10 @@ SVnode *vnodeOpen(const char *path, STfs *pTfs, SMsgCb msgCb) {
|
||||||
goto _err;
|
goto _err;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (metaUpgrade(pVnode, &pVnode->pMeta) < 0) {
|
||||||
|
vError("vgId:%d, failed to upgrade meta since %s", TD_VID(pVnode), tstrerror(terrno));
|
||||||
|
}
|
||||||
|
|
||||||
// open tsdb
|
// open tsdb
|
||||||
if (!VND_IS_RSMA(pVnode) && tsdbOpen(pVnode, &VND_TSDB(pVnode), VNODE_TSDB_DIR, NULL, rollback) < 0) {
|
if (!VND_IS_RSMA(pVnode) && tsdbOpen(pVnode, &VND_TSDB(pVnode), VNODE_TSDB_DIR, NULL, rollback) < 0) {
|
||||||
vError("vgId:%d, failed to open vnode tsdb since %s", TD_VID(pVnode), tstrerror(terrno));
|
vError("vgId:%d, failed to open vnode tsdb since %s", TD_VID(pVnode), tstrerror(terrno));
|
||||||
|
|
|
@ -496,6 +496,30 @@ int32_t vnodeGetStbIdList(SVnode *pVnode, int64_t suid, SArray *list) {
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
int32_t vnodeGetStbIdListByFilter(SVnode *pVnode, int64_t suid, SArray *list, bool (*filter)(void *arg, void *arg1),
|
||||||
|
void *arg) {
|
||||||
|
SMStbCursor *pCur = metaOpenStbCursor(pVnode->pMeta, suid);
|
||||||
|
if (!pCur) {
|
||||||
|
return TSDB_CODE_FAILED;
|
||||||
|
}
|
||||||
|
|
||||||
|
while (1) {
|
||||||
|
tb_uid_t id = metaStbCursorNext(pCur);
|
||||||
|
if (id == 0) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
if ((*filter) && (*filter)(arg, &id)) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
taosArrayPush(list, &id);
|
||||||
|
}
|
||||||
|
|
||||||
|
metaCloseStbCursor(pCur);
|
||||||
|
return TSDB_CODE_SUCCESS;
|
||||||
|
}
|
||||||
|
|
||||||
int32_t vnodeGetCtbNum(SVnode *pVnode, int64_t suid, int64_t *num) {
|
int32_t vnodeGetCtbNum(SVnode *pVnode, int64_t suid, int64_t *num) {
|
||||||
SMCtbCursor *pCur = metaOpenCtbCursor(pVnode->pMeta, suid, 0);
|
SMCtbCursor *pCur = metaOpenCtbCursor(pVnode->pMeta, suid, 0);
|
||||||
if (!pCur) {
|
if (!pCur) {
|
||||||
|
@ -531,6 +555,58 @@ static int32_t vnodeGetStbColumnNum(SVnode *pVnode, tb_uid_t suid, int *num) {
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#ifdef TD_ENTERPRISE
|
||||||
|
#define TK_LOG_STB_NUM 19
|
||||||
|
static const char *tkLogStb[TK_LOG_STB_NUM] = {"cluster_info",
|
||||||
|
"data_dir",
|
||||||
|
"dnodes_info",
|
||||||
|
"d_info",
|
||||||
|
"grants_info",
|
||||||
|
"keeper_monitor",
|
||||||
|
"logs",
|
||||||
|
"log_dir",
|
||||||
|
"log_summary",
|
||||||
|
"m_info",
|
||||||
|
"taosadapter_restful_http_request_fail",
|
||||||
|
"taosadapter_restful_http_request_in_flight",
|
||||||
|
"taosadapter_restful_http_request_summary_milliseconds",
|
||||||
|
"taosadapter_restful_http_request_total",
|
||||||
|
"taosadapter_system_cpu_percent",
|
||||||
|
"taosadapter_system_mem_percent",
|
||||||
|
"temp_dir",
|
||||||
|
"vgroups_info",
|
||||||
|
"vnodes_role"};
|
||||||
|
|
||||||
|
// exclude stbs of taoskeeper log
|
||||||
|
static int32_t vnodeGetTimeSeriesBlackList(SVnode *pVnode) {
|
||||||
|
char *dbName = strchr(pVnode->config.dbname, '.');
|
||||||
|
if (!dbName || 0 != strncmp(++dbName, "log", TSDB_DB_NAME_LEN)) {
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
int32_t tbSize = metaSizeOfTbFilterCache(pVnode, 0);
|
||||||
|
if (tbSize < TK_LOG_STB_NUM) {
|
||||||
|
for (int32_t i = 0; i < TK_LOG_STB_NUM; ++i) {
|
||||||
|
tb_uid_t suid = metaGetTableEntryUidByName(pVnode->pMeta, tkLogStb[i]);
|
||||||
|
if (suid != 0) {
|
||||||
|
metaPutTbToFilterCache(pVnode, suid, 0);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
tbSize = metaSizeOfTbFilterCache(pVnode, 0);
|
||||||
|
}
|
||||||
|
|
||||||
|
return tbSize;
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
static bool vnodeTimeSeriesFilter(void *arg1, void *arg2) {
|
||||||
|
SVnode *pVnode = (SVnode *)arg1;
|
||||||
|
|
||||||
|
if (metaTbInFilterCache(pVnode, *(tb_uid_t *)(arg2), 0)) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
int32_t vnodeGetTimeSeriesNum(SVnode *pVnode, int64_t *num) {
|
int32_t vnodeGetTimeSeriesNum(SVnode *pVnode, int64_t *num) {
|
||||||
SArray *suidList = NULL;
|
SArray *suidList = NULL;
|
||||||
|
|
||||||
|
@ -539,7 +615,13 @@ int32_t vnodeGetTimeSeriesNum(SVnode *pVnode, int64_t *num) {
|
||||||
return TSDB_CODE_FAILED;
|
return TSDB_CODE_FAILED;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (vnodeGetStbIdList(pVnode, 0, suidList) < 0) {
|
int32_t tbFilterSize = 0;
|
||||||
|
#ifdef TD_ENTERPRISE
|
||||||
|
tbFilterSize = vnodeGetTimeSeriesBlackList(pVnode);
|
||||||
|
#endif
|
||||||
|
|
||||||
|
if ((!tbFilterSize && vnodeGetStbIdList(pVnode, 0, suidList) < 0) ||
|
||||||
|
(tbFilterSize && vnodeGetStbIdListByFilter(pVnode, 0, suidList, vnodeTimeSeriesFilter, pVnode) < 0)) {
|
||||||
qError("vgId:%d, failed to get stb id list error: %s", TD_VID(pVnode), terrstr());
|
qError("vgId:%d, failed to get stb id list error: %s", TD_VID(pVnode), terrstr());
|
||||||
taosArrayDestroy(suidList);
|
taosArrayDestroy(suidList);
|
||||||
return TSDB_CODE_FAILED;
|
return TSDB_CODE_FAILED;
|
||||||
|
|
|
@ -291,12 +291,11 @@ static void setCreateDBResultIntoDataBlock(SSDataBlock* pBlock, char* dbName, ch
|
||||||
"CREATE DATABASE `%s` BUFFER %d CACHESIZE %d CACHEMODEL '%s' COMP %d DURATION %dm "
|
"CREATE DATABASE `%s` BUFFER %d CACHESIZE %d CACHEMODEL '%s' COMP %d DURATION %dm "
|
||||||
"WAL_FSYNC_PERIOD %d MAXROWS %d MINROWS %d STT_TRIGGER %d KEEP %dm,%dm,%dm PAGES %d PAGESIZE %d PRECISION '%s' REPLICA %d "
|
"WAL_FSYNC_PERIOD %d MAXROWS %d MINROWS %d STT_TRIGGER %d KEEP %dm,%dm,%dm PAGES %d PAGESIZE %d PRECISION '%s' REPLICA %d "
|
||||||
"WAL_LEVEL %d VGROUPS %d SINGLE_STABLE %d TABLE_PREFIX %d TABLE_SUFFIX %d TSDB_PAGESIZE %d "
|
"WAL_LEVEL %d VGROUPS %d SINGLE_STABLE %d TABLE_PREFIX %d TABLE_SUFFIX %d TSDB_PAGESIZE %d "
|
||||||
"WAL_RETENTION_PERIOD %d WAL_RETENTION_SIZE %" PRId64 " WAL_ROLL_PERIOD %d WAL_SEGMENT_SIZE %" PRId64,
|
"WAL_RETENTION_PERIOD %d WAL_RETENTION_SIZE %" PRId64,
|
||||||
dbName, pCfg->buffer, pCfg->cacheSize, cacheModelStr(pCfg->cacheLast), pCfg->compression, pCfg->daysPerFile,
|
dbName, pCfg->buffer, pCfg->cacheSize, cacheModelStr(pCfg->cacheLast), pCfg->compression, pCfg->daysPerFile,
|
||||||
pCfg->walFsyncPeriod, pCfg->maxRows, pCfg->minRows, pCfg->sstTrigger, pCfg->daysToKeep0, pCfg->daysToKeep1, pCfg->daysToKeep2,
|
pCfg->walFsyncPeriod, pCfg->maxRows, pCfg->minRows, pCfg->sstTrigger, pCfg->daysToKeep0, pCfg->daysToKeep1, pCfg->daysToKeep2,
|
||||||
pCfg->pages, pCfg->pageSize, prec, pCfg->replications, pCfg->walLevel, pCfg->numOfVgroups,
|
pCfg->pages, pCfg->pageSize, prec, pCfg->replications, pCfg->walLevel, pCfg->numOfVgroups,
|
||||||
1 == pCfg->numOfStables, hashPrefix, pCfg->hashSuffix, pCfg->tsdbPageSize, pCfg->walRetentionPeriod,
|
1 == pCfg->numOfStables, hashPrefix, pCfg->hashSuffix, pCfg->tsdbPageSize, pCfg->walRetentionPeriod, pCfg->walRetentionSize);
|
||||||
pCfg->walRetentionSize, pCfg->walRollPeriod, pCfg->walSegmentSize);
|
|
||||||
|
|
||||||
if (retentions) {
|
if (retentions) {
|
||||||
len += sprintf(buf2 + VARSTR_HEADER_SIZE + len, " RETENTIONS %s", retentions);
|
len += sprintf(buf2 + VARSTR_HEADER_SIZE + len, " RETENTIONS %s", retentions);
|
||||||
|
|
|
@ -627,7 +627,7 @@ int32_t getBufferPgSize(int32_t rowSize, uint32_t* defaultPgsz, uint32_t* de
|
||||||
|
|
||||||
extern void doDestroyExchangeOperatorInfo(void* param);
|
extern void doDestroyExchangeOperatorInfo(void* param);
|
||||||
|
|
||||||
void doFilter(SSDataBlock* pBlock, SFilterInfo* pFilterInfo, SColMatchInfo* pColMatchInfo);
|
int32_t doFilter(SSDataBlock* pBlock, SFilterInfo* pFilterInfo, SColMatchInfo* pColMatchInfo);
|
||||||
int32_t addTagPseudoColumnData(SReadHandle* pHandle, const SExprInfo* pExpr, int32_t numOfExpr, SSDataBlock* pBlock,
|
int32_t addTagPseudoColumnData(SReadHandle* pHandle, const SExprInfo* pExpr, int32_t numOfExpr, SSDataBlock* pBlock,
|
||||||
int32_t rows, const char* idStr, STableMetaCacheInfo* pCache);
|
int32_t rows, const char* idStr, STableMetaCacheInfo* pCache);
|
||||||
|
|
||||||
|
|
|
@ -105,7 +105,7 @@ SOperatorInfo* createMergeIntervalOperatorInfo(SOperatorInfo* downstream, SMerge
|
||||||
|
|
||||||
SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream, SMergeAlignedIntervalPhysiNode* pNode, SExecTaskInfo* pTaskInfo);
|
SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream, SMergeAlignedIntervalPhysiNode* pNode, SExecTaskInfo* pTaskInfo);
|
||||||
|
|
||||||
SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, int32_t numOfChild);
|
SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, int32_t numOfChild, SReadHandle* pHandle);
|
||||||
|
|
||||||
SOperatorInfo* createSessionAggOperatorInfo(SOperatorInfo* downstream, SSessionWinodwPhysiNode* pSessionNode, SExecTaskInfo* pTaskInfo);
|
SOperatorInfo* createSessionAggOperatorInfo(SOperatorInfo* downstream, SSessionWinodwPhysiNode* pSessionNode, SExecTaskInfo* pTaskInfo);
|
||||||
|
|
||||||
|
@ -133,7 +133,7 @@ SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SPh
|
||||||
|
|
||||||
SOperatorInfo* createStreamFinalSessionAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, int32_t numOfChild, SReadHandle* pHandle);
|
SOperatorInfo* createStreamFinalSessionAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, int32_t numOfChild, SReadHandle* pHandle);
|
||||||
|
|
||||||
SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo);
|
SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SReadHandle* pHandle);
|
||||||
|
|
||||||
SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SReadHandle* pHandle);
|
SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SReadHandle* pHandle);
|
||||||
|
|
||||||
|
|
|
@ -69,8 +69,6 @@ typedef struct {
|
||||||
SVersionRange fillHistoryVer;
|
SVersionRange fillHistoryVer;
|
||||||
STimeWindow fillHistoryWindow;
|
STimeWindow fillHistoryWindow;
|
||||||
SStreamState* pState;
|
SStreamState* pState;
|
||||||
int64_t dataVersion;
|
|
||||||
int64_t checkPointId;
|
|
||||||
} SStreamTaskInfo;
|
} SStreamTaskInfo;
|
||||||
|
|
||||||
struct SExecTaskInfo {
|
struct SExecTaskInfo {
|
||||||
|
|
|
@ -64,8 +64,8 @@ typedef int32_t (*_sort_merge_compar_fn_t)(const void* p1, const void* p2, void*
|
||||||
/**
|
/**
|
||||||
*
|
*
|
||||||
* @param type
|
* @param type
|
||||||
* @param maxRows keep maxRows at most
|
* @param maxRows keep maxRows at most, if 0, pq sort will not be used
|
||||||
* @param maxTupleLength max len of one tuple, for check if heap sort is applicable
|
* @param maxTupleLength max len of one tuple, for check if pq sort is applicable
|
||||||
* @param sortBufSize sort memory buf size, for check if heap sort is applicable
|
* @param sortBufSize sort memory buf size, for check if heap sort is applicable
|
||||||
* @return
|
* @return
|
||||||
*/
|
*/
|
||||||
|
@ -73,6 +73,8 @@ SSortHandle* tsortCreateSortHandle(SArray* pOrderInfo, int32_t type, int32_t pag
|
||||||
SSDataBlock* pBlock, const char* idstr, uint64_t maxRows, uint32_t maxTupleLength,
|
SSDataBlock* pBlock, const char* idstr, uint64_t maxRows, uint32_t maxTupleLength,
|
||||||
uint32_t sortBufSize);
|
uint32_t sortBufSize);
|
||||||
|
|
||||||
|
void tsortSetForceUsePQSort(SSortHandle* pHandle);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
*
|
*
|
||||||
* @param pSortHandle
|
* @param pSortHandle
|
||||||
|
|
|
@ -54,8 +54,8 @@ typedef struct SDataDispatchHandle {
|
||||||
// clang-format off
|
// clang-format off
|
||||||
// data format:
|
// data format:
|
||||||
// +----------------+------------------+--------------+--------------+------------------+--------------------------------------------+------------------------------------+-------------+-----------+-------------+-----------+
|
// +----------------+------------------+--------------+--------------+------------------+--------------------------------------------+------------------------------------+-------------+-----------+-------------+-----------+
|
||||||
// |SDataCacheEntry | version | total length | numOfRows | group id | col1_schema | col2_schema | col3_schema... | column#1 length, column#2 length...| col1 bitmap | col1 data | col2 bitmap | col2 data | .... | | (4 bytes) |(8 bytes)
|
// |SDataCacheEntry | version | total length | numOfRows | group id | col1_schema | col2_schema | col3_schema... | column#1 length, column#2 length...| col1 bitmap | col1 data | col2 bitmap | col2 data |
|
||||||
// | | sizeof(int32_t) |sizeof(int32) | sizeof(int32)| sizeof(uint64_t) | (sizeof(int8_t)+sizeof(int32_t))*numOfCols | sizeof(int32_t) * numOfCols | actual size | |
|
// | | sizeof(int32_t) |sizeof(int32) | sizeof(int32)| sizeof(uint64_t) | (sizeof(int8_t)+sizeof(int32_t))*numOfCols | sizeof(int32_t) * numOfCols | actual size | | |
|
||||||
// +----------------+------------------+--------------+--------------+------------------+--------------------------------------------+------------------------------------+-------------+-----------+-------------+-----------+
|
// +----------------+------------------+--------------+--------------+------------------+--------------------------------------------+------------------------------------+-------------+-----------+-------------+-----------+
|
||||||
// The length of bitmap is decided by number of rows of this data block, and the length of each column data is
|
// The length of bitmap is decided by number of rows of this data block, and the length of each column data is
|
||||||
// recorded in the first segment, next to the struct header
|
// recorded in the first segment, next to the struct header
|
||||||
|
|
|
@ -223,12 +223,6 @@ int32_t qSetStreamOpOpen(qTaskInfo_t tinfo) {
|
||||||
return code;
|
return code;
|
||||||
}
|
}
|
||||||
|
|
||||||
void qGetCheckpointVersion(qTaskInfo_t tinfo, int64_t* dataVer, int64_t* ckId) {
|
|
||||||
SExecTaskInfo* pTaskInfo = tinfo;
|
|
||||||
*dataVer = pTaskInfo->streamInfo.dataVersion;
|
|
||||||
*ckId = pTaskInfo->streamInfo.checkPointId;
|
|
||||||
}
|
|
||||||
|
|
||||||
int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numOfBlocks, int32_t type) {
|
int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numOfBlocks, int32_t type) {
|
||||||
if (tinfo == NULL) {
|
if (tinfo == NULL) {
|
||||||
return TSDB_CODE_APP_ERROR;
|
return TSDB_CODE_APP_ERROR;
|
||||||
|
|
|
@ -77,8 +77,7 @@ static void setBlockSMAInfo(SqlFunctionCtx* pCtx, SExprInfo* pExpr, SSDataBlock*
|
||||||
static void initCtxOutputBuffer(SqlFunctionCtx* pCtx, int32_t size);
|
static void initCtxOutputBuffer(SqlFunctionCtx* pCtx, int32_t size);
|
||||||
static void doApplyScalarCalculation(SOperatorInfo* pOperator, SSDataBlock* pBlock, int32_t order, int32_t scanFlag);
|
static void doApplyScalarCalculation(SOperatorInfo* pOperator, SSDataBlock* pBlock, int32_t order, int32_t scanFlag);
|
||||||
|
|
||||||
static void extractQualifiedTupleByFilterResult(SSDataBlock* pBlock, const SColumnInfoData* p, bool keep,
|
static void extractQualifiedTupleByFilterResult(SSDataBlock* pBlock, const SColumnInfoData* p, int32_t status);
|
||||||
int32_t status);
|
|
||||||
static int32_t doSetInputDataBlock(SExprSupp* pExprSup, SSDataBlock* pBlock, int32_t order, int32_t scanFlag,
|
static int32_t doSetInputDataBlock(SExprSupp* pExprSup, SSDataBlock* pBlock, int32_t order, int32_t scanFlag,
|
||||||
bool createDummyCol);
|
bool createDummyCol);
|
||||||
static int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprSupp* pSup, SDiskbasedBuf* pBuf,
|
static int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprSupp* pSup, SDiskbasedBuf* pBuf,
|
||||||
|
@ -501,20 +500,26 @@ void clearResultRowInitFlag(SqlFunctionCtx* pCtx, int32_t numOfOutput) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void doFilter(SSDataBlock* pBlock, SFilterInfo* pFilterInfo, SColMatchInfo* pColMatchInfo) {
|
int32_t doFilter(SSDataBlock* pBlock, SFilterInfo* pFilterInfo, SColMatchInfo* pColMatchInfo) {
|
||||||
if (pFilterInfo == NULL || pBlock->info.rows == 0) {
|
if (pFilterInfo == NULL || pBlock->info.rows == 0) {
|
||||||
return;
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
|
|
||||||
SFilterColumnParam param1 = {.numOfCols = taosArrayGetSize(pBlock->pDataBlock), .pDataBlock = pBlock->pDataBlock};
|
SFilterColumnParam param1 = {.numOfCols = taosArrayGetSize(pBlock->pDataBlock), .pDataBlock = pBlock->pDataBlock};
|
||||||
int32_t code = filterSetDataFromSlotId(pFilterInfo, ¶m1);
|
SColumnInfoData* p = NULL;
|
||||||
|
|
||||||
SColumnInfoData* p = NULL;
|
int32_t code = filterSetDataFromSlotId(pFilterInfo, ¶m1);
|
||||||
int32_t status = 0;
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
|
goto _err;
|
||||||
|
}
|
||||||
|
|
||||||
// todo the keep seems never to be True??
|
int32_t status = 0;
|
||||||
bool keep = filterExecute(pFilterInfo, pBlock, &p, NULL, param1.numOfCols, &status);
|
code = filterExecute(pFilterInfo, pBlock, &p, NULL, param1.numOfCols, &status);
|
||||||
extractQualifiedTupleByFilterResult(pBlock, p, keep, status);
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
|
goto _err;
|
||||||
|
}
|
||||||
|
|
||||||
|
extractQualifiedTupleByFilterResult(pBlock, p, status);
|
||||||
|
|
||||||
if (pColMatchInfo != NULL) {
|
if (pColMatchInfo != NULL) {
|
||||||
size_t size = taosArrayGetSize(pColMatchInfo->pList);
|
size_t size = taosArrayGetSize(pColMatchInfo->pList);
|
||||||
|
@ -529,23 +534,24 @@ void doFilter(SSDataBlock* pBlock, SFilterInfo* pFilterInfo, SColMatchInfo* pCol
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
code = TSDB_CODE_SUCCESS;
|
||||||
|
|
||||||
|
_err:
|
||||||
colDataDestroy(p);
|
colDataDestroy(p);
|
||||||
taosMemoryFree(p);
|
taosMemoryFree(p);
|
||||||
|
return code;
|
||||||
}
|
}
|
||||||
|
|
||||||
void extractQualifiedTupleByFilterResult(SSDataBlock* pBlock, const SColumnInfoData* p, bool keep, int32_t status) {
|
void extractQualifiedTupleByFilterResult(SSDataBlock* pBlock, const SColumnInfoData* p, int32_t status) {
|
||||||
if (keep) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
int8_t* pIndicator = (int8_t*)p->pData;
|
int8_t* pIndicator = (int8_t*)p->pData;
|
||||||
if (status == FILTER_RESULT_ALL_QUALIFIED) {
|
if (status == FILTER_RESULT_ALL_QUALIFIED) {
|
||||||
// here nothing needs to be done
|
// here nothing needs to be done
|
||||||
} else if (status == FILTER_RESULT_NONE_QUALIFIED) {
|
} else if (status == FILTER_RESULT_NONE_QUALIFIED) {
|
||||||
pBlock->info.rows = 0;
|
pBlock->info.rows = 0;
|
||||||
|
} else if (status == FILTER_RESULT_PARTIAL_QUALIFIED) {
|
||||||
|
trimDataBlock(pBlock, pBlock->info.rows, (bool*)pIndicator);
|
||||||
} else {
|
} else {
|
||||||
trimDataBlock(pBlock, pBlock->info.rows, (bool*) pIndicator);
|
qError("unknown filter result type: %d", status);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -587,7 +593,7 @@ void copyResultrowToDataBlock(SExprInfo* pExprInfo, int32_t numOfExprs, SResultR
|
||||||
pCtx[j].resultInfo->numOfRes = pRow->numOfRows;
|
pCtx[j].resultInfo->numOfRes = pRow->numOfRows;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
blockDataEnsureCapacity(pBlock, pBlock->info.rows + pCtx[j].resultInfo->numOfRes);
|
blockDataEnsureCapacity(pBlock, pBlock->info.rows + pCtx[j].resultInfo->numOfRes);
|
||||||
int32_t code = pCtx[j].fpSet.finalize(&pCtx[j], pBlock);
|
int32_t code = pCtx[j].fpSet.finalize(&pCtx[j], pBlock);
|
||||||
if (TAOS_FAILED(code)) {
|
if (TAOS_FAILED(code)) {
|
||||||
|
@ -1062,5 +1068,5 @@ void streamOpReloadState(SOperatorInfo* pOperator) {
|
||||||
SOperatorInfo* downstream = pOperator->pDownstream[0];
|
SOperatorInfo* downstream = pOperator->pDownstream[0];
|
||||||
if (downstream->fpSet.reloadStreamStateFn) {
|
if (downstream->fpSet.reloadStreamStateFn) {
|
||||||
downstream->fpSet.reloadStreamStateFn(downstream);
|
downstream->fpSet.reloadStreamStateFn(downstream);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -468,7 +468,7 @@ SOperatorInfo* createOperator(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SR
|
||||||
SIntervalPhysiNode* pIntervalPhyNode = (SIntervalPhysiNode*)pPhyNode;
|
SIntervalPhysiNode* pIntervalPhyNode = (SIntervalPhysiNode*)pPhyNode;
|
||||||
pOptr = createIntervalOperatorInfo(ops[0], pIntervalPhyNode, pTaskInfo);
|
pOptr = createIntervalOperatorInfo(ops[0], pIntervalPhyNode, pTaskInfo);
|
||||||
} else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL == type) {
|
} else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL == type) {
|
||||||
pOptr = createStreamIntervalOperatorInfo(ops[0], pPhyNode, pTaskInfo);
|
pOptr = createStreamIntervalOperatorInfo(ops[0], pPhyNode, pTaskInfo, pHandle);
|
||||||
} else if (QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL == type) {
|
} else if (QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL == type) {
|
||||||
SMergeAlignedIntervalPhysiNode* pIntervalPhyNode = (SMergeAlignedIntervalPhysiNode*)pPhyNode;
|
SMergeAlignedIntervalPhysiNode* pIntervalPhyNode = (SMergeAlignedIntervalPhysiNode*)pPhyNode;
|
||||||
pOptr = createMergeAlignedIntervalOperatorInfo(ops[0], pIntervalPhyNode, pTaskInfo);
|
pOptr = createMergeAlignedIntervalOperatorInfo(ops[0], pIntervalPhyNode, pTaskInfo);
|
||||||
|
@ -477,10 +477,10 @@ SOperatorInfo* createOperator(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SR
|
||||||
pOptr = createMergeIntervalOperatorInfo(ops[0], pIntervalPhyNode, pTaskInfo);
|
pOptr = createMergeIntervalOperatorInfo(ops[0], pIntervalPhyNode, pTaskInfo);
|
||||||
} else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL == type) {
|
} else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL == type) {
|
||||||
int32_t children = 0;
|
int32_t children = 0;
|
||||||
pOptr = createStreamFinalIntervalOperatorInfo(ops[0], pPhyNode, pTaskInfo, children);
|
pOptr = createStreamFinalIntervalOperatorInfo(ops[0], pPhyNode, pTaskInfo, children, pHandle);
|
||||||
} else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL == type) {
|
} else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL == type) {
|
||||||
int32_t children = pHandle->numOfVgroups;
|
int32_t children = pHandle->numOfVgroups;
|
||||||
pOptr = createStreamFinalIntervalOperatorInfo(ops[0], pPhyNode, pTaskInfo, children);
|
pOptr = createStreamFinalIntervalOperatorInfo(ops[0], pPhyNode, pTaskInfo, children, pHandle);
|
||||||
} else if (QUERY_NODE_PHYSICAL_PLAN_SORT == type) {
|
} else if (QUERY_NODE_PHYSICAL_PLAN_SORT == type) {
|
||||||
pOptr = createSortOperatorInfo(ops[0], (SSortPhysiNode*)pPhyNode, pTaskInfo);
|
pOptr = createSortOperatorInfo(ops[0], (SSortPhysiNode*)pPhyNode, pTaskInfo);
|
||||||
} else if (QUERY_NODE_PHYSICAL_PLAN_GROUP_SORT == type) {
|
} else if (QUERY_NODE_PHYSICAL_PLAN_GROUP_SORT == type) {
|
||||||
|
|
|
@ -38,7 +38,7 @@ typedef struct SIndefOperatorInfo {
|
||||||
SSDataBlock* pNextGroupRes;
|
SSDataBlock* pNextGroupRes;
|
||||||
} SIndefOperatorInfo;
|
} SIndefOperatorInfo;
|
||||||
|
|
||||||
static SSDataBlock* doGenerateSourceData(SOperatorInfo* pOperator);
|
static int32_t doGenerateSourceData(SOperatorInfo* pOperator);
|
||||||
static SSDataBlock* doProjectOperation(SOperatorInfo* pOperator);
|
static SSDataBlock* doProjectOperation(SOperatorInfo* pOperator);
|
||||||
static SSDataBlock* doApplyIndefinitFunction(SOperatorInfo* pOperator);
|
static SSDataBlock* doApplyIndefinitFunction(SOperatorInfo* pOperator);
|
||||||
static SArray* setRowTsColumnOutputInfo(SqlFunctionCtx* pCtx, int32_t numOfCols);
|
static SArray* setRowTsColumnOutputInfo(SqlFunctionCtx* pCtx, int32_t numOfCols);
|
||||||
|
@ -215,7 +215,7 @@ static int32_t setInfoForNewGroup(SSDataBlock* pBlock, SLimitInfo* pLimitInfo, S
|
||||||
if (newGroup) {
|
if (newGroup) {
|
||||||
resetLimitInfoForNextGroup(pLimitInfo);
|
resetLimitInfoForNextGroup(pLimitInfo);
|
||||||
}
|
}
|
||||||
|
|
||||||
return PROJECT_RETRIEVE_CONTINUE;
|
return PROJECT_RETRIEVE_CONTINUE;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -267,7 +267,12 @@ SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) {
|
||||||
SLimitInfo* pLimitInfo = &pProjectInfo->limitInfo;
|
SLimitInfo* pLimitInfo = &pProjectInfo->limitInfo;
|
||||||
|
|
||||||
if (downstream == NULL) {
|
if (downstream == NULL) {
|
||||||
return doGenerateSourceData(pOperator);
|
code = doGenerateSourceData(pOperator);
|
||||||
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
|
T_LONG_JMP(pTaskInfo->env, code);
|
||||||
|
}
|
||||||
|
|
||||||
|
return (pRes->info.rows > 0) ? pRes : NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
while (1) {
|
while (1) {
|
||||||
|
@ -616,7 +621,7 @@ SArray* setRowTsColumnOutputInfo(SqlFunctionCtx* pCtx, int32_t numOfCols) {
|
||||||
return pList;
|
return pList;
|
||||||
}
|
}
|
||||||
|
|
||||||
SSDataBlock* doGenerateSourceData(SOperatorInfo* pOperator) {
|
int32_t doGenerateSourceData(SOperatorInfo* pOperator) {
|
||||||
SProjectOperatorInfo* pProjectInfo = pOperator->info;
|
SProjectOperatorInfo* pProjectInfo = pOperator->info;
|
||||||
|
|
||||||
SExprSupp* pSup = &pOperator->exprSupp;
|
SExprSupp* pSup = &pOperator->exprSupp;
|
||||||
|
@ -630,14 +635,45 @@ SSDataBlock* doGenerateSourceData(SOperatorInfo* pOperator) {
|
||||||
for (int32_t k = 0; k < pSup->numOfExprs; ++k) {
|
for (int32_t k = 0; k < pSup->numOfExprs; ++k) {
|
||||||
int32_t outputSlotId = pExpr[k].base.resSchema.slotId;
|
int32_t outputSlotId = pExpr[k].base.resSchema.slotId;
|
||||||
|
|
||||||
ASSERT(pExpr[k].pExpr->nodeType == QUERY_NODE_VALUE);
|
if (pExpr[k].pExpr->nodeType == QUERY_NODE_VALUE) {
|
||||||
SColumnInfoData* pColInfoData = taosArrayGet(pRes->pDataBlock, outputSlotId);
|
SColumnInfoData* pColInfoData = taosArrayGet(pRes->pDataBlock, outputSlotId);
|
||||||
|
|
||||||
int32_t type = pExpr[k].base.pParam[0].param.nType;
|
int32_t type = pExpr[k].base.pParam[0].param.nType;
|
||||||
if (TSDB_DATA_TYPE_NULL == type) {
|
if (TSDB_DATA_TYPE_NULL == type) {
|
||||||
colDataSetNNULL(pColInfoData, 0, 1);
|
colDataSetNNULL(pColInfoData, 0, 1);
|
||||||
|
} else {
|
||||||
|
colDataSetVal(pColInfoData, 0, taosVariantGet(&pExpr[k].base.pParam[0].param, type), false);
|
||||||
|
}
|
||||||
|
} else if (pExpr[k].pExpr->nodeType == QUERY_NODE_FUNCTION) {
|
||||||
|
SqlFunctionCtx* pfCtx = &pSup->pCtx[k];
|
||||||
|
|
||||||
|
// UDF scalar functions will be calculated here, for example, select foo(n) from (select 1 n).
|
||||||
|
// UDF aggregate functions will be handled in agg operator.
|
||||||
|
if (fmIsScalarFunc(pfCtx->functionId)) {
|
||||||
|
SArray* pBlockList = taosArrayInit(4, POINTER_BYTES);
|
||||||
|
taosArrayPush(pBlockList, &pRes);
|
||||||
|
|
||||||
|
SColumnInfoData* pResColData = taosArrayGet(pRes->pDataBlock, outputSlotId);
|
||||||
|
SColumnInfoData idata = {.info = pResColData->info, .hasNull = true};
|
||||||
|
|
||||||
|
SScalarParam dest = {.columnData = &idata};
|
||||||
|
int32_t code = scalarCalculate((SNode*)pExpr[k].pExpr->_function.pFunctNode, pBlockList, &dest);
|
||||||
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
|
taosArrayDestroy(pBlockList);
|
||||||
|
return code;
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t startOffset = pRes->info.rows;
|
||||||
|
ASSERT(pRes->info.capacity > 0);
|
||||||
|
colDataAssign(pResColData, &idata, dest.numOfRows, &pRes->info);
|
||||||
|
colDataDestroy(&idata);
|
||||||
|
|
||||||
|
taosArrayDestroy(pBlockList);
|
||||||
|
} else {
|
||||||
|
return TSDB_CODE_OPS_NOT_SUPPORT;
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
colDataSetVal(pColInfoData, 0, taosVariantGet(&pExpr[k].base.pParam[0].param, type), false);
|
return TSDB_CODE_OPS_NOT_SUPPORT;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -653,7 +689,7 @@ SSDataBlock* doGenerateSourceData(SOperatorInfo* pOperator) {
|
||||||
pOperator->cost.openCost = (taosGetTimestampUs() - st) / 1000.0;
|
pOperator->cost.openCost = (taosGetTimestampUs() - st) / 1000.0;
|
||||||
}
|
}
|
||||||
|
|
||||||
return (pRes->info.rows > 0) ? pRes : NULL;
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void setPseudoOutputColInfo(SSDataBlock* pResult, SqlFunctionCtx* pCtx, SArray* pPseudoList) {
|
static void setPseudoOutputColInfo(SSDataBlock* pResult, SqlFunctionCtx* pCtx, SArray* pPseudoList) {
|
||||||
|
|
|
@ -402,9 +402,10 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanBase* pTableSca
|
||||||
pCost->totalRows -= pBlock->info.rows;
|
pCost->totalRows -= pBlock->info.rows;
|
||||||
|
|
||||||
if (pOperator->exprSupp.pFilterInfo != NULL) {
|
if (pOperator->exprSupp.pFilterInfo != NULL) {
|
||||||
int64_t st = taosGetTimestampUs();
|
int32_t code = doFilter(pBlock, pOperator->exprSupp.pFilterInfo, &pTableScanInfo->matchInfo);
|
||||||
doFilter(pBlock, pOperator->exprSupp.pFilterInfo, &pTableScanInfo->matchInfo);
|
if (code != TSDB_CODE_SUCCESS) return code;
|
||||||
|
|
||||||
|
int64_t st = taosGetTimestampUs();
|
||||||
double el = (taosGetTimestampUs() - st) / 1000.0;
|
double el = (taosGetTimestampUs() - st) / 1000.0;
|
||||||
pTableScanInfo->readRecorder.filterTime += el;
|
pTableScanInfo->readRecorder.filterTime += el;
|
||||||
|
|
||||||
|
@ -2921,7 +2922,7 @@ int32_t startGroupTableMergeScan(SOperatorInfo* pOperator) {
|
||||||
} else if (kWay <= 2) {
|
} else if (kWay <= 2) {
|
||||||
kWay = 2;
|
kWay = 2;
|
||||||
} else {
|
} else {
|
||||||
int i = 2;
|
int i = 2;
|
||||||
while (i * 2 <= kWay) i = i * 2;
|
while (i * 2 <= kWay) i = i * 2;
|
||||||
kWay = i;
|
kWay = i;
|
||||||
}
|
}
|
||||||
|
|
|
@ -55,7 +55,11 @@ SOperatorInfo* createSortOperatorInfo(SOperatorInfo* downstream, SSortPhysiNode*
|
||||||
pOperator->exprSupp.pExprInfo = createExprInfo(pSortNode->pExprs, NULL, &numOfCols);
|
pOperator->exprSupp.pExprInfo = createExprInfo(pSortNode->pExprs, NULL, &numOfCols);
|
||||||
pOperator->exprSupp.numOfExprs = numOfCols;
|
pOperator->exprSupp.numOfExprs = numOfCols;
|
||||||
calcSortOperMaxTupleLength(pInfo, pSortNode->pSortKeys);
|
calcSortOperMaxTupleLength(pInfo, pSortNode->pSortKeys);
|
||||||
pInfo->maxRows = pSortNode->maxRows;
|
pInfo->maxRows = -1;
|
||||||
|
if (pSortNode->node.pLimit) {
|
||||||
|
SLimitNode* pLimit = (SLimitNode*)pSortNode->node.pLimit;
|
||||||
|
if (pLimit->limit > 0) pInfo->maxRows = pLimit->limit;
|
||||||
|
}
|
||||||
|
|
||||||
int32_t numOfOutputCols = 0;
|
int32_t numOfOutputCols = 0;
|
||||||
int32_t code =
|
int32_t code =
|
||||||
|
@ -718,7 +722,7 @@ SSDataBlock* getMultiwaySortedBlockData(SSortHandle* pHandle, SSDataBlock* pData
|
||||||
resetLimitInfoForNextGroup(&pInfo->limitInfo);
|
resetLimitInfoForNextGroup(&pInfo->limitInfo);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (p->info.rows > 0) {
|
if (p->info.rows > 0 || limitReached) {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -2318,11 +2318,6 @@ static int32_t getNextQualifiedFinalWindow(SInterval* pInterval, STimeWindow* pN
|
||||||
return startPos;
|
return startPos;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void setStreamDataVersion(SExecTaskInfo* pTaskInfo, int64_t version, int64_t ckId) {
|
|
||||||
pTaskInfo->streamInfo.dataVersion = version;
|
|
||||||
pTaskInfo->streamInfo.checkPointId = ckId;
|
|
||||||
}
|
|
||||||
|
|
||||||
static void doStreamIntervalAggImpl(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBlock, uint64_t groupId,
|
static void doStreamIntervalAggImpl(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBlock, uint64_t groupId,
|
||||||
SSHashObj* pUpdatedMap) {
|
SSHashObj* pUpdatedMap) {
|
||||||
SStreamIntervalOperatorInfo* pInfo = (SStreamIntervalOperatorInfo*)pOperatorInfo->info;
|
SStreamIntervalOperatorInfo* pInfo = (SStreamIntervalOperatorInfo*)pOperatorInfo->info;
|
||||||
|
@ -2823,7 +2818,6 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
|
||||||
} else if (pBlock->info.type == STREAM_CHECKPOINT) {
|
} else if (pBlock->info.type == STREAM_CHECKPOINT) {
|
||||||
doStreamIntervalSaveCheckpoint(pOperator);
|
doStreamIntervalSaveCheckpoint(pOperator);
|
||||||
pAPI->stateStore.streamStateCommit(pInfo->pState);
|
pAPI->stateStore.streamStateCommit(pInfo->pState);
|
||||||
setStreamDataVersion(pTaskInfo, pInfo->dataVersion, pInfo->pState->checkPointId);
|
|
||||||
copyDataBlock(pInfo->pCheckpointRes, pBlock);
|
copyDataBlock(pInfo->pCheckpointRes, pBlock);
|
||||||
pOperator->status = OP_RES_TO_RETURN;
|
pOperator->status = OP_RES_TO_RETURN;
|
||||||
qDebug("===stream===return data:%s. recv datablock num:%" PRIu64,
|
qDebug("===stream===return data:%s. recv datablock num:%" PRIu64,
|
||||||
|
@ -2965,7 +2959,7 @@ void streamIntervalReloadState(SOperatorInfo* pOperator) {
|
||||||
}
|
}
|
||||||
|
|
||||||
SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode,
|
SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode,
|
||||||
SExecTaskInfo* pTaskInfo, int32_t numOfChild) {
|
SExecTaskInfo* pTaskInfo, int32_t numOfChild, SReadHandle* pHandle) {
|
||||||
SIntervalPhysiNode* pIntervalPhyNode = (SIntervalPhysiNode*)pPhyNode;
|
SIntervalPhysiNode* pIntervalPhyNode = (SIntervalPhysiNode*)pPhyNode;
|
||||||
SStreamIntervalOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamIntervalOperatorInfo));
|
SStreamIntervalOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamIntervalOperatorInfo));
|
||||||
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
|
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
|
||||||
|
@ -3056,8 +3050,9 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream,
|
||||||
pInfo->pUpdated = NULL;
|
pInfo->pUpdated = NULL;
|
||||||
pInfo->pUpdatedMap = NULL;
|
pInfo->pUpdatedMap = NULL;
|
||||||
int32_t funResSize= getMaxFunResSize(&pOperator->exprSupp, numOfCols);
|
int32_t funResSize= getMaxFunResSize(&pOperator->exprSupp, numOfCols);
|
||||||
pInfo->pState->pFileState = pAPI->stateStore.streamFileStateInit(tsStreamBufferSize, sizeof(SWinKey), pInfo->aggSup.resultRowSize, funResSize,
|
pInfo->pState->pFileState = pAPI->stateStore.streamFileStateInit(
|
||||||
compareTs, pInfo->pState, pInfo->twAggSup.deleteMark, GET_TASKID(pTaskInfo));
|
tsStreamBufferSize, sizeof(SWinKey), pInfo->aggSup.resultRowSize, funResSize, compareTs, pInfo->pState,
|
||||||
|
pInfo->twAggSup.deleteMark, GET_TASKID(pTaskInfo), pTaskInfo->streamInfo.snapshotVer);
|
||||||
pInfo->dataVersion = 0;
|
pInfo->dataVersion = 0;
|
||||||
pInfo->stateStore = pTaskInfo->storageAPI.stateStore;
|
pInfo->stateStore = pTaskInfo->storageAPI.stateStore;
|
||||||
pInfo->recvGetAll = false;
|
pInfo->recvGetAll = false;
|
||||||
|
@ -3086,7 +3081,6 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream,
|
||||||
if (res == TSDB_CODE_SUCCESS) {
|
if (res == TSDB_CODE_SUCCESS) {
|
||||||
doStreamIntervalDecodeOpState(buff, pOperator);
|
doStreamIntervalDecodeOpState(buff, pOperator);
|
||||||
taosMemoryFree(buff);
|
taosMemoryFree(buff);
|
||||||
setStreamDataVersion(pTaskInfo, pInfo->dataVersion, pInfo->pState->checkPointId);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return pOperator;
|
return pOperator;
|
||||||
|
@ -3953,7 +3947,6 @@ static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator) {
|
||||||
} else if (pBlock->info.type == STREAM_CHECKPOINT) {
|
} else if (pBlock->info.type == STREAM_CHECKPOINT) {
|
||||||
doStreamSessionSaveCheckpoint(pOperator);
|
doStreamSessionSaveCheckpoint(pOperator);
|
||||||
pAggSup->stateStore.streamStateCommit(pAggSup->pState);
|
pAggSup->stateStore.streamStateCommit(pAggSup->pState);
|
||||||
setStreamDataVersion(pOperator->pTaskInfo, pInfo->dataVersion, pAggSup->pState->checkPointId);
|
|
||||||
copyDataBlock(pInfo->pCheckpointRes, pBlock);
|
copyDataBlock(pInfo->pCheckpointRes, pBlock);
|
||||||
continue;
|
continue;
|
||||||
} else {
|
} else {
|
||||||
|
@ -4154,7 +4147,6 @@ SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SPh
|
||||||
if (res == TSDB_CODE_SUCCESS) {
|
if (res == TSDB_CODE_SUCCESS) {
|
||||||
doStreamSessionDecodeOpState(buff, pOperator);
|
doStreamSessionDecodeOpState(buff, pOperator);
|
||||||
taosMemoryFree(buff);
|
taosMemoryFree(buff);
|
||||||
setStreamDataVersion(pTaskInfo, pInfo->dataVersion, pInfo->streamAggSup.pState->checkPointId);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
setOperatorInfo(pOperator, "StreamSessionWindowAggOperator", QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION, true,
|
setOperatorInfo(pOperator, "StreamSessionWindowAggOperator", QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION, true,
|
||||||
|
@ -4256,7 +4248,6 @@ static SSDataBlock* doStreamSessionSemiAgg(SOperatorInfo* pOperator) {
|
||||||
} else if (pBlock->info.type == STREAM_CHECKPOINT) {
|
} else if (pBlock->info.type == STREAM_CHECKPOINT) {
|
||||||
doStreamSessionSaveCheckpoint(pOperator);
|
doStreamSessionSaveCheckpoint(pOperator);
|
||||||
pAggSup->stateStore.streamStateCommit(pAggSup->pState);
|
pAggSup->stateStore.streamStateCommit(pAggSup->pState);
|
||||||
setStreamDataVersion(pOperator->pTaskInfo, pInfo->dataVersion, pAggSup->pState->checkPointId);
|
|
||||||
pOperator->status = OP_RES_TO_RETURN;
|
pOperator->status = OP_RES_TO_RETURN;
|
||||||
continue;
|
continue;
|
||||||
} else {
|
} else {
|
||||||
|
@ -4681,7 +4672,6 @@ static SSDataBlock* doStreamStateAgg(SOperatorInfo* pOperator) {
|
||||||
} else if (pBlock->info.type == STREAM_CHECKPOINT) {
|
} else if (pBlock->info.type == STREAM_CHECKPOINT) {
|
||||||
doStreamSessionSaveCheckpoint(pOperator);
|
doStreamSessionSaveCheckpoint(pOperator);
|
||||||
pInfo->streamAggSup.stateStore.streamStateCommit(pInfo->streamAggSup.pState);
|
pInfo->streamAggSup.stateStore.streamStateCommit(pInfo->streamAggSup.pState);
|
||||||
setStreamDataVersion(pOperator->pTaskInfo, pInfo->dataVersion, pInfo->streamAggSup.pState->checkPointId);
|
|
||||||
copyDataBlock(pInfo->pCheckpointRes, pBlock);
|
copyDataBlock(pInfo->pCheckpointRes, pBlock);
|
||||||
continue;
|
continue;
|
||||||
} else {
|
} else {
|
||||||
|
@ -4878,7 +4868,6 @@ SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhys
|
||||||
if (res == TSDB_CODE_SUCCESS) {
|
if (res == TSDB_CODE_SUCCESS) {
|
||||||
doStreamStateDecodeOpState(buff, pOperator);
|
doStreamStateDecodeOpState(buff, pOperator);
|
||||||
taosMemoryFree(buff);
|
taosMemoryFree(buff);
|
||||||
setStreamDataVersion(pTaskInfo, pInfo->dataVersion, pInfo->streamAggSup.pState->checkPointId);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
setOperatorInfo(pOperator, "StreamStateAggOperator", QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE, true, OP_NOT_OPENED,
|
setOperatorInfo(pOperator, "StreamStateAggOperator", QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE, true, OP_NOT_OPENED,
|
||||||
|
@ -5548,7 +5537,6 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
|
||||||
} else if (pBlock->info.type == STREAM_CHECKPOINT) {
|
} else if (pBlock->info.type == STREAM_CHECKPOINT) {
|
||||||
doStreamIntervalSaveCheckpoint(pOperator);
|
doStreamIntervalSaveCheckpoint(pOperator);
|
||||||
pAPI->stateStore.streamStateCommit(pInfo->pState);
|
pAPI->stateStore.streamStateCommit(pInfo->pState);
|
||||||
setStreamDataVersion(pTaskInfo, pInfo->dataVersion, pInfo->pState->checkPointId);
|
|
||||||
pInfo->reCkBlock = true;
|
pInfo->reCkBlock = true;
|
||||||
copyDataBlock(pInfo->pCheckpointRes, pBlock);
|
copyDataBlock(pInfo->pCheckpointRes, pBlock);
|
||||||
qDebug("===stream===return data:single interval. recv datablock num:%" PRIu64, pInfo->numOfDatapack);
|
qDebug("===stream===return data:single interval. recv datablock num:%" PRIu64, pInfo->numOfDatapack);
|
||||||
|
@ -5626,7 +5614,7 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
|
||||||
}
|
}
|
||||||
|
|
||||||
SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode,
|
SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode,
|
||||||
SExecTaskInfo* pTaskInfo) {
|
SExecTaskInfo* pTaskInfo, SReadHandle* pHandle) {
|
||||||
SStreamIntervalOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamIntervalOperatorInfo));
|
SStreamIntervalOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamIntervalOperatorInfo));
|
||||||
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
|
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
|
||||||
if (pInfo == NULL || pOperator == NULL) {
|
if (pInfo == NULL || pOperator == NULL) {
|
||||||
|
@ -5716,7 +5704,7 @@ SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SPhys
|
||||||
|
|
||||||
pInfo->pState->pFileState = pTaskInfo->storageAPI.stateStore.streamFileStateInit(
|
pInfo->pState->pFileState = pTaskInfo->storageAPI.stateStore.streamFileStateInit(
|
||||||
tsStreamBufferSize, sizeof(SWinKey), pInfo->aggSup.resultRowSize, funResSize, compareTs, pInfo->pState,
|
tsStreamBufferSize, sizeof(SWinKey), pInfo->aggSup.resultRowSize, funResSize, compareTs, pInfo->pState,
|
||||||
pInfo->twAggSup.deleteMark, GET_TASKID(pTaskInfo));
|
pInfo->twAggSup.deleteMark, GET_TASKID(pTaskInfo), pTaskInfo->streamInfo.snapshotVer);
|
||||||
|
|
||||||
setOperatorInfo(pOperator, "StreamIntervalOperator", QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL, true, OP_NOT_OPENED,
|
setOperatorInfo(pOperator, "StreamIntervalOperator", QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL, true, OP_NOT_OPENED,
|
||||||
pInfo, pTaskInfo);
|
pInfo, pTaskInfo);
|
||||||
|
@ -5735,7 +5723,6 @@ SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SPhys
|
||||||
if (res == TSDB_CODE_SUCCESS) {
|
if (res == TSDB_CODE_SUCCESS) {
|
||||||
doStreamIntervalDecodeOpState(buff, pOperator);
|
doStreamIntervalDecodeOpState(buff, pOperator);
|
||||||
taosMemoryFree(buff);
|
taosMemoryFree(buff);
|
||||||
setStreamDataVersion(pTaskInfo, pInfo->dataVersion, pInfo->pState->checkPointId);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
initIntervalDownStream(downstream, pPhyNode->type, pInfo);
|
initIntervalDownStream(downstream, pPhyNode->type, pInfo);
|
||||||
|
|
|
@ -45,6 +45,7 @@ struct SSortHandle {
|
||||||
uint64_t maxRows;
|
uint64_t maxRows;
|
||||||
uint32_t maxTupleLength;
|
uint32_t maxTupleLength;
|
||||||
uint32_t sortBufSize;
|
uint32_t sortBufSize;
|
||||||
|
bool forceUsePQSort;
|
||||||
BoundedQueue* pBoundedQueue;
|
BoundedQueue* pBoundedQueue;
|
||||||
uint32_t tmpRowIdx;
|
uint32_t tmpRowIdx;
|
||||||
|
|
||||||
|
@ -73,7 +74,7 @@ static void* createTuple(uint32_t columnNum, uint32_t tupleLen) {
|
||||||
uint32_t totalLen = sizeof(uint32_t) * columnNum + BitmapLen(columnNum) + tupleLen;
|
uint32_t totalLen = sizeof(uint32_t) * columnNum + BitmapLen(columnNum) + tupleLen;
|
||||||
return taosMemoryCalloc(1, totalLen);
|
return taosMemoryCalloc(1, totalLen);
|
||||||
}
|
}
|
||||||
static void destoryTuple(void* t) { taosMemoryFree(t); }
|
static void destoryAllocatedTuple(void* t) { taosMemoryFree(t); }
|
||||||
|
|
||||||
#define tupleOffset(tuple, colIdx) ((uint32_t*)(tuple + sizeof(uint32_t) * colIdx))
|
#define tupleOffset(tuple, colIdx) ((uint32_t*)(tuple + sizeof(uint32_t) * colIdx))
|
||||||
#define tupleSetOffset(tuple, colIdx, offset) (*tupleOffset(tuple, colIdx) = offset)
|
#define tupleSetOffset(tuple, colIdx, offset) (*tupleOffset(tuple, colIdx) = offset)
|
||||||
|
@ -107,12 +108,65 @@ static void* tupleGetField(char* t, uint32_t colIdx, uint32_t colNum) {
|
||||||
return t + *tupleOffset(t, colIdx);
|
return t + *tupleOffset(t, colIdx);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int32_t colDataComparFn(const void* pLeft, const void* pRight, void* param);
|
|
||||||
|
|
||||||
SSDataBlock* tsortGetSortedDataBlock(const SSortHandle* pSortHandle) {
|
SSDataBlock* tsortGetSortedDataBlock(const SSortHandle* pSortHandle) {
|
||||||
return createOneDataBlock(pSortHandle->pDataBlock, false);
|
return createOneDataBlock(pSortHandle->pDataBlock, false);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#define AllocatedTupleType 0
|
||||||
|
#define ReferencedTupleType 1 // tuple references to one row in pDataBlock
|
||||||
|
typedef struct TupleDesc {
|
||||||
|
uint8_t type;
|
||||||
|
char* data; // if type is AllocatedTuple, then points to the created tuple, otherwise points to the DataBlock
|
||||||
|
} TupleDesc;
|
||||||
|
|
||||||
|
typedef struct ReferencedTuple {
|
||||||
|
TupleDesc desc;
|
||||||
|
size_t rowIndex;
|
||||||
|
} ReferencedTuple;
|
||||||
|
|
||||||
|
static TupleDesc* createAllocatedTuple(SSDataBlock* pBlock, size_t colNum, uint32_t tupleLen, size_t rowIdx) {
|
||||||
|
TupleDesc* t = taosMemoryCalloc(1, sizeof(TupleDesc));
|
||||||
|
void* pTuple = createTuple(colNum, tupleLen);
|
||||||
|
if (!pTuple) {
|
||||||
|
taosMemoryFree(t);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
size_t colLen = 0;
|
||||||
|
uint32_t offset = tupleGetDataStartOffset(colNum);
|
||||||
|
for (size_t colIdx = 0; colIdx < colNum; ++colIdx) {
|
||||||
|
SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, colIdx);
|
||||||
|
if (colDataIsNull_s(pCol, rowIdx)) {
|
||||||
|
offset = tupleAddField((char**)&pTuple, colNum, offset, colIdx, 0, 0, true, tupleLen);
|
||||||
|
} else {
|
||||||
|
colLen = colDataGetRowLength(pCol, rowIdx);
|
||||||
|
offset =
|
||||||
|
tupleAddField((char**)&pTuple, colNum, offset, colIdx, colDataGetData(pCol, rowIdx), colLen, false, tupleLen);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
t->type = AllocatedTupleType;
|
||||||
|
t->data = pTuple;
|
||||||
|
return t;
|
||||||
|
}
|
||||||
|
|
||||||
|
void* tupleDescGetField(const TupleDesc* pDesc, int32_t colIdx, uint32_t colNum) {
|
||||||
|
if (pDesc->type == ReferencedTupleType) {
|
||||||
|
ReferencedTuple* pRefTuple = (ReferencedTuple*)pDesc;
|
||||||
|
SColumnInfoData* pCol = taosArrayGet(((SSDataBlock*)pDesc->data)->pDataBlock, colIdx);
|
||||||
|
if (colDataIsNull_s(pCol, pRefTuple->rowIndex)) return NULL;
|
||||||
|
return colDataGetData(pCol, pRefTuple->rowIndex);
|
||||||
|
} else {
|
||||||
|
return tupleGetField(pDesc->data, colIdx, colNum);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void destroyTuple(void* t) {
|
||||||
|
TupleDesc* pDesc = t;
|
||||||
|
if (pDesc->type == AllocatedTupleType) {
|
||||||
|
destoryAllocatedTuple(pDesc->data);
|
||||||
|
taosMemoryFree(pDesc);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
*
|
*
|
||||||
* @param type
|
* @param type
|
||||||
|
@ -130,11 +184,11 @@ SSortHandle* tsortCreateSortHandle(SArray* pSortInfo, int32_t type, int32_t page
|
||||||
pSortHandle->loops = 0;
|
pSortHandle->loops = 0;
|
||||||
|
|
||||||
pSortHandle->maxTupleLength = maxTupleLength;
|
pSortHandle->maxTupleLength = maxTupleLength;
|
||||||
if (maxRows < 0)
|
if (maxRows != 0) {
|
||||||
pSortHandle->sortBufSize = 0;
|
|
||||||
else
|
|
||||||
pSortHandle->sortBufSize = sortBufSize;
|
pSortHandle->sortBufSize = sortBufSize;
|
||||||
pSortHandle->maxRows = maxRows;
|
pSortHandle->maxRows = maxRows;
|
||||||
|
}
|
||||||
|
pSortHandle->forceUsePQSort = false;
|
||||||
|
|
||||||
if (pBlock != NULL) {
|
if (pBlock != NULL) {
|
||||||
pSortHandle->pDataBlock = createOneDataBlock(pBlock, false);
|
pSortHandle->pDataBlock = createOneDataBlock(pBlock, false);
|
||||||
|
@ -779,7 +833,7 @@ static int32_t createInitialSources(SSortHandle* pHandle) {
|
||||||
|
|
||||||
int64_t el = taosGetTimestampUs() - p;
|
int64_t el = taosGetTimestampUs() - p;
|
||||||
pHandle->sortElapsed += el;
|
pHandle->sortElapsed += el;
|
||||||
|
if (pHandle->maxRows > 0) blockDataKeepFirstNRows(pHandle->pDataBlock, pHandle->maxRows);
|
||||||
code = doAddToBuf(pHandle->pDataBlock, pHandle);
|
code = doAddToBuf(pHandle->pDataBlock, pHandle);
|
||||||
if (code != TSDB_CODE_SUCCESS) {
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
return code;
|
return code;
|
||||||
|
@ -804,6 +858,7 @@ static int32_t createInitialSources(SSortHandle* pHandle) {
|
||||||
return code;
|
return code;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (pHandle->maxRows > 0) blockDataKeepFirstNRows(pHandle->pDataBlock, pHandle->maxRows);
|
||||||
int64_t el = taosGetTimestampUs() - p;
|
int64_t el = taosGetTimestampUs() - p;
|
||||||
pHandle->sortElapsed += el;
|
pHandle->sortElapsed += el;
|
||||||
|
|
||||||
|
@ -936,8 +991,17 @@ static STupleHandle* tsortBufMergeSortNextTuple(SSortHandle* pHandle) {
|
||||||
return &pHandle->tupleHandle;
|
return &pHandle->tupleHandle;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static bool tsortIsForceUsePQSort(SSortHandle* pHandle) {
|
||||||
|
return pHandle->forceUsePQSort == true;
|
||||||
|
}
|
||||||
|
|
||||||
|
void tsortSetForceUsePQSort(SSortHandle* pHandle) {
|
||||||
|
pHandle->forceUsePQSort = true;
|
||||||
|
}
|
||||||
|
|
||||||
static bool tsortIsPQSortApplicable(SSortHandle* pHandle) {
|
static bool tsortIsPQSortApplicable(SSortHandle* pHandle) {
|
||||||
if (pHandle->type != SORT_SINGLESOURCE_SORT) return false;
|
if (pHandle->type != SORT_SINGLESOURCE_SORT) return false;
|
||||||
|
if (tsortIsForceUsePQSort(pHandle)) return true;
|
||||||
uint64_t maxRowsFitInMemory = pHandle->sortBufSize / (pHandle->maxTupleLength + sizeof(char*));
|
uint64_t maxRowsFitInMemory = pHandle->sortBufSize / (pHandle->maxTupleLength + sizeof(char*));
|
||||||
return maxRowsFitInMemory > pHandle->maxRows;
|
return maxRowsFitInMemory > pHandle->maxRows;
|
||||||
}
|
}
|
||||||
|
@ -956,16 +1020,17 @@ static bool tsortPQComFnReverse(void*a, void* b, void* param) {
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int32_t colDataComparFn(const void* pLeft, const void* pRight, void* param) {
|
static int32_t tupleComparFn(const void* pLeft, const void* pRight, void* param) {
|
||||||
char* pLTuple = (char*)pLeft;
|
TupleDesc* pLeftDesc = (TupleDesc*)pLeft;
|
||||||
char* pRTuple = (char*)pRight;
|
TupleDesc* pRightDesc = (TupleDesc*)pRight;
|
||||||
|
|
||||||
SSortHandle* pHandle = (SSortHandle*)param;
|
SSortHandle* pHandle = (SSortHandle*)param;
|
||||||
SArray* orderInfo = (SArray*)pHandle->pSortInfo;
|
SArray* orderInfo = (SArray*)pHandle->pSortInfo;
|
||||||
uint32_t colNum = blockDataGetNumOfCols(pHandle->pDataBlock);
|
uint32_t colNum = blockDataGetNumOfCols(pHandle->pDataBlock);
|
||||||
for (int32_t i = 0; i < orderInfo->size; ++i) {
|
for (int32_t i = 0; i < orderInfo->size; ++i) {
|
||||||
SBlockOrderInfo* pOrder = TARRAY_GET_ELEM(orderInfo, i);
|
SBlockOrderInfo* pOrder = TARRAY_GET_ELEM(orderInfo, i);
|
||||||
void *lData = tupleGetField(pLTuple, pOrder->slotId, colNum);
|
void *lData = tupleDescGetField(pLeftDesc, pOrder->slotId, colNum);
|
||||||
void *rData = tupleGetField(pRTuple, pOrder->slotId, colNum);
|
void *rData = tupleDescGetField(pRightDesc, pOrder->slotId, colNum);
|
||||||
if (!lData && !rData) continue;
|
if (!lData && !rData) continue;
|
||||||
if (!lData) return pOrder->nullFirst ? -1 : 1;
|
if (!lData) return pOrder->nullFirst ? -1 : 1;
|
||||||
if (!rData) return pOrder->nullFirst ? 1 : -1;
|
if (!rData) return pOrder->nullFirst ? 1 : -1;
|
||||||
|
@ -984,9 +1049,9 @@ static int32_t colDataComparFn(const void* pLeft, const void* pRight, void* para
|
||||||
}
|
}
|
||||||
|
|
||||||
static int32_t tsortOpenForPQSort(SSortHandle* pHandle) {
|
static int32_t tsortOpenForPQSort(SSortHandle* pHandle) {
|
||||||
pHandle->pBoundedQueue = createBoundedQueue(pHandle->maxRows, tsortPQCompFn, destoryTuple, pHandle);
|
pHandle->pBoundedQueue = createBoundedQueue(pHandle->maxRows, tsortPQCompFn, destroyTuple, pHandle);
|
||||||
if (NULL == pHandle->pBoundedQueue) return TSDB_CODE_OUT_OF_MEMORY;
|
if (NULL == pHandle->pBoundedQueue) return TSDB_CODE_OUT_OF_MEMORY;
|
||||||
tsortSetComparFp(pHandle, colDataComparFn);
|
tsortSetComparFp(pHandle, tupleComparFn);
|
||||||
|
|
||||||
SSortSource** pSource = taosArrayGet(pHandle->pOrderedSource, 0);
|
SSortSource** pSource = taosArrayGet(pHandle->pOrderedSource, 0);
|
||||||
SSortSource* source = *pSource;
|
SSortSource* source = *pSource;
|
||||||
|
@ -1018,24 +1083,17 @@ static int32_t tsortOpenForPQSort(SSortHandle* pHandle) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
size_t colLen = 0;
|
ReferencedTuple refTuple = {.desc.data = (char*)pBlock, .desc.type = ReferencedTupleType, .rowIndex = 0};
|
||||||
for (size_t rowIdx = 0; rowIdx < pBlock->info.rows; ++rowIdx) {
|
for (size_t rowIdx = 0; rowIdx < pBlock->info.rows; ++rowIdx) {
|
||||||
void* pTuple = createTuple(colNum, tupleLen);
|
refTuple.rowIndex = rowIdx;
|
||||||
if (pTuple == NULL) return TSDB_CODE_OUT_OF_MEMORY;
|
pqNode.data = &refTuple;
|
||||||
|
PriorityQueueNode* pPushedNode = taosBQPush(pHandle->pBoundedQueue, &pqNode);
|
||||||
uint32_t offset = tupleGetDataStartOffset(colNum);
|
if (!pPushedNode) {
|
||||||
for (size_t colIdx = 0; colIdx < colNum; ++colIdx) {
|
// do nothing if push failed
|
||||||
SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, colIdx);
|
} else {
|
||||||
if (colDataIsNull_s(pCol, rowIdx)) {
|
pPushedNode->data = createAllocatedTuple(pBlock, colNum, tupleLen, rowIdx);
|
||||||
offset = tupleAddField((char**)&pTuple, colNum, offset, colIdx, 0, 0, true, tupleLen);
|
if (pPushedNode->data == NULL) return TSDB_CODE_OUT_OF_MEMORY;
|
||||||
} else {
|
|
||||||
colLen = colDataGetRowLength(pCol, rowIdx);
|
|
||||||
offset = tupleAddField((char**)&pTuple, colNum, offset, colIdx, colDataGetData(pCol, rowIdx), colLen, false,
|
|
||||||
tupleLen);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
pqNode.data = pTuple;
|
|
||||||
taosBQPush(pHandle->pBoundedQueue, &pqNode);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
|
@ -1044,7 +1102,7 @@ static int32_t tsortOpenForPQSort(SSortHandle* pHandle) {
|
||||||
static STupleHandle* tsortPQSortNextTuple(SSortHandle* pHandle) {
|
static STupleHandle* tsortPQSortNextTuple(SSortHandle* pHandle) {
|
||||||
blockDataCleanup(pHandle->pDataBlock);
|
blockDataCleanup(pHandle->pDataBlock);
|
||||||
blockDataEnsureCapacity(pHandle->pDataBlock, 1);
|
blockDataEnsureCapacity(pHandle->pDataBlock, 1);
|
||||||
// abondan the top tuple if queue size bigger than max size
|
// abandon the top tuple if queue size bigger than max size
|
||||||
if (taosBQSize(pHandle->pBoundedQueue) == taosBQMaxSize(pHandle->pBoundedQueue) + 1) {
|
if (taosBQSize(pHandle->pBoundedQueue) == taosBQMaxSize(pHandle->pBoundedQueue) + 1) {
|
||||||
taosBQPop(pHandle->pBoundedQueue);
|
taosBQPop(pHandle->pBoundedQueue);
|
||||||
}
|
}
|
||||||
|
@ -1056,7 +1114,7 @@ static STupleHandle* tsortPQSortNextTuple(SSortHandle* pHandle) {
|
||||||
if (taosBQSize(pHandle->pBoundedQueue) > 0) {
|
if (taosBQSize(pHandle->pBoundedQueue) > 0) {
|
||||||
uint32_t colNum = blockDataGetNumOfCols(pHandle->pDataBlock);
|
uint32_t colNum = blockDataGetNumOfCols(pHandle->pDataBlock);
|
||||||
PriorityQueueNode* node = taosBQTop(pHandle->pBoundedQueue);
|
PriorityQueueNode* node = taosBQTop(pHandle->pBoundedQueue);
|
||||||
char* pTuple = (char*)node->data;
|
char* pTuple = ((TupleDesc*)node->data)->data;
|
||||||
|
|
||||||
for (uint32_t i = 0; i < colNum; ++i) {
|
for (uint32_t i = 0; i < colNum; ++i) {
|
||||||
void* pData = tupleGetField(pTuple, i, colNum);
|
void* pData = tupleGetField(pTuple, i, colNum);
|
||||||
|
|
|
@ -502,7 +502,6 @@ static int32_t logicSortCopy(const SSortLogicNode* pSrc, SSortLogicNode* pDst) {
|
||||||
COPY_BASE_OBJECT_FIELD(node, logicNodeCopy);
|
COPY_BASE_OBJECT_FIELD(node, logicNodeCopy);
|
||||||
CLONE_NODE_LIST_FIELD(pSortKeys);
|
CLONE_NODE_LIST_FIELD(pSortKeys);
|
||||||
COPY_SCALAR_FIELD(groupSort);
|
COPY_SCALAR_FIELD(groupSort);
|
||||||
COPY_SCALAR_FIELD(maxRows);
|
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -2115,9 +2115,6 @@ static int32_t physiSortNodeToJson(const void* pObj, SJson* pJson) {
|
||||||
if (TSDB_CODE_SUCCESS == code) {
|
if (TSDB_CODE_SUCCESS == code) {
|
||||||
code = nodeListToJson(pJson, jkSortPhysiPlanTargets, pNode->pTargets);
|
code = nodeListToJson(pJson, jkSortPhysiPlanTargets, pNode->pTargets);
|
||||||
}
|
}
|
||||||
if (TSDB_CODE_SUCCESS == code) {
|
|
||||||
code = tjsonAddIntegerToObject(pJson, jkSortPhysiPlanMaxRows, pNode->maxRows);
|
|
||||||
}
|
|
||||||
|
|
||||||
return code;
|
return code;
|
||||||
}
|
}
|
||||||
|
@ -2135,9 +2132,6 @@ static int32_t jsonToPhysiSortNode(const SJson* pJson, void* pObj) {
|
||||||
if (TSDB_CODE_SUCCESS == code) {
|
if (TSDB_CODE_SUCCESS == code) {
|
||||||
code = jsonToNodeList(pJson, jkSortPhysiPlanTargets, &pNode->pTargets);
|
code = jsonToNodeList(pJson, jkSortPhysiPlanTargets, &pNode->pTargets);
|
||||||
}
|
}
|
||||||
if (TSDB_CODE_SUCCESS == code) {
|
|
||||||
code = tjsonGetBigIntValue(pJson, jkSortPhysiPlanMaxRows, &pNode->maxRows);
|
|
||||||
}
|
|
||||||
|
|
||||||
return code;
|
return code;
|
||||||
}
|
}
|
||||||
|
|
|
@ -2594,7 +2594,7 @@ static int32_t msgToPhysiMergeNode(STlvDecoder* pDecoder, void* pObj) {
|
||||||
return code;
|
return code;
|
||||||
}
|
}
|
||||||
|
|
||||||
enum { PHY_SORT_CODE_BASE_NODE = 1, PHY_SORT_CODE_EXPR, PHY_SORT_CODE_SORT_KEYS, PHY_SORT_CODE_TARGETS, PHY_SORT_CODE_MAX_ROWS };
|
enum { PHY_SORT_CODE_BASE_NODE = 1, PHY_SORT_CODE_EXPR, PHY_SORT_CODE_SORT_KEYS, PHY_SORT_CODE_TARGETS };
|
||||||
|
|
||||||
static int32_t physiSortNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
|
static int32_t physiSortNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
|
||||||
const SSortPhysiNode* pNode = (const SSortPhysiNode*)pObj;
|
const SSortPhysiNode* pNode = (const SSortPhysiNode*)pObj;
|
||||||
|
@ -2609,9 +2609,6 @@ static int32_t physiSortNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
|
||||||
if (TSDB_CODE_SUCCESS == code) {
|
if (TSDB_CODE_SUCCESS == code) {
|
||||||
code = tlvEncodeObj(pEncoder, PHY_SORT_CODE_TARGETS, nodeListToMsg, pNode->pTargets);
|
code = tlvEncodeObj(pEncoder, PHY_SORT_CODE_TARGETS, nodeListToMsg, pNode->pTargets);
|
||||||
}
|
}
|
||||||
if (TSDB_CODE_SUCCESS == code) {
|
|
||||||
code = tlvEncodeI64(pEncoder, PHY_SORT_CODE_MAX_ROWS, pNode->maxRows);
|
|
||||||
}
|
|
||||||
|
|
||||||
return code;
|
return code;
|
||||||
}
|
}
|
||||||
|
@ -2635,9 +2632,6 @@ static int32_t msgToPhysiSortNode(STlvDecoder* pDecoder, void* pObj) {
|
||||||
case PHY_SORT_CODE_TARGETS:
|
case PHY_SORT_CODE_TARGETS:
|
||||||
code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pTargets);
|
code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pTargets);
|
||||||
break;
|
break;
|
||||||
case PHY_SORT_CODE_MAX_ROWS:
|
|
||||||
code = tlvDecodeI64(pTlv, &pNode->maxRows);
|
|
||||||
break;
|
|
||||||
default:
|
default:
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
|
@ -907,6 +907,10 @@ void nodesDestroyNode(SNode* pNode) {
|
||||||
SCreateIndexStmt* pStmt = (SCreateIndexStmt*)pNode;
|
SCreateIndexStmt* pStmt = (SCreateIndexStmt*)pNode;
|
||||||
nodesDestroyNode((SNode*)pStmt->pOptions);
|
nodesDestroyNode((SNode*)pStmt->pOptions);
|
||||||
nodesDestroyList(pStmt->pCols);
|
nodesDestroyList(pStmt->pCols);
|
||||||
|
if (pStmt->pReq) {
|
||||||
|
tFreeSMCreateSmaReq(pStmt->pReq);
|
||||||
|
taosMemoryFreeClear(pStmt->pReq);
|
||||||
|
}
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
case QUERY_NODE_DROP_INDEX_STMT: // no pointer field
|
case QUERY_NODE_DROP_INDEX_STMT: // no pointer field
|
||||||
|
@ -1053,6 +1057,7 @@ void nodesDestroyNode(SNode* pNode) {
|
||||||
}
|
}
|
||||||
case QUERY_NODE_QUERY: {
|
case QUERY_NODE_QUERY: {
|
||||||
SQuery* pQuery = (SQuery*)pNode;
|
SQuery* pQuery = (SQuery*)pNode;
|
||||||
|
nodesDestroyNode(pQuery->pPrevRoot);
|
||||||
nodesDestroyNode(pQuery->pRoot);
|
nodesDestroyNode(pQuery->pRoot);
|
||||||
nodesDestroyNode(pQuery->pPostRoot);
|
nodesDestroyNode(pQuery->pPostRoot);
|
||||||
taosMemoryFreeClear(pQuery->pResSchema);
|
taosMemoryFreeClear(pQuery->pResSchema);
|
||||||
|
|
|
@ -35,6 +35,7 @@ int32_t translate(SParseContext* pParseCxt, SQuery* pQuery, SParseMetaCache* pMe
|
||||||
int32_t extractResultSchema(const SNode* pRoot, int32_t* numOfCols, SSchema** pSchema);
|
int32_t extractResultSchema(const SNode* pRoot, int32_t* numOfCols, SSchema** pSchema);
|
||||||
int32_t calculateConstant(SParseContext* pParseCxt, SQuery* pQuery);
|
int32_t calculateConstant(SParseContext* pParseCxt, SQuery* pQuery);
|
||||||
int32_t translatePostCreateStream(SParseContext* pParseCxt, SQuery* pQuery, void** pResRow);
|
int32_t translatePostCreateStream(SParseContext* pParseCxt, SQuery* pQuery, void** pResRow);
|
||||||
|
int32_t translatePostCreateSmaIndex(SParseContext* pParseCxt, SQuery* pQuery, void** pResRow);
|
||||||
|
|
||||||
#ifdef __cplusplus
|
#ifdef __cplusplus
|
||||||
}
|
}
|
||||||
|
|
|
@ -3520,6 +3520,10 @@ static int32_t translateWindow(STranslateContext* pCxt, SSelectStmt* pSelect) {
|
||||||
if (NULL == pSelect->pWindow) {
|
if (NULL == pSelect->pWindow) {
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
|
if (pSelect->pFromTable->type == QUERY_NODE_REAL_TABLE &&
|
||||||
|
((SRealTableNode*)pSelect->pFromTable)->pMeta->tableType == TSDB_SYSTEM_TABLE) {
|
||||||
|
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED, "WINDOW");
|
||||||
|
}
|
||||||
pCxt->currClause = SQL_CLAUSE_WINDOW;
|
pCxt->currClause = SQL_CLAUSE_WINDOW;
|
||||||
int32_t code = translateExpr(pCxt, &pSelect->pWindow);
|
int32_t code = translateExpr(pCxt, &pSelect->pWindow);
|
||||||
if (TSDB_CODE_SUCCESS == code) {
|
if (TSDB_CODE_SUCCESS == code) {
|
||||||
|
@ -5803,6 +5807,15 @@ static int32_t buildCreateSmaReq(STranslateContext* pCxt, SCreateIndexStmt* pStm
|
||||||
if (TSDB_CODE_SUCCESS == code) {
|
if (TSDB_CODE_SUCCESS == code) {
|
||||||
code = getSmaIndexAst(pCxt, pStmt, &pReq->ast, &pReq->astLen, &pReq->expr, &pReq->exprLen);
|
code = getSmaIndexAst(pCxt, pStmt, &pReq->ast, &pReq->astLen, &pReq->expr, &pReq->exprLen);
|
||||||
}
|
}
|
||||||
|
if (TSDB_CODE_SUCCESS == code) {
|
||||||
|
STableMeta* pMetaCache = NULL;
|
||||||
|
code = getTableMeta(pCxt, pStmt->dbName, pStmt->tableName, &pMetaCache);
|
||||||
|
if (TSDB_CODE_SUCCESS == code) {
|
||||||
|
pStmt->pOptions->tsPrecision = pMetaCache->tableInfo.precision;
|
||||||
|
code = createLastTsSelectStmt(pStmt->dbName, pStmt->tableName, pMetaCache, &pStmt->pPrevQuery);
|
||||||
|
}
|
||||||
|
taosMemoryFreeClear(pMetaCache);
|
||||||
|
}
|
||||||
|
|
||||||
return code;
|
return code;
|
||||||
}
|
}
|
||||||
|
@ -5828,15 +5841,60 @@ static int32_t checkCreateSmaIndex(STranslateContext* pCxt, SCreateIndexStmt* pS
|
||||||
}
|
}
|
||||||
|
|
||||||
static int32_t translateCreateSmaIndex(STranslateContext* pCxt, SCreateIndexStmt* pStmt) {
|
static int32_t translateCreateSmaIndex(STranslateContext* pCxt, SCreateIndexStmt* pStmt) {
|
||||||
SMCreateSmaReq createSmaReq = {0};
|
|
||||||
int32_t code = checkCreateSmaIndex(pCxt, pStmt);
|
int32_t code = checkCreateSmaIndex(pCxt, pStmt);
|
||||||
|
pStmt->pReq = taosMemoryCalloc(1, sizeof(SMCreateSmaReq));
|
||||||
|
if (pStmt->pReq == NULL) code = TSDB_CODE_OUT_OF_MEMORY;
|
||||||
if (TSDB_CODE_SUCCESS == code) {
|
if (TSDB_CODE_SUCCESS == code) {
|
||||||
code = buildCreateSmaReq(pCxt, pStmt, &createSmaReq);
|
code = buildCreateSmaReq(pCxt, pStmt, pStmt->pReq);
|
||||||
|
}
|
||||||
|
TSWAP(pCxt->pPrevRoot, pStmt->pPrevQuery);
|
||||||
|
return code;
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t createIntervalFromCreateSmaIndexStmt(SCreateIndexStmt* pStmt, SInterval* pInterval) {
|
||||||
|
pInterval->interval = ((SValueNode*)pStmt->pOptions->pInterval)->datum.i;
|
||||||
|
pInterval->intervalUnit = ((SValueNode*)pStmt->pOptions->pInterval)->unit;
|
||||||
|
pInterval->offset = NULL != pStmt->pOptions->pOffset ? ((SValueNode*)pStmt->pOptions->pOffset)->datum.i : 0;
|
||||||
|
pInterval->sliding = NULL != pStmt->pOptions->pSliding ? ((SValueNode*)pStmt->pOptions->pSliding)->datum.i : pInterval->interval;
|
||||||
|
pInterval->slidingUnit = NULL != pStmt->pOptions->pSliding ? ((SValueNode*)pStmt->pOptions->pSliding)->unit : pInterval->intervalUnit;
|
||||||
|
pInterval->precision = pStmt->pOptions->tsPrecision;
|
||||||
|
return TSDB_CODE_SUCCESS;
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t translatePostCreateSmaIndex(SParseContext* pParseCxt, SQuery* pQuery, void ** pResRow) {
|
||||||
|
int32_t code = TSDB_CODE_SUCCESS;
|
||||||
|
SCreateIndexStmt* pStmt = (SCreateIndexStmt*)pQuery->pRoot;
|
||||||
|
int64_t lastTs = 0;
|
||||||
|
SInterval interval = {0};
|
||||||
|
STranslateContext pCxt = {0};
|
||||||
|
code = initTranslateContext(pParseCxt, NULL, &pCxt);
|
||||||
|
if (TSDB_CODE_SUCCESS == code) {
|
||||||
|
code = createIntervalFromCreateSmaIndexStmt(pStmt, &interval);
|
||||||
}
|
}
|
||||||
if (TSDB_CODE_SUCCESS == code) {
|
if (TSDB_CODE_SUCCESS == code) {
|
||||||
code = buildCmdMsg(pCxt, TDMT_MND_CREATE_SMA, (FSerializeFunc)tSerializeSMCreateSmaReq, &createSmaReq);
|
if (pResRow && pResRow[0]) {
|
||||||
|
lastTs = *(int64_t*)pResRow[0];
|
||||||
|
} else if (interval.interval > 0) {
|
||||||
|
lastTs = convertTimePrecision(taosGetTimestampMs(), TSDB_TIME_PRECISION_MILLI, interval.precision);
|
||||||
|
} else {
|
||||||
|
lastTs = taosGetTimestampMs();
|
||||||
|
}
|
||||||
}
|
}
|
||||||
tFreeSMCreateSmaReq(&createSmaReq);
|
if (TSDB_CODE_SUCCESS == code) {
|
||||||
|
if (interval.interval > 0) {
|
||||||
|
pStmt->pReq->lastTs = taosTimeTruncate(lastTs, &interval);
|
||||||
|
} else {
|
||||||
|
pStmt->pReq->lastTs = lastTs;
|
||||||
|
}
|
||||||
|
code = buildCmdMsg(&pCxt, TDMT_MND_CREATE_SMA, (FSerializeFunc)tSerializeSMCreateSmaReq, pStmt->pReq);
|
||||||
|
}
|
||||||
|
if (TSDB_CODE_SUCCESS == code) {
|
||||||
|
code = setQuery(&pCxt, pQuery);
|
||||||
|
}
|
||||||
|
setRefreshMate(&pCxt, pQuery);
|
||||||
|
destroyTranslateContext(&pCxt);
|
||||||
|
tFreeSMCreateSmaReq(pStmt->pReq);
|
||||||
|
taosMemoryFreeClear(pStmt->pReq);
|
||||||
return code;
|
return code;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -6989,7 +7047,7 @@ static int32_t translateCreateStream(STranslateContext* pCxt, SCreateStreamStmt*
|
||||||
return code;
|
return code;
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t buildIntervalForCreateStream(SCreateStreamStmt* pStmt, SInterval* pInterval) {
|
static int32_t buildIntervalForCreateStream(SCreateStreamStmt* pStmt, SInterval* pInterval) {
|
||||||
int32_t code = TSDB_CODE_SUCCESS;
|
int32_t code = TSDB_CODE_SUCCESS;
|
||||||
if (QUERY_NODE_SELECT_STMT != nodeType(pStmt->pQuery)) {
|
if (QUERY_NODE_SELECT_STMT != nodeType(pStmt->pQuery)) {
|
||||||
return code;
|
return code;
|
||||||
|
|
|
@ -172,6 +172,8 @@ static char* getSyntaxErrFormat(int32_t errCode) {
|
||||||
return "%s function is not supported in group query";
|
return "%s function is not supported in group query";
|
||||||
case TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED_FUNC:
|
case TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED_FUNC:
|
||||||
return "%s function is not supported in system table query";
|
return "%s function is not supported in system table query";
|
||||||
|
case TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED:
|
||||||
|
return "%s is not supported in system table query";
|
||||||
case TSDB_CODE_PAR_INVALID_INTERP_CLAUSE:
|
case TSDB_CODE_PAR_INVALID_INTERP_CLAUSE:
|
||||||
return "Invalid usage of RANGE clause, EVERY clause or FILL clause";
|
return "Invalid usage of RANGE clause, EVERY clause or FILL clause";
|
||||||
case TSDB_CODE_PAR_NO_VALID_FUNC_IN_WIN:
|
case TSDB_CODE_PAR_NO_VALID_FUNC_IN_WIN:
|
||||||
|
|
|
@ -227,6 +227,8 @@ int32_t qContinueParsePostQuery(SParseContext* pCxt, SQuery* pQuery, void** pRes
|
||||||
case QUERY_NODE_CREATE_STREAM_STMT:
|
case QUERY_NODE_CREATE_STREAM_STMT:
|
||||||
code = translatePostCreateStream(pCxt, pQuery, pResRow);
|
code = translatePostCreateStream(pCxt, pQuery, pResRow);
|
||||||
break;
|
break;
|
||||||
|
case QUERY_NODE_CREATE_INDEX_STMT:
|
||||||
|
code = translatePostCreateSmaIndex(pCxt, pQuery, pResRow);
|
||||||
default:
|
default:
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
|
@ -542,6 +542,18 @@ TEST_F(ParserInitialCTest, createSmaIndex) {
|
||||||
setCheckDdlFunc([&](const SQuery* pQuery, ParserStage stage) {
|
setCheckDdlFunc([&](const SQuery* pQuery, ParserStage stage) {
|
||||||
ASSERT_EQ(nodeType(pQuery->pRoot), QUERY_NODE_CREATE_INDEX_STMT);
|
ASSERT_EQ(nodeType(pQuery->pRoot), QUERY_NODE_CREATE_INDEX_STMT);
|
||||||
SMCreateSmaReq req = {0};
|
SMCreateSmaReq req = {0};
|
||||||
|
ASSERT_TRUE(pQuery->pPrevRoot);
|
||||||
|
ASSERT_EQ(QUERY_NODE_SELECT_STMT, nodeType(pQuery->pPrevRoot));
|
||||||
|
|
||||||
|
SCreateIndexStmt* pStmt = (SCreateIndexStmt*)pQuery->pRoot;
|
||||||
|
SCmdMsgInfo* pCmdMsg = (SCmdMsgInfo*)taosMemoryMalloc(sizeof(SCmdMsgInfo));
|
||||||
|
if (NULL == pCmdMsg) FAIL();
|
||||||
|
pCmdMsg->msgType = TDMT_MND_CREATE_SMA;
|
||||||
|
pCmdMsg->msgLen = tSerializeSMCreateSmaReq(NULL, 0, pStmt->pReq);
|
||||||
|
pCmdMsg->pMsg = taosMemoryMalloc(pCmdMsg->msgLen);
|
||||||
|
if (!pCmdMsg->pMsg) FAIL();
|
||||||
|
tSerializeSMCreateSmaReq(pCmdMsg->pMsg, pCmdMsg->msgLen, pStmt->pReq);
|
||||||
|
((SQuery*)pQuery)->pCmdMsg = pCmdMsg;
|
||||||
ASSERT_TRUE(TSDB_CODE_SUCCESS == tDeserializeSMCreateSmaReq(pQuery->pCmdMsg->pMsg, pQuery->pCmdMsg->msgLen, &req));
|
ASSERT_TRUE(TSDB_CODE_SUCCESS == tDeserializeSMCreateSmaReq(pQuery->pCmdMsg->pMsg, pQuery->pCmdMsg->msgLen, &req));
|
||||||
|
|
||||||
ASSERT_EQ(std::string(req.name), std::string(expect.name));
|
ASSERT_EQ(std::string(req.name), std::string(expect.name));
|
||||||
|
|
|
@ -291,4 +291,13 @@ TEST_F(ParserInitialDTest, dropUser) {
|
||||||
run("DROP USER wxy");
|
run("DROP USER wxy");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
TEST_F(ParserInitialDTest, IntervalOnSysTable) {
|
||||||
|
login("root");
|
||||||
|
run("SELECT count('reboot_time') FROM information_schema.ins_dnodes interval(14m) sliding(9m)",
|
||||||
|
TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED, PARSER_STAGE_TRANSLATE);
|
||||||
|
|
||||||
|
run("SELECT count('create_time') FROM information_schema.ins_qnodes interval(14m) sliding(9m)",
|
||||||
|
TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED, PARSER_STAGE_TRANSLATE);
|
||||||
|
}
|
||||||
|
|
||||||
} // namespace ParserTest
|
} // namespace ParserTest
|
||||||
|
|
|
@ -1027,7 +1027,6 @@ static int32_t createSortLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect
|
||||||
return TSDB_CODE_OUT_OF_MEMORY;
|
return TSDB_CODE_OUT_OF_MEMORY;
|
||||||
}
|
}
|
||||||
|
|
||||||
pSort->maxRows = -1;
|
|
||||||
pSort->groupSort = pSelect->groupSort;
|
pSort->groupSort = pSelect->groupSort;
|
||||||
pSort->node.groupAction = pSort->groupSort ? GROUP_ACTION_KEEP : GROUP_ACTION_CLEAR;
|
pSort->node.groupAction = pSort->groupSort ? GROUP_ACTION_KEEP : GROUP_ACTION_CLEAR;
|
||||||
pSort->node.requireDataOrder = DATA_ORDER_LEVEL_NONE;
|
pSort->node.requireDataOrder = DATA_ORDER_LEVEL_NONE;
|
||||||
|
@ -1299,7 +1298,6 @@ static int32_t createSetOpSortLogicNode(SLogicPlanContext* pCxt, SSetOperator* p
|
||||||
return TSDB_CODE_OUT_OF_MEMORY;
|
return TSDB_CODE_OUT_OF_MEMORY;
|
||||||
}
|
}
|
||||||
|
|
||||||
pSort->maxRows = -1;
|
|
||||||
TSWAP(pSort->node.pLimit, pSetOperator->pLimit);
|
TSWAP(pSort->node.pLimit, pSetOperator->pLimit);
|
||||||
|
|
||||||
int32_t code = TSDB_CODE_SUCCESS;
|
int32_t code = TSDB_CODE_SUCCESS;
|
||||||
|
|
|
@ -2635,11 +2635,13 @@ static bool pushDownLimitOptShouldBeOptimized(SLogicNode* pNode) {
|
||||||
}
|
}
|
||||||
|
|
||||||
SLogicNode* pChild = (SLogicNode*)nodesListGetNode(pNode->pChildren, 0);
|
SLogicNode* pChild = (SLogicNode*)nodesListGetNode(pNode->pChildren, 0);
|
||||||
|
// push down to sort node
|
||||||
if (QUERY_NODE_LOGIC_PLAN_SORT == nodeType(pChild)) {
|
if (QUERY_NODE_LOGIC_PLAN_SORT == nodeType(pChild)) {
|
||||||
SLimitNode* pChildLimit = (SLimitNode*)(pChild->pLimit);
|
|
||||||
// if we have pushed down, we skip it
|
// if we have pushed down, we skip it
|
||||||
if ((*(SSortLogicNode*)pChild).maxRows != -1) return false;
|
if (pChild->pLimit) return false;
|
||||||
} else if (QUERY_NODE_LOGIC_PLAN_SCAN != nodeType(pChild)) {
|
} else if (QUERY_NODE_LOGIC_PLAN_SCAN != nodeType(pChild) || QUERY_NODE_LOGIC_PLAN_SORT == nodeType(pNode)) {
|
||||||
|
// push down to table scan node
|
||||||
|
// if pNode is sortNode, we skip push down limit info to table scan node
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
return true;
|
return true;
|
||||||
|
@ -2654,13 +2656,10 @@ static int32_t pushDownLimitOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLog
|
||||||
SLogicNode* pChild = (SLogicNode*)nodesListGetNode(pNode->pChildren, 0);
|
SLogicNode* pChild = (SLogicNode*)nodesListGetNode(pNode->pChildren, 0);
|
||||||
nodesDestroyNode(pChild->pLimit);
|
nodesDestroyNode(pChild->pLimit);
|
||||||
if (QUERY_NODE_LOGIC_PLAN_SORT == nodeType(pChild)) {
|
if (QUERY_NODE_LOGIC_PLAN_SORT == nodeType(pChild)) {
|
||||||
SLimitNode* pLimitNode = (SLimitNode*)pNode->pLimit;
|
pChild->pLimit = nodesCloneNode(pNode->pLimit);
|
||||||
int64_t maxRows = -1;
|
SLimitNode* pLimit = (SLimitNode*)pChild->pLimit;
|
||||||
if (pLimitNode->limit != -1) {
|
pLimit->limit += pLimit->offset;
|
||||||
maxRows = pLimitNode->limit;
|
pLimit->offset = 0;
|
||||||
if (pLimitNode->offset != -1) maxRows += pLimitNode->offset;
|
|
||||||
}
|
|
||||||
((SSortLogicNode*)pChild)->maxRows = maxRows;
|
|
||||||
} else {
|
} else {
|
||||||
pChild->pLimit = pNode->pLimit;
|
pChild->pLimit = pNode->pLimit;
|
||||||
pNode->pLimit = NULL;
|
pNode->pLimit = NULL;
|
||||||
|
|
|
@ -1374,7 +1374,6 @@ static int32_t createSortPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren
|
||||||
if (NULL == pSort) {
|
if (NULL == pSort) {
|
||||||
return TSDB_CODE_OUT_OF_MEMORY;
|
return TSDB_CODE_OUT_OF_MEMORY;
|
||||||
}
|
}
|
||||||
pSort->maxRows = pSortLogicNode->maxRows;
|
|
||||||
|
|
||||||
SNodeList* pPrecalcExprs = NULL;
|
SNodeList* pPrecalcExprs = NULL;
|
||||||
SNodeList* pSortKeys = NULL;
|
SNodeList* pSortKeys = NULL;
|
||||||
|
|
|
@ -1018,7 +1018,6 @@ static int32_t stbSplCreatePartSortNode(SSortLogicNode* pSort, SLogicNode** pOut
|
||||||
splSetParent((SLogicNode*)pPartSort);
|
splSetParent((SLogicNode*)pPartSort);
|
||||||
pPartSort->pSortKeys = pSortKeys;
|
pPartSort->pSortKeys = pSortKeys;
|
||||||
pPartSort->groupSort = pSort->groupSort;
|
pPartSort->groupSort = pSort->groupSort;
|
||||||
pPartSort->maxRows = pSort->maxRows;
|
|
||||||
code = stbSplCreateMergeKeys(pPartSort->pSortKeys, pPartSort->node.pTargets, &pMergeKeys);
|
code = stbSplCreateMergeKeys(pPartSort->pSortKeys, pPartSort->node.pTargets, &pMergeKeys);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -441,6 +441,16 @@ class PlannerTestBaseImpl {
|
||||||
pCxt->topicQuery = true;
|
pCxt->topicQuery = true;
|
||||||
} else if (QUERY_NODE_CREATE_INDEX_STMT == nodeType(pQuery->pRoot)) {
|
} else if (QUERY_NODE_CREATE_INDEX_STMT == nodeType(pQuery->pRoot)) {
|
||||||
SMCreateSmaReq req = {0};
|
SMCreateSmaReq req = {0};
|
||||||
|
SCreateIndexStmt* pStmt = (SCreateIndexStmt*)pQuery->pRoot;
|
||||||
|
SCmdMsgInfo* pCmdMsg = (SCmdMsgInfo*)taosMemoryMalloc(sizeof(SCmdMsgInfo));
|
||||||
|
if (NULL == pCmdMsg) FAIL();
|
||||||
|
pCmdMsg->msgType = TDMT_MND_CREATE_SMA;
|
||||||
|
pCmdMsg->msgLen = tSerializeSMCreateSmaReq(NULL, 0, pStmt->pReq);
|
||||||
|
pCmdMsg->pMsg = taosMemoryMalloc(pCmdMsg->msgLen);
|
||||||
|
if (!pCmdMsg->pMsg) FAIL();
|
||||||
|
tSerializeSMCreateSmaReq(pCmdMsg->pMsg, pCmdMsg->msgLen, pStmt->pReq);
|
||||||
|
((SQuery*)pQuery)->pCmdMsg = pCmdMsg;
|
||||||
|
|
||||||
tDeserializeSMCreateSmaReq(pQuery->pCmdMsg->pMsg, pQuery->pCmdMsg->msgLen, &req);
|
tDeserializeSMCreateSmaReq(pQuery->pCmdMsg->pMsg, pQuery->pCmdMsg->msgLen, &req);
|
||||||
g_mockCatalogService->createSmaIndex(&req);
|
g_mockCatalogService->createSmaIndex(&req);
|
||||||
nodesStringToNode(req.ast, &pCxt->pAstRoot);
|
nodesStringToNode(req.ast, &pCxt->pAstRoot);
|
||||||
|
|
|
@ -1979,7 +1979,7 @@ int32_t fltInitValFieldData(SFilterInfo *info) {
|
||||||
int32_t code = sclConvertValueToSclParam(var, &out, NULL);
|
int32_t code = sclConvertValueToSclParam(var, &out, NULL);
|
||||||
if (code != TSDB_CODE_SUCCESS) {
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
qError("convert value to type[%d] failed", type);
|
qError("convert value to type[%d] failed", type);
|
||||||
return TSDB_CODE_TSC_INVALID_OPERATION;
|
return code;
|
||||||
}
|
}
|
||||||
|
|
||||||
size_t bufBytes = IS_VAR_DATA_TYPE(type) ? varDataTLen(out.columnData->pData)
|
size_t bufBytes = IS_VAR_DATA_TYPE(type) ? varDataTLen(out.columnData->pData)
|
||||||
|
@ -4644,11 +4644,11 @@ _return:
|
||||||
FLT_RET(code);
|
FLT_RET(code);
|
||||||
}
|
}
|
||||||
|
|
||||||
bool filterExecute(SFilterInfo *info, SSDataBlock *pSrc, SColumnInfoData **p, SColumnDataAgg *statis, int16_t numOfCols,
|
int32_t filterExecute(SFilterInfo *info, SSDataBlock *pSrc, SColumnInfoData **p, SColumnDataAgg *statis,
|
||||||
int32_t *pResultStatus) {
|
int16_t numOfCols, int32_t *pResultStatus) {
|
||||||
if (NULL == info) {
|
if (NULL == info) {
|
||||||
*pResultStatus = FILTER_RESULT_ALL_QUALIFIED;
|
*pResultStatus = FILTER_RESULT_ALL_QUALIFIED;
|
||||||
return false;
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
|
|
||||||
SScalarParam output = {0};
|
SScalarParam output = {0};
|
||||||
|
@ -4656,7 +4656,7 @@ bool filterExecute(SFilterInfo *info, SSDataBlock *pSrc, SColumnInfoData **p, SC
|
||||||
|
|
||||||
int32_t code = sclCreateColumnInfoData(&type, pSrc->info.rows, &output);
|
int32_t code = sclCreateColumnInfoData(&type, pSrc->info.rows, &output);
|
||||||
if (code != TSDB_CODE_SUCCESS) {
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
return false;
|
return code;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (info->scalarMode) {
|
if (info->scalarMode) {
|
||||||
|
@ -4666,7 +4666,7 @@ bool filterExecute(SFilterInfo *info, SSDataBlock *pSrc, SColumnInfoData **p, SC
|
||||||
code = scalarCalculate(info->sclCtx.node, pList, &output);
|
code = scalarCalculate(info->sclCtx.node, pList, &output);
|
||||||
taosArrayDestroy(pList);
|
taosArrayDestroy(pList);
|
||||||
|
|
||||||
FLT_ERR_RET(code); // TODO: current errcode returns as true
|
FLT_ERR_RET(code);
|
||||||
|
|
||||||
*p = output.columnData;
|
*p = output.columnData;
|
||||||
|
|
||||||
|
@ -4677,18 +4677,23 @@ bool filterExecute(SFilterInfo *info, SSDataBlock *pSrc, SColumnInfoData **p, SC
|
||||||
} else {
|
} else {
|
||||||
*pResultStatus = FILTER_RESULT_PARTIAL_QUALIFIED;
|
*pResultStatus = FILTER_RESULT_PARTIAL_QUALIFIED;
|
||||||
}
|
}
|
||||||
return false;
|
return TSDB_CODE_SUCCESS;
|
||||||
|
}
|
||||||
|
|
||||||
|
ASSERT(false == info->scalarMode);
|
||||||
|
*p = output.columnData;
|
||||||
|
output.numOfRows = pSrc->info.rows;
|
||||||
|
|
||||||
|
if (*p == NULL) {
|
||||||
|
return TSDB_CODE_APP_ERROR;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool keepAll = (*info->func)(info, pSrc->info.rows, *p, statis, numOfCols, &output.numOfQualified);
|
||||||
|
|
||||||
|
// todo this should be return during filter procedure
|
||||||
|
if (keepAll) {
|
||||||
|
*pResultStatus = FILTER_RESULT_ALL_QUALIFIED;
|
||||||
} else {
|
} else {
|
||||||
*p = output.columnData;
|
|
||||||
output.numOfRows = pSrc->info.rows;
|
|
||||||
|
|
||||||
if (*p == NULL) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool keep = (*info->func)(info, pSrc->info.rows, *p, statis, numOfCols, &output.numOfQualified);
|
|
||||||
|
|
||||||
// todo this should be return during filter procedure
|
|
||||||
int32_t num = 0;
|
int32_t num = 0;
|
||||||
for (int32_t i = 0; i < output.numOfRows; ++i) {
|
for (int32_t i = 0; i < output.numOfRows; ++i) {
|
||||||
if (((int8_t *)((*p)->pData))[i] == 1) {
|
if (((int8_t *)((*p)->pData))[i] == 1) {
|
||||||
|
@ -4703,9 +4708,9 @@ bool filterExecute(SFilterInfo *info, SSDataBlock *pSrc, SColumnInfoData **p, SC
|
||||||
} else {
|
} else {
|
||||||
*pResultStatus = FILTER_RESULT_PARTIAL_QUALIFIED;
|
*pResultStatus = FILTER_RESULT_PARTIAL_QUALIFIED;
|
||||||
}
|
}
|
||||||
|
|
||||||
return keep;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
|
|
||||||
typedef struct SClassifyConditionCxt {
|
typedef struct SClassifyConditionCxt {
|
||||||
|
|
|
@ -1694,7 +1694,8 @@ int32_t scalarCalculate(SNode *pNode, SArray *pBlockList, SScalarParam *pDst) {
|
||||||
SCL_ERR_JRET(TSDB_CODE_APP_ERROR);
|
SCL_ERR_JRET(TSDB_CODE_APP_ERROR);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (1 == res->numOfRows) {
|
SSDataBlock *pb = taosArrayGetP(pBlockList, 0);
|
||||||
|
if (1 == res->numOfRows && pb->info.rows > 0) {
|
||||||
SCL_ERR_JRET(sclExtendResRows(pDst, res, pBlockList));
|
SCL_ERR_JRET(sclExtendResRows(pDst, res, pBlockList));
|
||||||
} else {
|
} else {
|
||||||
colInfoDataEnsureCapacity(pDst->columnData, res->numOfRows, true);
|
colInfoDataEnsureCapacity(pDst->columnData, res->numOfRows, true);
|
||||||
|
|
|
@ -240,15 +240,20 @@ _getValueAddr_fn_t getVectorValueAddrFn(int32_t srcType) {
|
||||||
}
|
}
|
||||||
|
|
||||||
static FORCE_INLINE void varToTimestamp(char *buf, SScalarParam *pOut, int32_t rowIndex, int32_t *overflow) {
|
static FORCE_INLINE void varToTimestamp(char *buf, SScalarParam *pOut, int32_t rowIndex, int32_t *overflow) {
|
||||||
|
terrno = TSDB_CODE_SUCCESS;
|
||||||
|
|
||||||
int64_t value = 0;
|
int64_t value = 0;
|
||||||
if (taosParseTime(buf, &value, strlen(buf), pOut->columnData->info.precision, tsDaylight) != TSDB_CODE_SUCCESS) {
|
if (taosParseTime(buf, &value, strlen(buf), pOut->columnData->info.precision, tsDaylight) != TSDB_CODE_SUCCESS) {
|
||||||
value = 0;
|
value = 0;
|
||||||
|
terrno = TSDB_CODE_SCALAR_CONVERT_ERROR;
|
||||||
}
|
}
|
||||||
|
|
||||||
colDataSetInt64(pOut->columnData, rowIndex, &value);
|
colDataSetInt64(pOut->columnData, rowIndex, &value);
|
||||||
}
|
}
|
||||||
|
|
||||||
static FORCE_INLINE void varToSigned(char *buf, SScalarParam *pOut, int32_t rowIndex, int32_t *overflow) {
|
static FORCE_INLINE void varToSigned(char *buf, SScalarParam *pOut, int32_t rowIndex, int32_t *overflow) {
|
||||||
|
terrno = TSDB_CODE_SUCCESS;
|
||||||
|
|
||||||
if (overflow) {
|
if (overflow) {
|
||||||
int64_t minValue = tDataTypes[pOut->columnData->info.type].minValue;
|
int64_t minValue = tDataTypes[pOut->columnData->info.type].minValue;
|
||||||
int64_t maxValue = tDataTypes[pOut->columnData->info.type].maxValue;
|
int64_t maxValue = tDataTypes[pOut->columnData->info.type].maxValue;
|
||||||
|
@ -290,6 +295,8 @@ static FORCE_INLINE void varToSigned(char *buf, SScalarParam *pOut, int32_t rowI
|
||||||
}
|
}
|
||||||
|
|
||||||
static FORCE_INLINE void varToUnsigned(char *buf, SScalarParam *pOut, int32_t rowIndex, int32_t *overflow) {
|
static FORCE_INLINE void varToUnsigned(char *buf, SScalarParam *pOut, int32_t rowIndex, int32_t *overflow) {
|
||||||
|
terrno = TSDB_CODE_SUCCESS;
|
||||||
|
|
||||||
if (overflow) {
|
if (overflow) {
|
||||||
uint64_t minValue = (uint64_t)tDataTypes[pOut->columnData->info.type].minValue;
|
uint64_t minValue = (uint64_t)tDataTypes[pOut->columnData->info.type].minValue;
|
||||||
uint64_t maxValue = (uint64_t)tDataTypes[pOut->columnData->info.type].maxValue;
|
uint64_t maxValue = (uint64_t)tDataTypes[pOut->columnData->info.type].maxValue;
|
||||||
|
@ -330,6 +337,8 @@ static FORCE_INLINE void varToUnsigned(char *buf, SScalarParam *pOut, int32_t ro
|
||||||
}
|
}
|
||||||
|
|
||||||
static FORCE_INLINE void varToFloat(char *buf, SScalarParam *pOut, int32_t rowIndex, int32_t *overflow) {
|
static FORCE_INLINE void varToFloat(char *buf, SScalarParam *pOut, int32_t rowIndex, int32_t *overflow) {
|
||||||
|
terrno = TSDB_CODE_SUCCESS;
|
||||||
|
|
||||||
if (TSDB_DATA_TYPE_FLOAT == pOut->columnData->info.type) {
|
if (TSDB_DATA_TYPE_FLOAT == pOut->columnData->info.type) {
|
||||||
float value = taosStr2Float(buf, NULL);
|
float value = taosStr2Float(buf, NULL);
|
||||||
colDataSetFloat(pOut->columnData, rowIndex, &value);
|
colDataSetFloat(pOut->columnData, rowIndex, &value);
|
||||||
|
@ -341,6 +350,8 @@ static FORCE_INLINE void varToFloat(char *buf, SScalarParam *pOut, int32_t rowIn
|
||||||
}
|
}
|
||||||
|
|
||||||
static FORCE_INLINE void varToBool(char *buf, SScalarParam *pOut, int32_t rowIndex, int32_t *overflow) {
|
static FORCE_INLINE void varToBool(char *buf, SScalarParam *pOut, int32_t rowIndex, int32_t *overflow) {
|
||||||
|
terrno = TSDB_CODE_SUCCESS;
|
||||||
|
|
||||||
int64_t value = taosStr2Int64(buf, NULL, 10);
|
int64_t value = taosStr2Int64(buf, NULL, 10);
|
||||||
bool v = (value != 0) ? true : false;
|
bool v = (value != 0) ? true : false;
|
||||||
colDataSetInt8(pOut->columnData, rowIndex, (int8_t *)&v);
|
colDataSetInt8(pOut->columnData, rowIndex, (int8_t *)&v);
|
||||||
|
@ -348,6 +359,8 @@ static FORCE_INLINE void varToBool(char *buf, SScalarParam *pOut, int32_t rowInd
|
||||||
|
|
||||||
// todo remove this malloc
|
// todo remove this malloc
|
||||||
static FORCE_INLINE void varToNchar(char *buf, SScalarParam *pOut, int32_t rowIndex, int32_t *overflow) {
|
static FORCE_INLINE void varToNchar(char *buf, SScalarParam *pOut, int32_t rowIndex, int32_t *overflow) {
|
||||||
|
terrno = TSDB_CODE_SUCCESS;
|
||||||
|
|
||||||
int32_t len = 0;
|
int32_t len = 0;
|
||||||
int32_t inputLen = varDataLen(buf);
|
int32_t inputLen = varDataLen(buf);
|
||||||
int32_t outputMaxLen = (inputLen + 1) * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE;
|
int32_t outputMaxLen = (inputLen + 1) * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE;
|
||||||
|
@ -357,6 +370,7 @@ static FORCE_INLINE void varToNchar(char *buf, SScalarParam *pOut, int32_t rowIn
|
||||||
taosMbsToUcs4(varDataVal(buf), inputLen, (TdUcs4 *)varDataVal(t), outputMaxLen - VARSTR_HEADER_SIZE, &len);
|
taosMbsToUcs4(varDataVal(buf), inputLen, (TdUcs4 *)varDataVal(t), outputMaxLen - VARSTR_HEADER_SIZE, &len);
|
||||||
if (!ret) {
|
if (!ret) {
|
||||||
sclError("failed to convert to NCHAR");
|
sclError("failed to convert to NCHAR");
|
||||||
|
terrno = TSDB_CODE_SCALAR_CONVERT_ERROR;
|
||||||
}
|
}
|
||||||
varDataSetLen(t, len);
|
varDataSetLen(t, len);
|
||||||
|
|
||||||
|
@ -365,11 +379,14 @@ static FORCE_INLINE void varToNchar(char *buf, SScalarParam *pOut, int32_t rowIn
|
||||||
}
|
}
|
||||||
|
|
||||||
static FORCE_INLINE void ncharToVar(char *buf, SScalarParam *pOut, int32_t rowIndex, int32_t *overflow) {
|
static FORCE_INLINE void ncharToVar(char *buf, SScalarParam *pOut, int32_t rowIndex, int32_t *overflow) {
|
||||||
|
terrno = TSDB_CODE_SUCCESS;
|
||||||
|
|
||||||
int32_t inputLen = varDataLen(buf);
|
int32_t inputLen = varDataLen(buf);
|
||||||
|
|
||||||
char *t = taosMemoryCalloc(1, inputLen + VARSTR_HEADER_SIZE);
|
char *t = taosMemoryCalloc(1, inputLen + VARSTR_HEADER_SIZE);
|
||||||
int32_t len = taosUcs4ToMbs((TdUcs4 *)varDataVal(buf), varDataLen(buf), varDataVal(t));
|
int32_t len = taosUcs4ToMbs((TdUcs4 *)varDataVal(buf), varDataLen(buf), varDataVal(t));
|
||||||
if (len < 0) {
|
if (len < 0) {
|
||||||
|
terrno = TSDB_CODE_SCALAR_CONVERT_ERROR;
|
||||||
taosMemoryFree(t);
|
taosMemoryFree(t);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
@ -379,22 +396,26 @@ static FORCE_INLINE void ncharToVar(char *buf, SScalarParam *pOut, int32_t rowIn
|
||||||
taosMemoryFree(t);
|
taosMemoryFree(t);
|
||||||
}
|
}
|
||||||
|
|
||||||
// todo remove this malloc
|
|
||||||
static FORCE_INLINE void varToGeometry(char *buf, SScalarParam *pOut, int32_t rowIndex, int32_t *overflow) {
|
static FORCE_INLINE void varToGeometry(char *buf, SScalarParam *pOut, int32_t rowIndex, int32_t *overflow) {
|
||||||
//[ToDo] support to parse WKB as well as WKT
|
//[ToDo] support to parse WKB as well as WKT
|
||||||
unsigned char *t = NULL;
|
terrno = TSDB_CODE_SUCCESS;
|
||||||
|
|
||||||
size_t len = 0;
|
size_t len = 0;
|
||||||
|
unsigned char *t = NULL;
|
||||||
|
char *output = NULL;
|
||||||
|
|
||||||
if (initCtxGeomFromText()) {
|
if (initCtxGeomFromText()) {
|
||||||
sclError("failed to init geometry ctx");
|
sclError("failed to init geometry ctx, %s", getThreadLocalGeosCtx()->errMsg);
|
||||||
return;
|
terrno = TSDB_CODE_APP_ERROR;
|
||||||
|
goto _err;
|
||||||
}
|
}
|
||||||
if (doGeomFromText(buf, &t, &len)) {
|
if (doGeomFromText(buf, &t, &len)) {
|
||||||
sclDebug("failed to convert text to geometry");
|
sclInfo("failed to convert text to geometry, %s", getThreadLocalGeosCtx()->errMsg);
|
||||||
return;
|
terrno = TSDB_CODE_SCALAR_CONVERT_ERROR;
|
||||||
|
goto _err;
|
||||||
}
|
}
|
||||||
|
|
||||||
char *output = taosMemoryCalloc(1, len + VARSTR_HEADER_SIZE);
|
output = taosMemoryCalloc(1, len + VARSTR_HEADER_SIZE);
|
||||||
memcpy(output + VARSTR_HEADER_SIZE, t, len);
|
memcpy(output + VARSTR_HEADER_SIZE, t, len);
|
||||||
varDataSetLen(output, len);
|
varDataSetLen(output, len);
|
||||||
|
|
||||||
|
@ -402,10 +423,19 @@ static FORCE_INLINE void varToGeometry(char *buf, SScalarParam *pOut, int32_t ro
|
||||||
|
|
||||||
taosMemoryFree(output);
|
taosMemoryFree(output);
|
||||||
geosFreeBuffer(t);
|
geosFreeBuffer(t);
|
||||||
|
|
||||||
|
return;
|
||||||
|
|
||||||
|
_err:
|
||||||
|
ASSERT(t == NULL && len == 0);
|
||||||
|
VarDataLenT dummyHeader = 0;
|
||||||
|
colDataSetVal(pOut->columnData, rowIndex, (const char *)&dummyHeader, false);
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO opt performance, tmp is not needed.
|
// TODO opt performance, tmp is not needed.
|
||||||
int32_t vectorConvertFromVarData(SSclVectorConvCtx *pCtx, int32_t *overflow) {
|
int32_t vectorConvertFromVarData(SSclVectorConvCtx *pCtx, int32_t *overflow) {
|
||||||
|
terrno = TSDB_CODE_SUCCESS;
|
||||||
|
|
||||||
bool vton = false;
|
bool vton = false;
|
||||||
|
|
||||||
_bufConverteFunc func = NULL;
|
_bufConverteFunc func = NULL;
|
||||||
|
@ -431,7 +461,8 @@ int32_t vectorConvertFromVarData(SSclVectorConvCtx *pCtx, int32_t *overflow) {
|
||||||
func = varToGeometry;
|
func = varToGeometry;
|
||||||
} else {
|
} else {
|
||||||
sclError("invalid convert outType:%d, inType:%d", pCtx->outType, pCtx->inType);
|
sclError("invalid convert outType:%d, inType:%d", pCtx->outType, pCtx->inType);
|
||||||
return TSDB_CODE_APP_ERROR;
|
terrno = TSDB_CODE_APP_ERROR;
|
||||||
|
return terrno;
|
||||||
}
|
}
|
||||||
|
|
||||||
pCtx->pOut->numOfRows = pCtx->pIn->numOfRows;
|
pCtx->pOut->numOfRows = pCtx->pIn->numOfRows;
|
||||||
|
@ -451,7 +482,7 @@ int32_t vectorConvertFromVarData(SSclVectorConvCtx *pCtx, int32_t *overflow) {
|
||||||
convertType = TSDB_DATA_TYPE_NCHAR;
|
convertType = TSDB_DATA_TYPE_NCHAR;
|
||||||
} else if (tTagIsJson(data) || *data == TSDB_DATA_TYPE_NULL) {
|
} else if (tTagIsJson(data) || *data == TSDB_DATA_TYPE_NULL) {
|
||||||
terrno = TSDB_CODE_QRY_JSON_NOT_SUPPORT_ERROR;
|
terrno = TSDB_CODE_QRY_JSON_NOT_SUPPORT_ERROR;
|
||||||
return terrno;
|
goto _err;
|
||||||
} else {
|
} else {
|
||||||
convertNumberToNumber(data + CHAR_BYTES, colDataGetNumData(pCtx->pOut->columnData, i), *data, pCtx->outType);
|
convertNumberToNumber(data + CHAR_BYTES, colDataGetNumData(pCtx->pOut->columnData, i), *data, pCtx->outType);
|
||||||
continue;
|
continue;
|
||||||
|
@ -463,7 +494,8 @@ int32_t vectorConvertFromVarData(SSclVectorConvCtx *pCtx, int32_t *overflow) {
|
||||||
tmp = taosMemoryMalloc(bufSize);
|
tmp = taosMemoryMalloc(bufSize);
|
||||||
if (tmp == NULL) {
|
if (tmp == NULL) {
|
||||||
sclError("out of memory in vectorConvertFromVarData");
|
sclError("out of memory in vectorConvertFromVarData");
|
||||||
return TSDB_CODE_OUT_OF_MEMORY;
|
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||||
|
goto _err;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -477,15 +509,15 @@ int32_t vectorConvertFromVarData(SSclVectorConvCtx *pCtx, int32_t *overflow) {
|
||||||
// we need to convert it to native char string, and then perform the string to numeric data
|
// we need to convert it to native char string, and then perform the string to numeric data
|
||||||
if (varDataLen(data) > bufSize) {
|
if (varDataLen(data) > bufSize) {
|
||||||
sclError("castConvert convert buffer size too small");
|
sclError("castConvert convert buffer size too small");
|
||||||
taosMemoryFreeClear(tmp);
|
terrno = TSDB_CODE_APP_ERROR;
|
||||||
return TSDB_CODE_APP_ERROR;
|
goto _err;
|
||||||
}
|
}
|
||||||
|
|
||||||
int len = taosUcs4ToMbs((TdUcs4 *)varDataVal(data), varDataLen(data), tmp);
|
int len = taosUcs4ToMbs((TdUcs4 *)varDataVal(data), varDataLen(data), tmp);
|
||||||
if (len < 0) {
|
if (len < 0) {
|
||||||
sclError("castConvert taosUcs4ToMbs error 1");
|
sclError("castConvert taosUcs4ToMbs error 1");
|
||||||
taosMemoryFreeClear(tmp);
|
terrno = TSDB_CODE_SCALAR_CONVERT_ERROR;
|
||||||
return TSDB_CODE_APP_ERROR;
|
goto _err;
|
||||||
}
|
}
|
||||||
|
|
||||||
tmp[len] = 0;
|
tmp[len] = 0;
|
||||||
|
@ -493,12 +525,16 @@ int32_t vectorConvertFromVarData(SSclVectorConvCtx *pCtx, int32_t *overflow) {
|
||||||
}
|
}
|
||||||
|
|
||||||
(*func)(tmp, pCtx->pOut, i, overflow);
|
(*func)(tmp, pCtx->pOut, i, overflow);
|
||||||
|
if (terrno != TSDB_CODE_SUCCESS) {
|
||||||
|
goto _err;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
_err:
|
||||||
if (tmp != NULL) {
|
if (tmp != NULL) {
|
||||||
taosMemoryFreeClear(tmp);
|
taosMemoryFreeClear(tmp);
|
||||||
}
|
}
|
||||||
return TSDB_CODE_SUCCESS;
|
return terrno;
|
||||||
}
|
}
|
||||||
|
|
||||||
double getVectorDoubleValue_JSON(void *src, int32_t index) {
|
double getVectorDoubleValue_JSON(void *src, int32_t index) {
|
||||||
|
@ -911,25 +947,25 @@ int32_t vectorConvertSingleColImpl(const SScalarParam *pIn, SScalarParam *pOut,
|
||||||
int8_t gConvertTypes[TSDB_DATA_TYPE_MAX][TSDB_DATA_TYPE_MAX] = {
|
int8_t gConvertTypes[TSDB_DATA_TYPE_MAX][TSDB_DATA_TYPE_MAX] = {
|
||||||
/* NULL BOOL TINY SMAL INT BIG FLOA DOUB VARC TIME NCHA UTIN USMA UINT UBIG JSON VARB DECI BLOB MEDB GEOM*/
|
/* NULL BOOL TINY SMAL INT BIG FLOA DOUB VARC TIME NCHA UTIN USMA UINT UBIG JSON VARB DECI BLOB MEDB GEOM*/
|
||||||
/*NULL*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
/*NULL*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||||
/*BOOL*/ 0, 0, 2, 3, 4, 5, 6, 7, 5, 9, 7, 11, 12, 13, 14, 0, 7, 0, 0, 0, 0,
|
/*BOOL*/ 0, 0, 2, 3, 4, 5, 6, 7, 5, 9, 7, 11, 12, 13, 14, 0, 7, 0, 0, 0, -1,
|
||||||
/*TINY*/ 0, 0, 0, 3, 4, 5, 6, 7, 5, 9, 7, 3, 4, 5, 7, 0, 7, 0, 0, 0, 0,
|
/*TINY*/ 0, 0, 0, 3, 4, 5, 6, 7, 5, 9, 7, 3, 4, 5, 7, 0, 7, 0, 0, 0, -1,
|
||||||
/*SMAL*/ 0, 0, 0, 0, 4, 5, 6, 7, 5, 9, 7, 3, 4, 5, 7, 0, 7, 0, 0, 0, 0,
|
/*SMAL*/ 0, 0, 0, 0, 4, 5, 6, 7, 5, 9, 7, 3, 4, 5, 7, 0, 7, 0, 0, 0, -1,
|
||||||
/*INT */ 0, 0, 0, 0, 0, 5, 6, 7, 5, 9, 7, 4, 4, 5, 7, 0, 7, 0, 0, 0, 0,
|
/*INT */ 0, 0, 0, 0, 0, 5, 6, 7, 5, 9, 7, 4, 4, 5, 7, 0, 7, 0, 0, 0, -1,
|
||||||
/*BIGI*/ 0, 0, 0, 0, 0, 0, 6, 7, 5, 9, 7, 5, 5, 5, 7, 0, 7, 0, 0, 0, 0,
|
/*BIGI*/ 0, 0, 0, 0, 0, 0, 6, 7, 5, 9, 7, 5, 5, 5, 7, 0, 7, 0, 0, 0, -1,
|
||||||
/*FLOA*/ 0, 0, 0, 0, 0, 0, 0, 7, 7, 6, 7, 6, 6, 6, 6, 0, 7, 0, 0, 0, 0,
|
/*FLOA*/ 0, 0, 0, 0, 0, 0, 0, 7, 7, 6, 7, 6, 6, 6, 6, 0, 7, 0, 0, 0, -1,
|
||||||
/*DOUB*/ 0, 0, 0, 0, 0, 0, 0, 0, 7, 7, 7, 7, 7, 7, 7, 0, 7, 0, 0, 0, 0,
|
/*DOUB*/ 0, 0, 0, 0, 0, 0, 0, 0, 7, 7, 7, 7, 7, 7, 7, 0, 7, 0, 0, 0, -1,
|
||||||
/*VARC*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 8, 7, 7, 7, 7, 0, 0, 0, 0, 0, 20,
|
/*VARC*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 8, 7, 7, 7, 7, 0, 0, 0, 0, 0, 20,
|
||||||
/*TIME*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 9, 9, 9, 7, 0, 7, 0, 0, 0, 0,
|
/*TIME*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 9, 9, 9, 7, 0, 7, 0, 0, 0, -1,
|
||||||
/*NCHA*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 7, 7, 7, 0, 0, 0, 0, 0, 0,
|
/*NCHA*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 7, 7, 7, 0, 0, 0, 0, 0, -1,
|
||||||
/*UTIN*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 13, 14, 0, 7, 0, 0, 0, 0,
|
/*UTIN*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 13, 14, 0, 7, 0, 0, 0, -1,
|
||||||
/*USMA*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13, 14, 0, 7, 0, 0, 0, 0,
|
/*USMA*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13, 14, 0, 7, 0, 0, 0, -1,
|
||||||
/*UINT*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 14, 0, 7, 0, 0, 0, 0,
|
/*UINT*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 14, 0, 7, 0, 0, 0, -1,
|
||||||
/*UBIG*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0,
|
/*UBIG*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, -1,
|
||||||
/*JSON*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
/*JSON*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1,
|
||||||
/*VARB*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
/*VARB*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1,
|
||||||
/*DECI*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
/*DECI*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1,
|
||||||
/*BLOB*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
/*BLOB*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1,
|
||||||
/*MEDB*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
/*MEDB*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1,
|
||||||
/*GEOM*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
|
/*GEOM*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
|
||||||
|
|
||||||
int32_t vectorGetConvertType(int32_t type1, int32_t type2) {
|
int32_t vectorGetConvertType(int32_t type1, int32_t type2) {
|
||||||
|
@ -1010,6 +1046,11 @@ int32_t vectorConvertCols(SScalarParam *pLeft, SScalarParam *pRight, SScalarPara
|
||||||
if (0 == type) {
|
if (0 == type) {
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
|
if (-1 == type) {
|
||||||
|
sclError("invalid convert type1:%d, type2:%d", GET_PARAM_TYPE(param1), GET_PARAM_TYPE(param2));
|
||||||
|
terrno = TSDB_CODE_SCALAR_CONVERT_ERROR;
|
||||||
|
return TSDB_CODE_SCALAR_CONVERT_ERROR;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (type != GET_PARAM_TYPE(param1)) {
|
if (type != GET_PARAM_TYPE(param1)) {
|
||||||
|
@ -1753,7 +1794,9 @@ void vectorCompareImpl(SScalarParam *pLeft, SScalarParam *pRight, SScalarParam *
|
||||||
param1 = pLeft;
|
param1 = pLeft;
|
||||||
param2 = pRight;
|
param2 = pRight;
|
||||||
} else {
|
} else {
|
||||||
vectorConvertCols(pLeft, pRight, &pLeftOut, &pRightOut, startIndex, numOfRows);
|
if (vectorConvertCols(pLeft, pRight, &pLeftOut, &pRightOut, startIndex, numOfRows)) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
param1 = (pLeftOut.columnData != NULL) ? &pLeftOut : pLeft;
|
param1 = (pLeftOut.columnData != NULL) ? &pLeftOut : pLeft;
|
||||||
param2 = (pRightOut.columnData != NULL) ? &pRightOut : pRight;
|
param2 = (pRightOut.columnData != NULL) ? &pRightOut : pRight;
|
||||||
}
|
}
|
||||||
|
|
|
@ -48,11 +48,12 @@ int32_t streamBroadcastToChildren(SStreamTask* pTask, const SSDataBlock* pBlock)
|
||||||
|
|
||||||
int32_t tEncodeStreamRetrieveReq(SEncoder* pEncoder, const SStreamRetrieveReq* pReq);
|
int32_t tEncodeStreamRetrieveReq(SEncoder* pEncoder, const SStreamRetrieveReq* pReq);
|
||||||
|
|
||||||
int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pData);
|
int32_t streamSaveTasks(SStreamMeta* pMeta, int64_t checkpointId);
|
||||||
int32_t streamDispatchCheckMsg(SStreamTask* pTask, const SStreamTaskCheckReq* pReq, int32_t nodeId, SEpSet* pEpSet);
|
int32_t streamDispatchCheckMsg(SStreamTask* pTask, const SStreamTaskCheckReq* pReq, int32_t nodeId, SEpSet* pEpSet);
|
||||||
int32_t streamDispatchCheckpointMsg(SStreamTask* pTask, const SStreamCheckpointReq* pReq, int32_t nodeId, SEpSet* pEpSet);
|
int32_t streamDispatchCheckpointMsg(SStreamTask* pTask, const SStreamCheckpointReq* pReq, int32_t nodeId, SEpSet* pEpSet);
|
||||||
int32_t streamTaskSendCheckpointRsp(SStreamTask* pTask);
|
int32_t streamTaskSendCheckpointRsp(SStreamTask* pTask);
|
||||||
int32_t streamTaskSendCheckpointSourceRsp(SStreamTask* pTask);
|
int32_t streamTaskSendCheckpointSourceRsp(SStreamTask* pTask);
|
||||||
|
int32_t streamTaskGetNumOfDownstream(const SStreamTask* pTask);
|
||||||
|
|
||||||
int32_t extractBlocksFromInputQ(SStreamTask* pTask, SStreamQueueItem** pInput, int32_t* numOfBlocks, const char* id);
|
int32_t extractBlocksFromInputQ(SStreamTask* pTask, SStreamQueueItem** pInput, int32_t* numOfBlocks, const char* id);
|
||||||
SStreamQueueItem* streamMergeQueueItem(SStreamQueueItem* dst, SStreamQueueItem* pElem);
|
SStreamQueueItem* streamMergeQueueItem(SStreamQueueItem* dst, SStreamQueueItem* pElem);
|
||||||
|
|
|
@ -174,7 +174,7 @@ int32_t streamTaskEnqueueBlocks(SStreamTask* pTask, const SStreamDispatchReq* pR
|
||||||
pRsp->contLen = sizeof(SMsgHead) + sizeof(SStreamDispatchRsp);
|
pRsp->contLen = sizeof(SMsgHead) + sizeof(SStreamDispatchRsp);
|
||||||
tmsgSendRsp(pRsp);
|
tmsgSendRsp(pRsp);
|
||||||
|
|
||||||
return status == TASK_INPUT_STATUS__NORMAL ? 0 : -1;
|
return (status == TASK_INPUT_STATUS__NORMAL) ? 0 : -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t streamTaskEnqueueRetrieve(SStreamTask* pTask, SStreamRetrieveReq* pReq, SRpcMsg* pRsp) {
|
int32_t streamTaskEnqueueRetrieve(SStreamTask* pTask, SStreamRetrieveReq* pReq, SRpcMsg* pRsp) {
|
||||||
|
@ -239,7 +239,8 @@ int32_t streamProcessDispatchMsg(SStreamTask* pTask, SStreamDispatchReq* pReq, S
|
||||||
qDebug("s-task:%s receive dispatch msg from taskId:0x%x(vgId:%d), msgLen:%" PRId64, pTask->id.idStr,
|
qDebug("s-task:%s receive dispatch msg from taskId:0x%x(vgId:%d), msgLen:%" PRId64, pTask->id.idStr,
|
||||||
pReq->upstreamTaskId, pReq->upstreamNodeId, pReq->totalLen);
|
pReq->upstreamTaskId, pReq->upstreamNodeId, pReq->totalLen);
|
||||||
|
|
||||||
// todo add the input queue buffer limitation
|
// if current task has received the checkpoint req from the upstream t#1, the msg from t#1 should all blocked
|
||||||
|
|
||||||
streamTaskEnqueueBlocks(pTask, pReq, pRsp);
|
streamTaskEnqueueBlocks(pTask, pReq, pRsp);
|
||||||
tDeleteStreamDispatchReq(pReq);
|
tDeleteStreamDispatchReq(pReq);
|
||||||
|
|
||||||
|
@ -254,69 +255,6 @@ int32_t streamProcessDispatchMsg(SStreamTask* pTask, SStreamDispatchReq* pReq, S
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
// todo record the idle time for dispatch data
|
|
||||||
int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, int32_t code) {
|
|
||||||
if (code != TSDB_CODE_SUCCESS) {
|
|
||||||
// dispatch message failed: network error, or node not available.
|
|
||||||
// in case of the input queue is full, the code will be TSDB_CODE_SUCCESS, the and pRsp>inputStatus will be set
|
|
||||||
// flag. here we need to retry dispatch this message to downstream task immediately. handle the case the failure
|
|
||||||
// happened too fast. todo handle the shuffle dispatch failure
|
|
||||||
qError("s-task:%s failed to dispatch msg to task:0x%x, code:%s, retry cnt:%d", pTask->id.idStr,
|
|
||||||
pRsp->downstreamTaskId, tstrerror(code), ++pTask->msgInfo.retryCount);
|
|
||||||
int32_t ret = streamDispatchAllBlocks(pTask, pTask->msgInfo.pData);
|
|
||||||
if (ret != TSDB_CODE_SUCCESS) {
|
|
||||||
}
|
|
||||||
|
|
||||||
return TSDB_CODE_SUCCESS;
|
|
||||||
}
|
|
||||||
|
|
||||||
qDebug("s-task:%s receive dispatch rsp, output status:%d code:%d", pTask->id.idStr, pRsp->inputStatus, code);
|
|
||||||
|
|
||||||
// there are other dispatch message not response yet
|
|
||||||
if (pTask->outputType == TASK_OUTPUT__SHUFFLE_DISPATCH) {
|
|
||||||
int32_t leftRsp = atomic_sub_fetch_32(&pTask->shuffleDispatcher.waitingRspCnt, 1);
|
|
||||||
qDebug("s-task:%s is shuffle, left waiting rsp %d", pTask->id.idStr, leftRsp);
|
|
||||||
if (leftRsp > 0) {
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pTask->msgInfo.retryCount = 0;
|
|
||||||
ASSERT(pTask->outputStatus == TASK_OUTPUT_STATUS__WAIT);
|
|
||||||
|
|
||||||
qDebug("s-task:%s output status is set to:%d", pTask->id.idStr, pTask->outputStatus);
|
|
||||||
|
|
||||||
// the input queue of the (down stream) task that receive the output data is full,
|
|
||||||
// so the TASK_INPUT_STATUS_BLOCKED is rsp
|
|
||||||
// todo blocking the output status
|
|
||||||
if (pRsp->inputStatus == TASK_INPUT_STATUS__BLOCKED) {
|
|
||||||
pTask->msgInfo.blockingTs = taosGetTimestampMs(); // record the blocking start time
|
|
||||||
|
|
||||||
int32_t waitDuration = 300; // 300 ms
|
|
||||||
qError("s-task:%s inputQ of downstream task:0x%x is full, time:%" PRId64 "wait for %dms and retry dispatch data",
|
|
||||||
pTask->id.idStr, pRsp->downstreamTaskId, pTask->msgInfo.blockingTs, waitDuration);
|
|
||||||
streamRetryDispatchStreamBlock(pTask, waitDuration);
|
|
||||||
} else { // pipeline send data in output queue
|
|
||||||
// this message has been sent successfully, let's try next one.
|
|
||||||
destroyStreamDataBlock(pTask->msgInfo.pData);
|
|
||||||
pTask->msgInfo.pData = NULL;
|
|
||||||
|
|
||||||
if (pTask->msgInfo.blockingTs != 0) {
|
|
||||||
int64_t el = taosGetTimestampMs() - pTask->msgInfo.blockingTs;
|
|
||||||
qDebug("s-task:%s resume to normal from inputQ blocking, idle time:%" PRId64 "ms", pTask->id.idStr, el);
|
|
||||||
pTask->msgInfo.blockingTs = 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
// now ready for next data output
|
|
||||||
atomic_store_8(&pTask->outputStatus, TASK_OUTPUT_STATUS__NORMAL);
|
|
||||||
|
|
||||||
// otherwise, continue dispatch the first block to down stream task in pipeline
|
|
||||||
streamDispatchStreamBlock(pTask);
|
|
||||||
}
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
int32_t streamProcessRunReq(SStreamTask* pTask) {
|
int32_t streamProcessRunReq(SStreamTask* pTask) {
|
||||||
if (streamTryExec(pTask) < 0) {
|
if (streamTryExec(pTask) < 0) {
|
||||||
return -1;
|
return -1;
|
||||||
|
|
|
@ -219,7 +219,7 @@ int32_t streamProcessCheckpointReq(SStreamTask* pTask, SStreamCheckpointReq* pRe
|
||||||
// anymore
|
// anymore
|
||||||
ASSERT(taosArrayGetSize(pTask->pUpstreamEpInfoList) > 0);
|
ASSERT(taosArrayGetSize(pTask->pUpstreamEpInfoList) > 0);
|
||||||
|
|
||||||
// there are still some upstream tasks not send checkpoint request
|
// there are still some upstream tasks not send checkpoint request, do nothing and wait for then
|
||||||
int32_t notReady = streamAlignCheckpoint(pTask, checkpointId, childId);
|
int32_t notReady = streamAlignCheckpoint(pTask, checkpointId, childId);
|
||||||
if (notReady > 0) {
|
if (notReady > 0) {
|
||||||
int32_t num = taosArrayGetSize(pTask->pUpstreamEpInfoList);
|
int32_t num = taosArrayGetSize(pTask->pUpstreamEpInfoList);
|
||||||
|
@ -230,12 +230,13 @@ int32_t streamProcessCheckpointReq(SStreamTask* pTask, SStreamCheckpointReq* pRe
|
||||||
|
|
||||||
qDebug("s-task:%s received checkpoint req, all upstream sent checkpoint msg, dispatch checkpoint msg to downstream",
|
qDebug("s-task:%s received checkpoint req, all upstream sent checkpoint msg, dispatch checkpoint msg to downstream",
|
||||||
pTask->id.idStr);
|
pTask->id.idStr);
|
||||||
pTask->checkpointNotReadyTasks = (pTask->outputType == TASK_OUTPUT__FIXED_DISPATCH)
|
|
||||||
? 1
|
// set the needed checked downstream tasks, only when all downstream tasks do checkpoint complete, this node
|
||||||
: taosArrayGetSize(pTask->shuffleDispatcher.dbInfo.pVgroupInfos);
|
// can start local checkpoint procedure
|
||||||
|
pTask->checkpointNotReadyTasks = streamTaskGetNumOfDownstream(pTask);
|
||||||
|
|
||||||
// if all upstreams are ready for generating checkpoint, set the status to be TASK_STATUS__CK_READY
|
// if all upstreams are ready for generating checkpoint, set the status to be TASK_STATUS__CK_READY
|
||||||
// 2. dispatch check point msg to all downstream tasks
|
// dispatch check point msg to all downstream tasks
|
||||||
streamTaskDispatchCheckpointMsg(pTask, checkpointId);
|
streamTaskDispatchCheckpointMsg(pTask, checkpointId);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -257,8 +258,38 @@ int32_t streamProcessCheckpointRsp(SStreamMeta* pMeta, SStreamTask* pTask) {
|
||||||
appendCheckpointIntoInputQ(pTask);
|
appendCheckpointIntoInputQ(pTask);
|
||||||
streamSchedExec(pTask);
|
streamSchedExec(pTask);
|
||||||
} else {
|
} else {
|
||||||
qDebug("s-task:%s %d downstream tasks are not ready, wait", pTask->id.idStr, notReady);
|
int32_t total = streamTaskGetNumOfDownstream(pTask);
|
||||||
|
qDebug("s-task:%s %d/%d downstream tasks are not ready, wait", pTask->id.idStr, notReady, total);
|
||||||
}
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
int32_t streamSaveTasks(SStreamMeta* pMeta, int64_t checkpointId) {
|
||||||
|
taosWLockLatch(&pMeta->lock);
|
||||||
|
|
||||||
|
for (int32_t i = 0; i < taosArrayGetSize(pMeta->pTaskList); ++i) {
|
||||||
|
uint32_t* pTaskId = taosArrayGet(pMeta->pTaskList, i);
|
||||||
|
SStreamTask* p = *(SStreamTask**)taosHashGet(pMeta->pTasks, pTaskId, sizeof(*pTaskId));
|
||||||
|
|
||||||
|
ASSERT(p->chkInfo.keptCheckpointId < p->checkpointingId && p->checkpointingId == checkpointId);
|
||||||
|
p->chkInfo.keptCheckpointId = p->checkpointingId;
|
||||||
|
|
||||||
|
streamMetaSaveTask(pMeta, p);
|
||||||
|
qDebug("vgId:%d s-task:%s commit task status after checkpoint completed, checkpointId:%" PRId64
|
||||||
|
", ver:%" PRId64 " currentVer:%" PRId64,
|
||||||
|
pMeta->vgId, p->id.idStr, checkpointId, p->chkInfo.version, p->chkInfo.currentVer);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (streamMetaCommit(pMeta) < 0) {
|
||||||
|
taosWUnLockLatch(&pMeta->lock);
|
||||||
|
qError("vgId:%d failed to commit stream meta after do checkpoint, checkpointId:%" PRId64", since %s",
|
||||||
|
pMeta->vgId, checkpointId, terrstr());
|
||||||
|
return -1;
|
||||||
|
} else {
|
||||||
|
taosWUnLockLatch(&pMeta->lock);
|
||||||
|
qInfo("vgId:%d commit stream meta after do checkpoint, checkpointId:%. DONE" PRId64, pMeta->vgId, checkpointId);
|
||||||
|
}
|
||||||
|
|
||||||
|
return TSDB_CODE_SUCCESS;
|
||||||
|
}
|
||||||
|
|
|
@ -120,6 +120,7 @@ SStreamDataSubmit* streamDataSubmitNew(SPackedData* pData, int32_t type) {
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pDataSubmit->ver = pData->ver;
|
||||||
pDataSubmit->submit = *pData;
|
pDataSubmit->submit = *pData;
|
||||||
*pDataSubmit->dataRef = 1; // initialize the reference count to be 1
|
*pDataSubmit->dataRef = 1; // initialize the reference count to be 1
|
||||||
pDataSubmit->type = type;
|
pDataSubmit->type = type;
|
||||||
|
|
|
@ -238,7 +238,7 @@ int32_t streamDispatchCheckMsg(SStreamTask* pTask, const SStreamTaskCheckReq* pR
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pData) {
|
static int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pData) {
|
||||||
int32_t code = 0;
|
int32_t code = 0;
|
||||||
|
|
||||||
int32_t numOfBlocks = taosArrayGetSize(pData->blocks);
|
int32_t numOfBlocks = taosArrayGetSize(pData->blocks);
|
||||||
|
@ -807,3 +807,66 @@ int32_t streamAddCheckpointRspMsg(SStreamCheckpointReq* pReq, SRpcHandleInfo* pR
|
||||||
|
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// todo record the idle time for dispatch data
|
||||||
|
int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, int32_t code) {
|
||||||
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
|
// dispatch message failed: network error, or node not available.
|
||||||
|
// in case of the input queue is full, the code will be TSDB_CODE_SUCCESS, the and pRsp>inputStatus will be set
|
||||||
|
// flag. here we need to retry dispatch this message to downstream task immediately. handle the case the failure
|
||||||
|
// happened too fast. todo handle the shuffle dispatch failure
|
||||||
|
qError("s-task:%s failed to dispatch msg to task:0x%x, code:%s, retry cnt:%d", pTask->id.idStr,
|
||||||
|
pRsp->downstreamTaskId, tstrerror(code), ++pTask->msgInfo.retryCount);
|
||||||
|
int32_t ret = streamDispatchAllBlocks(pTask, pTask->msgInfo.pData);
|
||||||
|
if (ret != TSDB_CODE_SUCCESS) {
|
||||||
|
}
|
||||||
|
|
||||||
|
return TSDB_CODE_SUCCESS;
|
||||||
|
}
|
||||||
|
|
||||||
|
qDebug("s-task:%s receive dispatch rsp, output status:%d code:%d", pTask->id.idStr, pRsp->inputStatus, code);
|
||||||
|
|
||||||
|
// there are other dispatch message not response yet
|
||||||
|
if (pTask->outputType == TASK_OUTPUT__SHUFFLE_DISPATCH) {
|
||||||
|
int32_t leftRsp = atomic_sub_fetch_32(&pTask->shuffleDispatcher.waitingRspCnt, 1);
|
||||||
|
qDebug("s-task:%s is shuffle, left waiting rsp %d", pTask->id.idStr, leftRsp);
|
||||||
|
if (leftRsp > 0) {
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pTask->msgInfo.retryCount = 0;
|
||||||
|
ASSERT(pTask->outputStatus == TASK_OUTPUT_STATUS__WAIT);
|
||||||
|
|
||||||
|
qDebug("s-task:%s output status is set to:%d", pTask->id.idStr, pTask->outputStatus);
|
||||||
|
|
||||||
|
// the input queue of the (down stream) task that receive the output data is full,
|
||||||
|
// so the TASK_INPUT_STATUS_BLOCKED is rsp
|
||||||
|
// todo blocking the output status
|
||||||
|
if (pRsp->inputStatus == TASK_INPUT_STATUS__BLOCKED) {
|
||||||
|
pTask->msgInfo.blockingTs = taosGetTimestampMs(); // record the blocking start time
|
||||||
|
|
||||||
|
int32_t waitDuration = 300; // 300 ms
|
||||||
|
qError("s-task:%s inputQ of downstream task:0x%x is full, time:%" PRId64 "wait for %dms and retry dispatch data",
|
||||||
|
pTask->id.idStr, pRsp->downstreamTaskId, pTask->msgInfo.blockingTs, waitDuration);
|
||||||
|
streamRetryDispatchStreamBlock(pTask, waitDuration);
|
||||||
|
} else { // pipeline send data in output queue
|
||||||
|
// this message has been sent successfully, let's try next one.
|
||||||
|
destroyStreamDataBlock(pTask->msgInfo.pData);
|
||||||
|
pTask->msgInfo.pData = NULL;
|
||||||
|
|
||||||
|
if (pTask->msgInfo.blockingTs != 0) {
|
||||||
|
int64_t el = taosGetTimestampMs() - pTask->msgInfo.blockingTs;
|
||||||
|
qDebug("s-task:%s resume to normal from inputQ blocking, idle time:%" PRId64 "ms", pTask->id.idStr, el);
|
||||||
|
pTask->msgInfo.blockingTs = 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
// now ready for next data output
|
||||||
|
atomic_store_8(&pTask->outputStatus, TASK_OUTPUT_STATUS__NORMAL);
|
||||||
|
|
||||||
|
// otherwise, continue dispatch the first block to down stream task in pipeline
|
||||||
|
streamDispatchStreamBlock(pTask);
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
|
@ -18,8 +18,6 @@
|
||||||
// maximum allowed processed block batches. One block may include several submit blocks
|
// maximum allowed processed block batches. One block may include several submit blocks
|
||||||
#define MAX_STREAM_RESULT_DUMP_THRESHOLD 100
|
#define MAX_STREAM_RESULT_DUMP_THRESHOLD 100
|
||||||
|
|
||||||
static int32_t updateCheckPointInfo(SStreamTask* pTask, int64_t checkpointId);
|
|
||||||
|
|
||||||
bool streamTaskShouldStop(const SStreamStatus* pStatus) {
|
bool streamTaskShouldStop(const SStreamStatus* pStatus) {
|
||||||
int32_t status = atomic_load_8((int8_t*)&pStatus->taskStatus);
|
int32_t status = atomic_load_8((int8_t*)&pStatus->taskStatus);
|
||||||
return (status == TASK_STATUS__STOP) || (status == TASK_STATUS__DROPPING);
|
return (status == TASK_STATUS__STOP) || (status == TASK_STATUS__DROPPING);
|
||||||
|
@ -314,10 +312,13 @@ static void waitForTaskIdle(SStreamTask* pTask, SStreamTask* pStreamTask) {
|
||||||
|
|
||||||
static int32_t streamTransferStateToStreamTask(SStreamTask* pTask) {
|
static int32_t streamTransferStateToStreamTask(SStreamTask* pTask) {
|
||||||
SStreamTask* pStreamTask = streamMetaAcquireTask(pTask->pMeta, pTask->streamTaskId.taskId);
|
SStreamTask* pStreamTask = streamMetaAcquireTask(pTask->pMeta, pTask->streamTaskId.taskId);
|
||||||
qDebug("s-task:%s scan history task end, update stream task:%s info, transfer exec state", pTask->id.idStr,
|
if (pStreamTask == NULL) {
|
||||||
pStreamTask->id.idStr);
|
qError("s-task:%s failed to find related stream task:0x%x, it may have been destoryed or closed",
|
||||||
|
pTask->id.idStr, pTask->streamTaskId.taskId);
|
||||||
// todo handle stream task is dropped here
|
return TSDB_CODE_STREAM_TASK_NOT_EXIST;
|
||||||
|
} else {
|
||||||
|
qDebug("s-task:%s scan history task end, update stream task:%s info, transfer exec state", pTask->id.idStr, pStreamTask->id.idStr);
|
||||||
|
}
|
||||||
|
|
||||||
ASSERT(pStreamTask != NULL && pStreamTask->historyTaskId.taskId == pTask->id.taskId);
|
ASSERT(pStreamTask != NULL && pStreamTask->historyTaskId.taskId == pTask->id.taskId);
|
||||||
STimeWindow* pTimeWindow = &pStreamTask->dataRange.window;
|
STimeWindow* pTimeWindow = &pStreamTask->dataRange.window;
|
||||||
|
@ -432,6 +433,9 @@ int32_t streamExecForAll(SStreamTask* pTask) {
|
||||||
ASSERT(batchSize == 0);
|
ASSERT(batchSize == 0);
|
||||||
if (pTask->info.fillHistory && pTask->status.transferState) {
|
if (pTask->info.fillHistory && pTask->status.transferState) {
|
||||||
int32_t code = streamTransferStateToStreamTask(pTask);
|
int32_t code = streamTransferStateToStreamTask(pTask);
|
||||||
|
if (code != TSDB_CODE_SUCCESS) { // todo handle this
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// no data in the inputQ, return now
|
// no data in the inputQ, return now
|
||||||
|
@ -514,7 +518,7 @@ int32_t streamTryExec(SStreamTask* pTask) {
|
||||||
|
|
||||||
if (schedStatus == TASK_SCHED_STATUS__WAITING) {
|
if (schedStatus == TASK_SCHED_STATUS__WAITING) {
|
||||||
int32_t code = streamExecForAll(pTask);
|
int32_t code = streamExecForAll(pTask);
|
||||||
if (code < 0) {
|
if (code < 0) { // todo this status shoudl be removed
|
||||||
atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__FAILED);
|
atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__FAILED);
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
@ -532,7 +536,8 @@ int32_t streamTryExec(SStreamTask* pTask) {
|
||||||
|
|
||||||
if (remain == 0) { // all tasks are in TASK_STATUS__CK_READY state
|
if (remain == 0) { // all tasks are in TASK_STATUS__CK_READY state
|
||||||
streamBackendDoCheckpoint(pMeta, pTask->checkpointingId);
|
streamBackendDoCheckpoint(pMeta, pTask->checkpointingId);
|
||||||
qDebug("vgId:%d do vnode wide checkpoint completed, checkpointId:%" PRId64, pMeta->vgId,
|
streamSaveTasks(pMeta, pTask->checkpointingId);
|
||||||
|
qDebug("vgId:%d vnode wide checkpoint completed, save all tasks status, checkpointId:%" PRId64, pMeta->vgId,
|
||||||
pTask->checkpointingId);
|
pTask->checkpointingId);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -543,29 +548,10 @@ int32_t streamTryExec(SStreamTask* pTask) {
|
||||||
code = streamTaskSendCheckpointRsp(pTask);
|
code = streamTaskSendCheckpointRsp(pTask);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (code == TSDB_CODE_SUCCESS) {
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
taosWLockLatch(&pTask->pMeta->lock);
|
|
||||||
|
|
||||||
ASSERT(pTask->chkInfo.keptCheckpointId < pTask->checkpointingId);
|
|
||||||
pTask->chkInfo.keptCheckpointId = pTask->checkpointingId;
|
|
||||||
|
|
||||||
streamMetaSaveTask(pTask->pMeta, pTask);
|
|
||||||
if (streamMetaCommit(pTask->pMeta) < 0) {
|
|
||||||
taosWUnLockLatch(&pTask->pMeta->lock);
|
|
||||||
qError("s-task:%s failed to commit stream meta after do checkpoint, checkpointId:%" PRId64 ", ver:%" PRId64
|
|
||||||
", since %s",
|
|
||||||
pTask->id.idStr, pTask->chkInfo.keptCheckpointId, pTask->chkInfo.version, terrstr());
|
|
||||||
return -1;
|
|
||||||
} else {
|
|
||||||
taosWUnLockLatch(&pTask->pMeta->lock);
|
|
||||||
}
|
|
||||||
|
|
||||||
qInfo("vgId:%d s-task:%s commit task status after checkpoint completed, checkpointId:%" PRId64 ", ver:%" PRId64
|
|
||||||
" currentVer:%" PRId64,
|
|
||||||
pMeta->vgId, pTask->id.idStr, pTask->chkInfo.keptCheckpointId, pTask->chkInfo.version,
|
|
||||||
pTask->chkInfo.currentVer);
|
|
||||||
} else {
|
|
||||||
// todo: let's retry send rsp to upstream/mnode
|
// todo: let's retry send rsp to upstream/mnode
|
||||||
|
qError("s-task:%s failed to send checkpoint rsp to upstream, checkpointId:%"PRId64", code:%s",
|
||||||
|
pTask->id.idStr, pTask->checkpointingId, tstrerror(code));
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
if (!taosQueueEmpty(pTask->inputQueue->queue) && (!streamTaskShouldStop(&pTask->status)) &&
|
if (!taosQueueEmpty(pTask->inputQueue->queue) && (!streamTaskShouldStop(&pTask->status)) &&
|
||||||
|
|
|
@ -254,3 +254,18 @@ void tFreeStreamTask(SStreamTask* pTask) {
|
||||||
|
|
||||||
taosMemoryFree(pTask);
|
taosMemoryFree(pTask);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
int32_t streamTaskGetNumOfDownstream(const SStreamTask* pTask) {
|
||||||
|
if (pTask->info.taskLevel == TASK_LEVEL__SINK) {
|
||||||
|
return 0;
|
||||||
|
} else if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) {
|
||||||
|
return 1;
|
||||||
|
} else {
|
||||||
|
if (pTask->outputType == TASK_OUTPUT__FIXED_DISPATCH) {
|
||||||
|
return 1;
|
||||||
|
} else {
|
||||||
|
SArray* vgInfo = pTask->shuffleDispatcher.dbInfo.pVgroupInfos;
|
||||||
|
return taosArrayGetSize(vgInfo);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
|
@ -49,7 +49,8 @@ struct SStreamFileState {
|
||||||
typedef SRowBuffPos SRowBuffInfo;
|
typedef SRowBuffPos SRowBuffInfo;
|
||||||
|
|
||||||
SStreamFileState* streamFileStateInit(int64_t memSize, uint32_t keySize, uint32_t rowSize, uint32_t selectRowSize,
|
SStreamFileState* streamFileStateInit(int64_t memSize, uint32_t keySize, uint32_t rowSize, uint32_t selectRowSize,
|
||||||
GetTsFun fp, void* pFile, TSKEY delMark, const char* idstr) {
|
GetTsFun fp, void* pFile, TSKEY delMark, const char* taskId,
|
||||||
|
int64_t checkpointId) {
|
||||||
if (memSize <= 0) {
|
if (memSize <= 0) {
|
||||||
memSize = DEFAULT_MAX_STREAM_BUFFER_SIZE;
|
memSize = DEFAULT_MAX_STREAM_BUFFER_SIZE;
|
||||||
}
|
}
|
||||||
|
@ -83,9 +84,9 @@ SStreamFileState* streamFileStateInit(int64_t memSize, uint32_t keySize, uint32_
|
||||||
pFileState->deleteMark = delMark;
|
pFileState->deleteMark = delMark;
|
||||||
pFileState->flushMark = INT64_MIN;
|
pFileState->flushMark = INT64_MIN;
|
||||||
pFileState->maxTs = INT64_MIN;
|
pFileState->maxTs = INT64_MIN;
|
||||||
pFileState->id = taosStrdup(idstr);
|
pFileState->id = taosStrdup(taskId);
|
||||||
|
|
||||||
recoverSnapshot(pFileState);
|
recoverSnapshot(pFileState, checkpointId);
|
||||||
return pFileState;
|
return pFileState;
|
||||||
|
|
||||||
_error:
|
_error:
|
||||||
|
@ -479,7 +480,7 @@ int32_t deleteExpiredCheckPoint(SStreamFileState* pFileState, TSKEY mark) {
|
||||||
return code;
|
return code;
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t recoverSnapshot(SStreamFileState* pFileState) {
|
int32_t recoverSnapshot(SStreamFileState* pFileState, int64_t ckId) {
|
||||||
int32_t code = TSDB_CODE_SUCCESS;
|
int32_t code = TSDB_CODE_SUCCESS;
|
||||||
if (pFileState->maxTs != INT64_MIN) {
|
if (pFileState->maxTs != INT64_MIN) {
|
||||||
int64_t mark = (INT64_MIN + pFileState->deleteMark >= pFileState->maxTs)
|
int64_t mark = (INT64_MIN + pFileState->deleteMark >= pFileState->maxTs)
|
||||||
|
|
|
@ -233,7 +233,11 @@ int tdbBtreeDelete(SBTree *pBt, const void *pKey, int kLen, TXN *pTxn) {
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
tdbBtcOpen(&btc, pBt, pTxn);
|
tdbBtcOpen(&btc, pBt, pTxn);
|
||||||
|
/*
|
||||||
|
btc.coder.ofps = taosArrayInit(8, sizeof(SPage *));
|
||||||
|
// btc.coder.ofps = taosArrayInit(8, sizeof(SPgno));
|
||||||
|
//pBtc->coder.ofps = taosArrayInit(8, sizeof(SPage *));
|
||||||
|
*/
|
||||||
tdbTrace("tdb delete, btc: %p, pTxn: %p", &btc, pTxn);
|
tdbTrace("tdb delete, btc: %p, pTxn: %p", &btc, pTxn);
|
||||||
|
|
||||||
// move the cursor
|
// move the cursor
|
||||||
|
@ -254,7 +258,18 @@ int tdbBtreeDelete(SBTree *pBt, const void *pKey, int kLen, TXN *pTxn) {
|
||||||
tdbBtcClose(&btc);
|
tdbBtcClose(&btc);
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
/*
|
||||||
|
SArray *ofps = btc.coder.ofps;
|
||||||
|
if (ofps) {
|
||||||
|
for (int i = 0; i < TARRAY_SIZE(ofps); ++i) {
|
||||||
|
SPage *ofp = *(SPage **)taosArrayGet(ofps, i);
|
||||||
|
tdbPagerInsertFreePage(btc.pBt->pPager, ofp, btc.pTxn);
|
||||||
|
}
|
||||||
|
|
||||||
|
taosArrayDestroy(ofps);
|
||||||
|
btc.coder.ofps = NULL;
|
||||||
|
}
|
||||||
|
*/
|
||||||
tdbBtcClose(&btc);
|
tdbBtcClose(&btc);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@ -563,6 +578,7 @@ static int tdbBtreeBalanceNonRoot(SBTree *pBt, SPage *pParent, int idx, TXN *pTx
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// copy the parent key out if child pages are not leaf page
|
// copy the parent key out if child pages are not leaf page
|
||||||
|
// childNotLeaf = !(TDB_BTREE_PAGE_IS_LEAF(pOlds[0]) || TDB_BTREE_PAGE_IS_OVFL(pOlds[0]));
|
||||||
childNotLeaf = !TDB_BTREE_PAGE_IS_LEAF(pOlds[0]);
|
childNotLeaf = !TDB_BTREE_PAGE_IS_LEAF(pOlds[0]);
|
||||||
if (childNotLeaf) {
|
if (childNotLeaf) {
|
||||||
for (int i = 0; i < nOlds; i++) {
|
for (int i = 0; i < nOlds; i++) {
|
||||||
|
@ -592,7 +608,30 @@ static int tdbBtreeBalanceNonRoot(SBTree *pBt, SPage *pParent, int idx, TXN *pTx
|
||||||
for (int i = 0; i < nOlds; i++) {
|
for (int i = 0; i < nOlds; i++) {
|
||||||
nCells = TDB_PAGE_TOTAL_CELLS(pParent);
|
nCells = TDB_PAGE_TOTAL_CELLS(pParent);
|
||||||
if (sIdx < nCells) {
|
if (sIdx < nCells) {
|
||||||
|
bool destroyOfps = false;
|
||||||
|
if (!childNotLeaf) {
|
||||||
|
if (!pParent->pPager->ofps) {
|
||||||
|
pParent->pPager->ofps = taosArrayInit(8, sizeof(SPage *));
|
||||||
|
destroyOfps = true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
tdbPageDropCell(pParent, sIdx, pTxn, pBt);
|
tdbPageDropCell(pParent, sIdx, pTxn, pBt);
|
||||||
|
|
||||||
|
if (!childNotLeaf) {
|
||||||
|
SArray *ofps = pParent->pPager->ofps;
|
||||||
|
if (ofps) {
|
||||||
|
for (int i = 0; i < TARRAY_SIZE(ofps); ++i) {
|
||||||
|
SPage *ofp = *(SPage **)taosArrayGet(ofps, i);
|
||||||
|
tdbPagerInsertFreePage(pParent->pPager, ofp, pTxn);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (destroyOfps) {
|
||||||
|
taosArrayDestroy(ofps);
|
||||||
|
pParent->pPager->ofps = NULL;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
((SIntHdr *)pParent->pData)->pgno = 0;
|
((SIntHdr *)pParent->pData)->pgno = 0;
|
||||||
}
|
}
|
||||||
|
@ -861,6 +900,8 @@ static int tdbBtreeBalanceNonRoot(SBTree *pBt, SPage *pParent, int idx, TXN *pTx
|
||||||
if (!TDB_BTREE_PAGE_IS_LEAF(pNews[0])) {
|
if (!TDB_BTREE_PAGE_IS_LEAF(pNews[0])) {
|
||||||
((SIntHdr *)(pParent->pData))->pgno = ((SIntHdr *)(pNews[0]->pData))->pgno;
|
((SIntHdr *)(pParent->pData))->pgno = ((SIntHdr *)(pNews[0]->pData))->pgno;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
tdbPagerInsertFreePage(pBt->pPager, pNews[0], pTxn);
|
||||||
}
|
}
|
||||||
|
|
||||||
for (int i = 0; i < 3; i++) {
|
for (int i = 0; i < 3; i++) {
|
||||||
|
@ -870,6 +911,9 @@ static int tdbBtreeBalanceNonRoot(SBTree *pBt, SPage *pParent, int idx, TXN *pTx
|
||||||
}
|
}
|
||||||
|
|
||||||
for (pageIdx = 0; pageIdx < nOlds; ++pageIdx) {
|
for (pageIdx = 0; pageIdx < nOlds; ++pageIdx) {
|
||||||
|
if (pageIdx >= nNews) {
|
||||||
|
tdbPagerInsertFreePage(pBt->pPager, pOlds[pageIdx], pTxn);
|
||||||
|
}
|
||||||
tdbPagerReturnPage(pBt->pPager, pOlds[pageIdx], pTxn);
|
tdbPagerReturnPage(pBt->pPager, pOlds[pageIdx], pTxn);
|
||||||
}
|
}
|
||||||
for (; pageIdx < nNews; ++pageIdx) {
|
for (; pageIdx < nNews; ++pageIdx) {
|
||||||
|
@ -1311,7 +1355,11 @@ static int tdbBtreeDecodePayload(SPage *pPage, const SCell *pCell, int nHeader,
|
||||||
if (ret < 0) {
|
if (ret < 0) {
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
/*
|
||||||
|
if (pDecoder->ofps) {
|
||||||
|
taosArrayPush(pDecoder->ofps, &ofp);
|
||||||
|
}
|
||||||
|
*/
|
||||||
ofpCell = tdbPageGetCell(ofp, 0);
|
ofpCell = tdbPageGetCell(ofp, 0);
|
||||||
|
|
||||||
if (nLeft <= ofp->maxLocal - sizeof(SPgno)) {
|
if (nLeft <= ofp->maxLocal - sizeof(SPgno)) {
|
||||||
|
@ -1346,11 +1394,17 @@ static int tdbBtreeDecodePayload(SPage *pPage, const SCell *pCell, int nHeader,
|
||||||
int lastKeyPageSpace = 0;
|
int lastKeyPageSpace = 0;
|
||||||
// load left key & val to ovpages
|
// load left key & val to ovpages
|
||||||
while (pgno != 0) {
|
while (pgno != 0) {
|
||||||
|
tdbTrace("tdb decode-ofp, pTxn: %p, pgno:%u by cell:%p", pTxn, pgno, pCell);
|
||||||
|
// printf("tdb decode-ofp, pTxn: %p, pgno:%u by cell:%p\n", pTxn, pgno, pCell);
|
||||||
ret = tdbLoadOvflPage(&pgno, &ofp, pTxn, pBt);
|
ret = tdbLoadOvflPage(&pgno, &ofp, pTxn, pBt);
|
||||||
if (ret < 0) {
|
if (ret < 0) {
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
/*
|
||||||
|
if (pDecoder->ofps) {
|
||||||
|
taosArrayPush(pDecoder->ofps, &ofp);
|
||||||
|
}
|
||||||
|
*/
|
||||||
ofpCell = tdbPageGetCell(ofp, 0);
|
ofpCell = tdbPageGetCell(ofp, 0);
|
||||||
|
|
||||||
int lastKeyPage = 0;
|
int lastKeyPage = 0;
|
||||||
|
@ -1518,8 +1572,8 @@ static int tdbBtreeCellSize(const SPage *pPage, SCell *pCell, int dropOfp, TXN *
|
||||||
|
|
||||||
if (pPage->vLen == TDB_VARIANT_LEN) {
|
if (pPage->vLen == TDB_VARIANT_LEN) {
|
||||||
if (!leaf) {
|
if (!leaf) {
|
||||||
tdbError("tdb/btree-cell-size: not a leaf page.");
|
tdbError("tdb/btree-cell-size: not a leaf page:%p, pgno:%" PRIu32 ".", pPage, TDB_PAGE_PGNO(pPage));
|
||||||
return -1;
|
// return -1;
|
||||||
}
|
}
|
||||||
nHeader += tdbGetVarInt(pCell + nHeader, &vLen);
|
nHeader += tdbGetVarInt(pCell + nHeader, &vLen);
|
||||||
} else if (leaf) {
|
} else if (leaf) {
|
||||||
|
@ -1559,8 +1613,27 @@ static int tdbBtreeCellSize(const SPage *pPage, SCell *pCell, int dropOfp, TXN *
|
||||||
bytes = ofp->maxLocal - sizeof(SPgno);
|
bytes = ofp->maxLocal - sizeof(SPgno);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SPgno origPgno = pgno;
|
||||||
memcpy(&pgno, ofpCell + bytes, sizeof(pgno));
|
memcpy(&pgno, ofpCell + bytes, sizeof(pgno));
|
||||||
|
|
||||||
|
ret = tdbPagerWrite(pBt->pPager, ofp);
|
||||||
|
if (ret < 0) {
|
||||||
|
tdbError("failed to write page since %s", terrstr());
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
/*
|
||||||
|
tdbPageDropCell(ofp, 0, pTxn, pBt);
|
||||||
|
*/
|
||||||
|
// SIntHdr *pIntHdr = (SIntHdr *)(ofp->pData);
|
||||||
|
// pIntHdr->flags = TDB_FLAG_ADD(0, TDB_BTREE_OVFL);
|
||||||
|
// pIntHdr->pgno = 0;
|
||||||
|
// ofp->pPager = NULL;
|
||||||
|
|
||||||
|
SArray *ofps = pPage->pPager->ofps;
|
||||||
|
if (ofps) {
|
||||||
|
taosArrayPush(ofps, &ofp);
|
||||||
|
}
|
||||||
|
|
||||||
tdbPagerReturnPage(pPage->pPager, ofp, pTxn);
|
tdbPagerReturnPage(pPage->pPager, ofp, pTxn);
|
||||||
|
|
||||||
nLeft -= bytes;
|
nLeft -= bytes;
|
||||||
|
@ -1980,6 +2053,11 @@ static int tdbBtcMoveDownward(SBTC *pBtc) {
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (TDB_BTREE_PAGE_IS_OVFL(pBtc->pPage)) {
|
||||||
|
tdbError("tdb/btc-move-downward: should not be a ovfl page here.");
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
if (pBtc->idx < TDB_PAGE_TOTAL_CELLS(pBtc->pPage)) {
|
if (pBtc->idx < TDB_PAGE_TOTAL_CELLS(pBtc->pPage)) {
|
||||||
pCell = tdbPageGetCell(pBtc->pPage, pBtc->idx);
|
pCell = tdbPageGetCell(pBtc->pPage, pBtc->idx);
|
||||||
pgno = ((SPgno *)pCell)[0];
|
pgno = ((SPgno *)pCell)[0];
|
||||||
|
@ -2068,8 +2146,27 @@ int tdbBtcDelete(SBTC *pBtc) {
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
bool destroyOfps = false;
|
||||||
|
if (!pBtc->pPage->pPager->ofps) {
|
||||||
|
pBtc->pPage->pPager->ofps = taosArrayInit(8, sizeof(SPage *));
|
||||||
|
destroyOfps = true;
|
||||||
|
}
|
||||||
|
|
||||||
tdbPageDropCell(pBtc->pPage, idx, pBtc->pTxn, pBtc->pBt);
|
tdbPageDropCell(pBtc->pPage, idx, pBtc->pTxn, pBtc->pBt);
|
||||||
|
|
||||||
|
SArray *ofps = pBtc->pPage->pPager->ofps;
|
||||||
|
if (ofps) {
|
||||||
|
for (int i = 0; i < TARRAY_SIZE(ofps); ++i) {
|
||||||
|
SPage *ofp = *(SPage **)taosArrayGet(ofps, i);
|
||||||
|
tdbPagerInsertFreePage(pBtc->pPage->pPager, ofp, pBtc->pTxn);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (destroyOfps) {
|
||||||
|
taosArrayDestroy(ofps);
|
||||||
|
pBtc->pPage->pPager->ofps = NULL;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// update interior page or do balance
|
// update interior page or do balance
|
||||||
if (idx == nCells - 1) {
|
if (idx == nCells - 1) {
|
||||||
if (idx) {
|
if (idx) {
|
||||||
|
@ -2113,6 +2210,8 @@ int tdbBtcDelete(SBTC *pBtc) {
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// printf("tdb/btc-delete: btree balance delete pgno: %d.\n", TDB_PAGE_PGNO(pBtc->pPage));
|
||||||
|
|
||||||
ret = tdbBtreeBalance(pBtc);
|
ret = tdbBtreeBalance(pBtc);
|
||||||
if (ret < 0) {
|
if (ret < 0) {
|
||||||
tdbError("tdb/btc-delete: btree balance failed with ret: %d.", ret);
|
tdbError("tdb/btc-delete: btree balance failed with ret: %d.", ret);
|
||||||
|
@ -2181,7 +2280,13 @@ int tdbBtcUpsert(SBTC *pBtc, const void *pKey, int kLen, const void *pData, int
|
||||||
tdbError("tdb/btc-upsert: page insert/update cell failed with ret: %d.", ret);
|
tdbError("tdb/btc-upsert: page insert/update cell failed with ret: %d.", ret);
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
/*
|
||||||
|
bool destroyOfps = false;
|
||||||
|
if (!pBtc->pPage->pPager->ofps) {
|
||||||
|
pBtc->pPage->pPager->ofps = taosArrayInit(8, sizeof(SPage *));
|
||||||
|
destroyOfps = true;
|
||||||
|
}
|
||||||
|
*/
|
||||||
// check balance
|
// check balance
|
||||||
if (pBtc->pPage->nOverflow > 0) {
|
if (pBtc->pPage->nOverflow > 0) {
|
||||||
ret = tdbBtreeBalance(pBtc);
|
ret = tdbBtreeBalance(pBtc);
|
||||||
|
@ -2190,7 +2295,20 @@ int tdbBtcUpsert(SBTC *pBtc, const void *pKey, int kLen, const void *pData, int
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
/*
|
||||||
|
SArray *ofps = pBtc->pPage->pPager->ofps;
|
||||||
|
if (ofps) {
|
||||||
|
for (int i = 0; i < TARRAY_SIZE(ofps); ++i) {
|
||||||
|
SPage *ofp = *(SPage **)taosArrayGet(ofps, i);
|
||||||
|
tdbPagerInsertFreePage(pBtc->pPage->pPager, ofp, pBtc->pTxn);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (destroyOfps) {
|
||||||
|
taosArrayDestroy(ofps);
|
||||||
|
pBtc->pPage->pPager->ofps = NULL;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
*/
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -70,6 +70,11 @@ int32_t tdbOpen(const char *dbname, int32_t szPage, int32_t pages, TDB **ppDb, i
|
||||||
if (ret < 0) {
|
if (ret < 0) {
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
ret = tdbTbOpen(TDB_FREEDB_NAME, sizeof(SPgno), 0, NULL, pDb, &pDb->pFreeDb, rollback);
|
||||||
|
if (ret < 0) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
*ppDb = pDb;
|
*ppDb = pDb;
|
||||||
|
@ -82,6 +87,7 @@ int tdbClose(TDB *pDb) {
|
||||||
if (pDb) {
|
if (pDb) {
|
||||||
#ifdef USE_MAINDB
|
#ifdef USE_MAINDB
|
||||||
if (pDb->pMainDb) tdbTbClose(pDb->pMainDb);
|
if (pDb->pMainDb) tdbTbClose(pDb->pMainDb);
|
||||||
|
if (pDb->pFreeDb) tdbTbClose(pDb->pFreeDb);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
for (pPager = pDb->pgrList; pPager; pPager = pDb->pgrList) {
|
for (pPager = pDb->pgrList; pPager; pPager = pDb->pgrList) {
|
||||||
|
|
|
@ -292,7 +292,23 @@ int tdbPagerBegin(SPager *pPager, TXN *pTxn) {
|
||||||
*/
|
*/
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
/*
|
||||||
|
int tdbPagerCancelDirty(SPager *pPager, SPage *pPage, TXN *pTxn) {
|
||||||
|
SRBTreeNode *pNode = tRBTreeGet(&pPager->rbt, (SRBTreeNode *)pPage);
|
||||||
|
if (pNode) {
|
||||||
|
pPage->isDirty = 0;
|
||||||
|
|
||||||
|
tRBTreeDrop(&pPager->rbt, (SRBTreeNode *)pPage);
|
||||||
|
if (pTxn->jPageSet) {
|
||||||
|
hashset_remove(pTxn->jPageSet, (void *)((long)TDB_PAGE_PGNO(pPage)));
|
||||||
|
}
|
||||||
|
|
||||||
|
tdbPCacheRelease(pPager->pCache, pPage, pTxn);
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
*/
|
||||||
int tdbPagerCommit(SPager *pPager, TXN *pTxn) {
|
int tdbPagerCommit(SPager *pPager, TXN *pTxn) {
|
||||||
SPage *pPage;
|
SPage *pPage;
|
||||||
int ret;
|
int ret;
|
||||||
|
@ -338,10 +354,13 @@ int tdbPagerCommit(SPager *pPager, TXN *pTxn) {
|
||||||
if (pTxn->jPageSet) {
|
if (pTxn->jPageSet) {
|
||||||
hashset_remove(pTxn->jPageSet, (void *)((long)TDB_PAGE_PGNO(pPage)));
|
hashset_remove(pTxn->jPageSet, (void *)((long)TDB_PAGE_PGNO(pPage)));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
tdbTrace("tdb/pager-commit: remove page: %p %d from dirty tree: %p", pPage, TDB_PAGE_PGNO(pPage), &pPager->rbt);
|
||||||
|
|
||||||
tdbPCacheRelease(pPager->pCache, pPage, pTxn);
|
tdbPCacheRelease(pPager->pCache, pPage, pTxn);
|
||||||
}
|
}
|
||||||
|
|
||||||
tdbTrace("pager/commit reset dirty tree: %p", &pPager->rbt);
|
tdbTrace("tdb/pager-commit reset dirty tree: %p", &pPager->rbt);
|
||||||
tRBTreeCreate(&pPager->rbt, pageCmpFn);
|
tRBTreeCreate(&pPager->rbt, pageCmpFn);
|
||||||
|
|
||||||
// sync the db file
|
// sync the db file
|
||||||
|
@ -629,6 +648,8 @@ int tdbPagerFlushPage(SPager *pPager, TXN *pTxn) {
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int tdbPagerAllocPage(SPager *pPager, SPgno *ppgno, TXN *pTxn);
|
||||||
|
|
||||||
int tdbPagerFetchPage(SPager *pPager, SPgno *ppgno, SPage **ppPage, int (*initPage)(SPage *, void *, int), void *arg,
|
int tdbPagerFetchPage(SPager *pPager, SPgno *ppgno, SPage **ppPage, int (*initPage)(SPage *, void *, int), void *arg,
|
||||||
TXN *pTxn) {
|
TXN *pTxn) {
|
||||||
SPage *pPage;
|
SPage *pPage;
|
||||||
|
@ -643,7 +664,7 @@ int tdbPagerFetchPage(SPager *pPager, SPgno *ppgno, SPage **ppPage, int (*initPa
|
||||||
// alloc new page
|
// alloc new page
|
||||||
if (pgno == 0) {
|
if (pgno == 0) {
|
||||||
loadPage = 0;
|
loadPage = 0;
|
||||||
ret = tdbPagerAllocPage(pPager, &pgno);
|
ret = tdbPagerAllocPage(pPager, &pgno, pTxn);
|
||||||
if (ret < 0) {
|
if (ret < 0) {
|
||||||
tdbError("tdb/pager: %p, ret: %d pgno: %" PRIu32 ", alloc page failed.", pPager, ret, pgno);
|
tdbError("tdb/pager: %p, ret: %d pgno: %" PRIu32 ", alloc page failed.", pPager, ret, pgno);
|
||||||
return -1;
|
return -1;
|
||||||
|
@ -695,23 +716,114 @@ void tdbPagerReturnPage(SPager *pPager, SPage *pPage, TXN *pTxn) {
|
||||||
// TDB_PAGE_PGNO(pPage), pPage);
|
// TDB_PAGE_PGNO(pPage), pPage);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int tdbPagerAllocFreePage(SPager *pPager, SPgno *ppgno) {
|
int tdbPagerInsertFreePage(SPager *pPager, SPage *pPage, TXN *pTxn) {
|
||||||
// TODO: Allocate a page from the free list
|
int code = 0;
|
||||||
|
SPgno pgno = TDB_PAGE_PGNO(pPage);
|
||||||
|
|
||||||
|
if (pPager->frps) {
|
||||||
|
taosArrayPush(pPager->frps, &pgno);
|
||||||
|
pPage->pPager = NULL;
|
||||||
|
return code;
|
||||||
|
}
|
||||||
|
|
||||||
|
pPager->frps = taosArrayInit(8, sizeof(SPgno));
|
||||||
|
// memset(pPage->pData, 0, pPage->pageSize);
|
||||||
|
tdbTrace("tdb/insert-free-page: tbc recycle page: %d.", pgno);
|
||||||
|
// printf("tdb/insert-free-page: tbc recycle page: %d.\n", pgno);
|
||||||
|
code = tdbTbInsert(pPager->pEnv->pFreeDb, &pgno, sizeof(pgno), NULL, 0, pTxn);
|
||||||
|
if (code < 0) {
|
||||||
|
tdbError("tdb/insert-free-page: tb insert failed with ret: %d.", code);
|
||||||
|
taosArrayDestroy(pPager->frps);
|
||||||
|
pPager->frps = NULL;
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
while (TARRAY_SIZE(pPager->frps) > 0) {
|
||||||
|
pgno = *(SPgno *)taosArrayPop(pPager->frps);
|
||||||
|
|
||||||
|
code = tdbTbInsert(pPager->pEnv->pFreeDb, &pgno, sizeof(pgno), NULL, 0, pTxn);
|
||||||
|
if (code < 0) {
|
||||||
|
tdbError("tdb/insert-free-page: tb insert failed with ret: %d.", code);
|
||||||
|
taosArrayDestroy(pPager->frps);
|
||||||
|
pPager->frps = NULL;
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
taosArrayDestroy(pPager->frps);
|
||||||
|
pPager->frps = NULL;
|
||||||
|
|
||||||
|
pPage->pPager = NULL;
|
||||||
|
|
||||||
|
return code;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int tdbPagerRemoveFreePage(SPager *pPager, SPgno *pPgno, TXN *pTxn) {
|
||||||
|
int code = 0;
|
||||||
|
TBC *pCur;
|
||||||
|
|
||||||
|
if (!pPager->pEnv->pFreeDb) {
|
||||||
|
return code;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (pPager->frps) {
|
||||||
|
return code;
|
||||||
|
}
|
||||||
|
|
||||||
|
code = tdbTbcOpen(pPager->pEnv->pFreeDb, &pCur, pTxn);
|
||||||
|
if (code < 0) {
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
code = tdbTbcMoveToFirst(pCur);
|
||||||
|
if (code) {
|
||||||
|
tdbError("tdb/remove-free-page: moveto first failed with ret: %d.", code);
|
||||||
|
tdbTbcClose(pCur);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
void *pKey = NULL;
|
||||||
|
int nKey = 0;
|
||||||
|
|
||||||
|
code = tdbTbcGet(pCur, (const void **)&pKey, &nKey, NULL, NULL);
|
||||||
|
if (code < 0) {
|
||||||
|
// tdbError("tdb/remove-free-page: tbc get failed with ret: %d.", code);
|
||||||
|
tdbTbcClose(pCur);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
*pPgno = *(SPgno *)pKey;
|
||||||
|
tdbTrace("tdb/remove-free-page: tbc get page: %d.", *pPgno);
|
||||||
|
// printf("tdb/remove-free-page: tbc get page: %d.\n", *pPgno);
|
||||||
|
|
||||||
|
code = tdbTbcDelete(pCur);
|
||||||
|
if (code < 0) {
|
||||||
|
tdbError("tdb/remove-free-page: tbc delete failed with ret: %d.", code);
|
||||||
|
tdbTbcClose(pCur);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
tdbTbcClose(pCur);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int tdbPagerAllocFreePage(SPager *pPager, SPgno *ppgno, TXN *pTxn) {
|
||||||
|
// Allocate a page from the free list
|
||||||
|
return tdbPagerRemoveFreePage(pPager, ppgno, pTxn);
|
||||||
|
}
|
||||||
|
|
||||||
static int tdbPagerAllocNewPage(SPager *pPager, SPgno *ppgno) {
|
static int tdbPagerAllocNewPage(SPager *pPager, SPgno *ppgno) {
|
||||||
*ppgno = ++pPager->dbFileSize;
|
*ppgno = ++pPager->dbFileSize;
|
||||||
|
// tdbError("tdb/alloc-new-page: %d.", *ppgno);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
int tdbPagerAllocPage(SPager *pPager, SPgno *ppgno) {
|
static int tdbPagerAllocPage(SPager *pPager, SPgno *ppgno, TXN *pTxn) {
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
*ppgno = 0;
|
*ppgno = 0;
|
||||||
|
|
||||||
// Try to allocate from the free list of the pager
|
// Try to allocate from the free list of the pager
|
||||||
ret = tdbPagerAllocFreePage(pPager, ppgno);
|
ret = tdbPagerAllocFreePage(pPager, ppgno, pTxn);
|
||||||
if (ret < 0) {
|
if (ret < 0) {
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
|
@ -131,13 +131,14 @@ typedef struct SBtInfo {
|
||||||
#define TDB_CELLDECODER_FREE_VAL(pCellDecoder) ((pCellDecoder)->freeKV & TDB_CELLD_F_VAL)
|
#define TDB_CELLDECODER_FREE_VAL(pCellDecoder) ((pCellDecoder)->freeKV & TDB_CELLD_F_VAL)
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
int kLen;
|
int kLen;
|
||||||
u8 *pKey;
|
u8 *pKey;
|
||||||
int vLen;
|
int vLen;
|
||||||
u8 *pVal;
|
u8 *pVal;
|
||||||
SPgno pgno;
|
SPgno pgno;
|
||||||
u8 *pBuf;
|
u8 *pBuf;
|
||||||
u8 freeKV;
|
u8 freeKV;
|
||||||
|
SArray *ofps;
|
||||||
} SCellDecoder;
|
} SCellDecoder;
|
||||||
|
|
||||||
struct SBTC {
|
struct SBTC {
|
||||||
|
@ -198,9 +199,10 @@ int tdbPagerAbort(SPager *pPager, TXN *pTxn);
|
||||||
int tdbPagerFetchPage(SPager *pPager, SPgno *ppgno, SPage **ppPage, int (*initPage)(SPage *, void *, int), void *arg,
|
int tdbPagerFetchPage(SPager *pPager, SPgno *ppgno, SPage **ppPage, int (*initPage)(SPage *, void *, int), void *arg,
|
||||||
TXN *pTxn);
|
TXN *pTxn);
|
||||||
void tdbPagerReturnPage(SPager *pPager, SPage *pPage, TXN *pTxn);
|
void tdbPagerReturnPage(SPager *pPager, SPage *pPage, TXN *pTxn);
|
||||||
int tdbPagerAllocPage(SPager *pPager, SPgno *ppgno);
|
int tdbPagerInsertFreePage(SPager *pPager, SPage *pPage, TXN *pTxn);
|
||||||
int tdbPagerRestoreJournals(SPager *pPager);
|
// int tdbPagerAllocPage(SPager *pPager, SPgno *ppgno);
|
||||||
int tdbPagerRollback(SPager *pPager);
|
int tdbPagerRestoreJournals(SPager *pPager);
|
||||||
|
int tdbPagerRollback(SPager *pPager);
|
||||||
|
|
||||||
// tdbPCache.c ====================================
|
// tdbPCache.c ====================================
|
||||||
#define TDB_PCACHE_PAGE \
|
#define TDB_PCACHE_PAGE \
|
||||||
|
@ -373,6 +375,7 @@ static inline SCell *tdbPageGetCell(SPage *pPage, int idx) {
|
||||||
|
|
||||||
#ifdef USE_MAINDB
|
#ifdef USE_MAINDB
|
||||||
#define TDB_MAINDB_NAME "main.tdb"
|
#define TDB_MAINDB_NAME "main.tdb"
|
||||||
|
#define TDB_FREEDB_NAME "_free.db"
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
struct STDB {
|
struct STDB {
|
||||||
|
@ -386,6 +389,7 @@ struct STDB {
|
||||||
SPager **pgrHash;
|
SPager **pgrHash;
|
||||||
#ifdef USE_MAINDB
|
#ifdef USE_MAINDB
|
||||||
TTB *pMainDb;
|
TTB *pMainDb;
|
||||||
|
TTB *pFreeDb;
|
||||||
#endif
|
#endif
|
||||||
int64_t txnId;
|
int64_t txnId;
|
||||||
};
|
};
|
||||||
|
@ -403,6 +407,8 @@ struct SPager {
|
||||||
SRBTree rbt;
|
SRBTree rbt;
|
||||||
// u8 inTran;
|
// u8 inTran;
|
||||||
TXN *pActiveTxn;
|
TXN *pActiveTxn;
|
||||||
|
SArray *ofps;
|
||||||
|
SArray *frps;
|
||||||
SPager *pNext; // used by TDB
|
SPager *pNext; // used by TDB
|
||||||
SPager *pHashNext; // used by TDB
|
SPager *pHashNext; // used by TDB
|
||||||
#ifdef USE_MAINDB
|
#ifdef USE_MAINDB
|
||||||
|
|
|
@ -14,3 +14,7 @@ target_link_libraries(tdbExOVFLTest tdb gtest gtest_main)
|
||||||
add_executable(tdbPageDefragmentTest "tdbPageDefragmentTest.cpp")
|
add_executable(tdbPageDefragmentTest "tdbPageDefragmentTest.cpp")
|
||||||
target_link_libraries(tdbPageDefragmentTest tdb gtest gtest_main)
|
target_link_libraries(tdbPageDefragmentTest tdb gtest gtest_main)
|
||||||
|
|
||||||
|
# page recycling testing
|
||||||
|
add_executable(tdbPageRecycleTest "tdbPageRecycleTest.cpp")
|
||||||
|
target_link_libraries(tdbPageRecycleTest tdb gtest gtest_main)
|
||||||
|
|
||||||
|
|
|
@ -190,6 +190,15 @@ static void insertOfp(void) {
|
||||||
// commit current transaction
|
// commit current transaction
|
||||||
tdbCommit(pEnv, txn);
|
tdbCommit(pEnv, txn);
|
||||||
tdbPostCommit(pEnv, txn);
|
tdbPostCommit(pEnv, txn);
|
||||||
|
|
||||||
|
closePool(pPool);
|
||||||
|
|
||||||
|
// Close a database
|
||||||
|
tdbTbClose(pDb);
|
||||||
|
|
||||||
|
// Close Env
|
||||||
|
ret = tdbClose(pEnv);
|
||||||
|
GTEST_ASSERT_EQ(ret, 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
// TEST(TdbOVFLPagesTest, DISABLED_TbInsertTest) {
|
// TEST(TdbOVFLPagesTest, DISABLED_TbInsertTest) {
|
||||||
|
@ -233,6 +242,13 @@ TEST(TdbOVFLPagesTest, TbGetTest) {
|
||||||
|
|
||||||
tdbFree(pVal);
|
tdbFree(pVal);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Close a database
|
||||||
|
tdbTbClose(pDb);
|
||||||
|
|
||||||
|
// Close Env
|
||||||
|
ret = tdbClose(pEnv);
|
||||||
|
GTEST_ASSERT_EQ(ret, 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
// TEST(TdbOVFLPagesTest, DISABLED_TbDeleteTest) {
|
// TEST(TdbOVFLPagesTest, DISABLED_TbDeleteTest) {
|
||||||
|
@ -334,6 +350,15 @@ tdbBegin(pEnv, &txn);
|
||||||
// commit current transaction
|
// commit current transaction
|
||||||
tdbCommit(pEnv, txn);
|
tdbCommit(pEnv, txn);
|
||||||
tdbPostCommit(pEnv, txn);
|
tdbPostCommit(pEnv, txn);
|
||||||
|
|
||||||
|
closePool(pPool);
|
||||||
|
|
||||||
|
// Close a database
|
||||||
|
tdbTbClose(pDb);
|
||||||
|
|
||||||
|
// Close Env
|
||||||
|
ret = tdbClose(pEnv);
|
||||||
|
GTEST_ASSERT_EQ(ret, 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
// TEST(tdb_test, DISABLED_simple_insert1) {
|
// TEST(tdb_test, DISABLED_simple_insert1) {
|
||||||
|
@ -407,6 +432,8 @@ TEST(tdb_test, simple_insert1) {
|
||||||
tdbCommit(pEnv, txn);
|
tdbCommit(pEnv, txn);
|
||||||
tdbPostCommit(pEnv, txn);
|
tdbPostCommit(pEnv, txn);
|
||||||
|
|
||||||
|
closePool(pPool);
|
||||||
|
|
||||||
{ // Query the data
|
{ // Query the data
|
||||||
void *pVal = NULL;
|
void *pVal = NULL;
|
||||||
int vLen;
|
int vLen;
|
||||||
|
|
|
@ -0,0 +1,835 @@
|
||||||
|
#include <gtest/gtest.h>
|
||||||
|
|
||||||
|
#define ALLOW_FORBID_FUNC
|
||||||
|
#include "os.h"
|
||||||
|
#include "tdb.h"
|
||||||
|
|
||||||
|
#include <shared_mutex>
|
||||||
|
#include <string>
|
||||||
|
#include <thread>
|
||||||
|
#include <vector>
|
||||||
|
#include "tlog.h"
|
||||||
|
|
||||||
|
typedef struct SPoolMem {
|
||||||
|
int64_t size;
|
||||||
|
struct SPoolMem *prev;
|
||||||
|
struct SPoolMem *next;
|
||||||
|
} SPoolMem;
|
||||||
|
|
||||||
|
static SPoolMem *openPool() {
|
||||||
|
SPoolMem *pPool = (SPoolMem *)taosMemoryMalloc(sizeof(*pPool));
|
||||||
|
|
||||||
|
pPool->prev = pPool->next = pPool;
|
||||||
|
pPool->size = 0;
|
||||||
|
|
||||||
|
return pPool;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void clearPool(SPoolMem *pPool) {
|
||||||
|
SPoolMem *pMem;
|
||||||
|
|
||||||
|
do {
|
||||||
|
pMem = pPool->next;
|
||||||
|
|
||||||
|
if (pMem == pPool) break;
|
||||||
|
|
||||||
|
pMem->next->prev = pMem->prev;
|
||||||
|
pMem->prev->next = pMem->next;
|
||||||
|
pPool->size -= pMem->size;
|
||||||
|
|
||||||
|
taosMemoryFree(pMem);
|
||||||
|
} while (1);
|
||||||
|
|
||||||
|
assert(pPool->size == 0);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void closePool(SPoolMem *pPool) {
|
||||||
|
clearPool(pPool);
|
||||||
|
taosMemoryFree(pPool);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void *poolMalloc(void *arg, size_t size) {
|
||||||
|
void *ptr = NULL;
|
||||||
|
SPoolMem *pPool = (SPoolMem *)arg;
|
||||||
|
SPoolMem *pMem;
|
||||||
|
|
||||||
|
pMem = (SPoolMem *)taosMemoryMalloc(sizeof(*pMem) + size);
|
||||||
|
if (pMem == NULL) {
|
||||||
|
assert(0);
|
||||||
|
}
|
||||||
|
|
||||||
|
pMem->size = sizeof(*pMem) + size;
|
||||||
|
pMem->next = pPool->next;
|
||||||
|
pMem->prev = pPool;
|
||||||
|
|
||||||
|
pPool->next->prev = pMem;
|
||||||
|
pPool->next = pMem;
|
||||||
|
pPool->size += pMem->size;
|
||||||
|
|
||||||
|
ptr = (void *)(&pMem[1]);
|
||||||
|
return ptr;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void poolFree(void *arg, void *ptr) {
|
||||||
|
SPoolMem *pPool = (SPoolMem *)arg;
|
||||||
|
SPoolMem *pMem;
|
||||||
|
|
||||||
|
pMem = &(((SPoolMem *)ptr)[-1]);
|
||||||
|
|
||||||
|
pMem->next->prev = pMem->prev;
|
||||||
|
pMem->prev->next = pMem->next;
|
||||||
|
pPool->size -= pMem->size;
|
||||||
|
|
||||||
|
taosMemoryFree(pMem);
|
||||||
|
}
|
||||||
|
|
||||||
|
static int tKeyCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2) {
|
||||||
|
int k1, k2;
|
||||||
|
|
||||||
|
std::string s1((char *)pKey1 + 3, kLen1 - 3);
|
||||||
|
std::string s2((char *)pKey2 + 3, kLen2 - 3);
|
||||||
|
k1 = stoi(s1);
|
||||||
|
k2 = stoi(s2);
|
||||||
|
|
||||||
|
if (k1 < k2) {
|
||||||
|
return -1;
|
||||||
|
} else if (k1 > k2) {
|
||||||
|
return 1;
|
||||||
|
} else {
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
static int tDefaultKeyCmpr(const void *pKey1, int keyLen1, const void *pKey2, int keyLen2) {
|
||||||
|
int mlen;
|
||||||
|
int cret;
|
||||||
|
|
||||||
|
ASSERT(keyLen1 > 0 && keyLen2 > 0 && pKey1 != NULL && pKey2 != NULL);
|
||||||
|
|
||||||
|
mlen = keyLen1 < keyLen2 ? keyLen1 : keyLen2;
|
||||||
|
cret = memcmp(pKey1, pKey2, mlen);
|
||||||
|
if (cret == 0) {
|
||||||
|
if (keyLen1 < keyLen2) {
|
||||||
|
cret = -1;
|
||||||
|
} else if (keyLen1 > keyLen2) {
|
||||||
|
cret = 1;
|
||||||
|
} else {
|
||||||
|
cret = 0;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return cret;
|
||||||
|
}
|
||||||
|
|
||||||
|
static TDB *openEnv(char const *envName, int const pageSize, int const pageNum) {
|
||||||
|
TDB *pEnv = NULL;
|
||||||
|
|
||||||
|
int ret = tdbOpen(envName, pageSize, pageNum, &pEnv, 0);
|
||||||
|
if (ret) {
|
||||||
|
pEnv = NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
return pEnv;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void generateBigVal(char *val, int valLen) {
|
||||||
|
for (int i = 0; i < valLen; ++i) {
|
||||||
|
char c = char(i & 0xff);
|
||||||
|
if (c == 0) {
|
||||||
|
c = 1;
|
||||||
|
}
|
||||||
|
val[i] = c;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Insert one large value (32605 bytes, far larger than the 4096-byte page)
// under a fixed key into a freshly opened "tdb" environment, inside a single
// committed write transaction, then tear everything down.
// "ofp" presumably stands for "overflow page" — TODO confirm; the payload
// size certainly cannot fit in one page.
// Every step is checked with GTEST_ASSERT_*, so this helper must only be
// called from within a gtest test body.
static void insertOfp(void) {
  int ret = 0;

  // open Env
  int const pageSize = 4096;
  int const pageNum = 64;
  TDB *pEnv = openEnv("tdb", pageSize, pageNum);
  GTEST_ASSERT_NE(pEnv, nullptr);

  // open db (variable key/value lengths: -1, -1)
  TTB *pDb = NULL;
  tdb_cmpr_fn_t compFunc = tKeyCmpr;
  // ret = tdbTbOpen("ofp_insert.db", -1, -1, compFunc, pEnv, &pDb, 0);
  ret = tdbTbOpen("ofp_insert.db", -1, -1, compFunc, pEnv, &pDb, 0);
  GTEST_ASSERT_EQ(ret, 0);

  // open the pool backing the transaction's heap
  SPoolMem *pPool = openPool();

  // start a transaction
  TXN *txn = NULL;

  tdbBegin(pEnv, &txn, poolMalloc, poolFree, pPool, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED);

  // generate value payload
  // char val[((4083 - 4 - 3 - 2) + 1) * 100]; // pSize(4096) - amSize(1) - pageHdr(8) - footerSize(4)
  char val[32605];
  int valLen = sizeof(val) / sizeof(val[0]);
  generateBigVal(val, valLen);

  // insert the generated big data; key length includes the trailing NUL
  // char const *key = "key1";
  char const *key = "key123456789";
  ret = tdbTbInsert(pDb, key, strlen(key) + 1, val, valLen, txn);
  GTEST_ASSERT_EQ(ret, 0);

  // commit current transaction (two-phase: commit, then post-commit)
  tdbCommit(pEnv, txn);
  tdbPostCommit(pEnv, txn);

  closePool(pPool);

  // Close a database
  tdbTbClose(pDb);

  // Close Env
  ret = tdbClose(pEnv);
  GTEST_ASSERT_EQ(ret, 0);
}
|
||||||
|
|
||||||
|
// Remove the on-disk database directory so the next test starts from scratch.
static void clearDb(char const *db) { taosRemoveDir(db); }
|
||||||
|
|
||||||
|
// Smoke test: wipe the db directory and perform one big (overflow-sized)
// insert.  Disabled by default; drop the DISABLED_ prefix to run it.
TEST(TdbPageRecycleTest, DISABLED_TbInsertTest) {
  // TEST(TdbPageRecycleTest, TbInsertTest) {
  // ofp inserting
  clearDb("tdb");
  insertOfp();
}
|
||||||
|
|
||||||
|
// Insert a big value via insertOfp(), reopen the table, and verify the
// value reads back byte-for-byte.  Disabled by default.
TEST(TdbPageRecycleTest, DISABLED_TbGetTest) {
  // TEST(TdbPageRecycleTest, TbGetTest) {
  clearDb("tdb");
  insertOfp();

  // open Env
  int const pageSize = 4096;
  int const pageNum = 64;
  TDB *pEnv = openEnv("tdb", pageSize, pageNum);
  GTEST_ASSERT_NE(pEnv, nullptr);

  // open db
  TTB *pDb = NULL;
  tdb_cmpr_fn_t compFunc = tKeyCmpr;
  // int ret = tdbTbOpen("ofp_insert.db", -1, -1, compFunc, pEnv, &pDb, 0);
  // NOTE(review): fixed keyLen 12 here, but insertOfp() inserted with
  // strlen(key) + 1 == 13 — confirm this mismatch is intentional.
  int ret = tdbTbOpen("ofp_insert.db", 12, -1, compFunc, pEnv, &pDb, 0);
  GTEST_ASSERT_EQ(ret, 0);

  // generate value payload (must match the pattern insertOfp() wrote)
  // char val[((4083 - 4 - 3 - 2) + 1) * 100]; // pSize(4096) - amSize(1) - pageHdr(8) - footerSize(4)
  char val[32605];
  int valLen = sizeof(val) / sizeof(val[0]);
  generateBigVal(val, valLen);

  {  // Query the data
    void *pVal = NULL;
    int vLen;

    // char const *key = "key1";
    char const *key = "key123456789";
    ret = tdbTbGet(pDb, key, strlen(key), &pVal, &vLen);
    ASSERT(ret == 0);
    GTEST_ASSERT_EQ(ret, 0);

    GTEST_ASSERT_EQ(vLen, valLen);
    GTEST_ASSERT_EQ(memcmp(val, pVal, vLen), 0);

    tdbFree(pVal);
  }
  // NOTE(review): pDb and pEnv are not closed here, unlike the sibling
  // tests — possibly intentional for a disabled test, possibly a leak.
}
|
||||||
|
|
||||||
|
// Exercise insert -> get -> upsert -> get -> delete -> get(miss) for a
// single key inside one write transaction, committing only at the end.
// Disabled by default.
TEST(TdbPageRecycleTest, DISABLED_TbDeleteTest) {
  // TEST(TdbPageRecycleTest, TbDeleteTest) {
  int ret = 0;

  taosRemoveDir("tdb");

  // open Env
  int const pageSize = 4096;
  int const pageNum = 64;
  TDB *pEnv = openEnv("tdb", pageSize, pageNum);
  GTEST_ASSERT_NE(pEnv, nullptr);

  // open db
  TTB *pDb = NULL;
  tdb_cmpr_fn_t compFunc = tKeyCmpr;
  ret = tdbTbOpen("ofp_insert.db", -1, -1, compFunc, pEnv, &pDb, 0);
  GTEST_ASSERT_EQ(ret, 0);

  // open the pool
  SPoolMem *pPool = openPool();

  // start a transaction; all operations below run inside it
  TXN *txn;

  tdbBegin(pEnv, &txn, poolMalloc, poolFree, pPool, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED);

  // generate value payload (~2 pages, so the value spills past one page)
  // char val[((4083 - 4 - 3 - 2) + 1) * 100]; // pSize(4096) - amSize(1) - pageHdr(8) - footerSize(4)
  char val[((4083 - 4 - 3 - 2) + 1) * 2];  // pSize(4096) - amSize(1) - pageHdr(8) - footerSize(4)
  int valLen = sizeof(val) / sizeof(val[0]);
  generateBigVal(val, valLen);

  {  // insert the generated big data
    ret = tdbTbInsert(pDb, "key1", strlen("key1"), val, valLen, txn);
    GTEST_ASSERT_EQ(ret, 0);
  }

  {  // query the data: uncommitted write must be visible (READ_UNCOMMITTED)
    void *pVal = NULL;
    int vLen;

    ret = tdbTbGet(pDb, "key1", strlen("key1"), &pVal, &vLen);
    ASSERT(ret == 0);
    GTEST_ASSERT_EQ(ret, 0);

    GTEST_ASSERT_EQ(vLen, valLen);
    GTEST_ASSERT_EQ(memcmp(val, pVal, vLen), 0);

    tdbFree(pVal);
  }
  /* open to debug committed file
  tdbCommit(pEnv, &txn);
  tdbTxnClose(&txn);

  ++txnid;
  tdbTxnOpen(&txn, txnid, poolMalloc, poolFree, pPool, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED);
  tdbBegin(pEnv, &txn);
  */
  {  // upsert the data: replaces the big value with a small one
    ret = tdbTbUpsert(pDb, "key1", strlen("key1"), "value1", strlen("value1"), txn);
    GTEST_ASSERT_EQ(ret, 0);
  }

  {  // query the upserted data
    void *pVal = NULL;
    int vLen;

    ret = tdbTbGet(pDb, "key1", strlen("key1"), &pVal, &vLen);
    ASSERT(ret == 0);
    GTEST_ASSERT_EQ(ret, 0);

    GTEST_ASSERT_EQ(vLen, strlen("value1"));
    GTEST_ASSERT_EQ(memcmp("value1", pVal, vLen), 0);

    tdbFree(pVal);
  }

  {  // delete the data
    ret = tdbTbDelete(pDb, "key1", strlen("key1"), txn);
    GTEST_ASSERT_EQ(ret, 0);
  }

  {  // query the deleted data: get must fail and leave the out-params untouched
    void *pVal = NULL;
    int vLen = -1;

    ret = tdbTbGet(pDb, "key1", strlen("key1"), &pVal, &vLen);
    ASSERT(ret == -1);
    GTEST_ASSERT_EQ(ret, -1);

    GTEST_ASSERT_EQ(vLen, -1);
    GTEST_ASSERT_EQ(pVal, nullptr);

    tdbFree(pVal);
  }

  // commit current transaction
  tdbCommit(pEnv, txn);
  tdbPostCommit(pEnv, txn);
  // NOTE(review): pPool, pDb and pEnv are not closed here, unlike the
  // sibling tests — confirm whether the leak is intentional.
}
|
||||||
|
|
||||||
|
// Insert nData (= 1) near-page-sized values, committing whenever the memory
// pool fills, then verify each record by point lookup and by a full cursor
// scan, and finally drop the table.  Disabled by default.
TEST(TdbPageRecycleTest, DISABLED_simple_insert1) {
  // TEST(TdbPageRecycleTest, simple_insert1) {
  int ret;
  TDB *pEnv;
  TTB *pDb;
  tdb_cmpr_fn_t compFunc;
  int nData = 1;
  TXN *txn;
  int const pageSize = 4096;

  taosRemoveDir("tdb");

  // Open Env
  ret = tdbOpen("tdb", pageSize, 64, &pEnv, 0);
  GTEST_ASSERT_EQ(ret, 0);

  // Create a database
  compFunc = tKeyCmpr;
  ret = tdbTbOpen("db.db", -1, -1, compFunc, pEnv, &pDb, 0);
  GTEST_ASSERT_EQ(ret, 0);

  {
    char key[64];
    // char val[(4083 - 4 - 3 - 2)]; // pSize(4096) - amSize(1) - pageHdr(8) - footerSize(4)
    char val[(4083 - 4 - 3 - 2) + 1];  // pSize(4096) - amSize(1) - pageHdr(8) - footerSize(4)
    int64_t poolLimit = 4096;          // pool-size threshold (4 KB) that triggers an intermediate commit
    SPoolMem *pPool;

    // open the pool
    pPool = openPool();

    // start a transaction
    tdbBegin(pEnv, &txn, poolMalloc, poolFree, pPool, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED);

    for (int iData = 1; iData <= nData; iData++) {
      // NOTE(review): key is set to "key0" but never used — the insert
      // below uses the literal "key1"; the query loop looks up "key1".
      sprintf(key, "key0");
      sprintf(val, "value%d", iData);

      // ret = tdbTbInsert(pDb, key, strlen(key), val, strlen(val), &txn);
      // GTEST_ASSERT_EQ(ret, 0);

      // generate value payload: keep "value%d" in bytes 0..5, fill the rest
      // with the same non-zero pattern generateBigVal() uses
      int valLen = sizeof(val) / sizeof(val[0]);
      for (int i = 6; i < valLen; ++i) {
        char c = char(i & 0xff);
        if (c == 0) {
          c = 1;
        }
        val[i] = c;
      }

      ret = tdbTbInsert(pDb, "key1", strlen("key1"), val, valLen, txn);
      GTEST_ASSERT_EQ(ret, 0);

      // if pool is full, commit the transaction and start a new one
      if (pPool->size >= poolLimit) {
        // commit current transaction
        tdbCommit(pEnv, txn);
        tdbPostCommit(pEnv, txn);

        // start a new transaction
        clearPool(pPool);

        tdbBegin(pEnv, &txn, poolMalloc, poolFree, pPool, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED);
      }
    }

    // commit the transaction
    tdbCommit(pEnv, txn);
    tdbPostCommit(pEnv, txn);

    {  // Query the data back by key (only "key1" exists when nData == 1)
      void *pVal = NULL;
      int vLen;

      for (int i = 1; i <= nData; i++) {
        sprintf(key, "key%d", i);
        // sprintf(val, "value%d", i);

        ret = tdbTbGet(pDb, key, strlen(key), &pVal, &vLen);
        ASSERT(ret == 0);
        GTEST_ASSERT_EQ(ret, 0);

        GTEST_ASSERT_EQ(vLen, sizeof(val) / sizeof(val[0]));
        GTEST_ASSERT_EQ(memcmp(val, pVal, vLen), 0);
      }

      tdbFree(pVal);
    }

    {  // Iterate to query the DB data; count must equal nData
      TBC *pDBC;
      void *pKey = NULL;
      void *pVal = NULL;
      int vLen, kLen;
      int count = 0;

      ret = tdbTbcOpen(pDb, &pDBC, NULL);
      GTEST_ASSERT_EQ(ret, 0);

      tdbTbcMoveToFirst(pDBC);

      for (;;) {
        ret = tdbTbcNext(pDBC, &pKey, &kLen, &pVal, &vLen);
        if (ret < 0) break;

        // std::cout.write((char *)pKey, kLen) /* << " " << kLen */ << " ";
        // std::cout.write((char *)pVal, vLen) /* << " " << vLen */;
        // std::cout << std::endl;

        count++;
      }

      GTEST_ASSERT_EQ(count, nData);

      tdbTbcClose(pDBC);

      tdbFree(pKey);
      tdbFree(pVal);
    }
  }

  ret = tdbTbDrop(pDb);
  GTEST_ASSERT_EQ(ret, 0);

  // Close a database
  tdbTbClose(pDb);

  // Close Env
  ret = tdbClose(pEnv);
  GTEST_ASSERT_EQ(ret, 0);
}
|
||||||
|
|
||||||
|
// Insert nData small key/value records ("key%03d" -> "value%03d") into
// db.db inside the existing "tdb" env (does NOT wipe the directory first),
// committing and restarting the transaction whenever the memory pool
// reaches poolLimit.  Closes the table and env, then lists the directory.
static void insertDb(int nData) {
  int ret = 0;
  TDB *pEnv = NULL;
  TTB *pDb = NULL;
  tdb_cmpr_fn_t compFunc;
  TXN *txn = NULL;
  int const pageSize = 4 * 1024;

  // Open Env
  ret = tdbOpen("tdb", pageSize, 64, &pEnv, 0);
  GTEST_ASSERT_EQ(ret, 0);

  // Create a database
  compFunc = tKeyCmpr;
  ret = tdbTbOpen("db.db", -1, -1, compFunc, pEnv, &pDb, 0);
  GTEST_ASSERT_EQ(ret, 0);

  // 1, insert nData kv
  {
    char key[64];
    char val[(4083 - 4 - 3 - 2) + 1];  // pSize(4096) - amSize(1) - pageHdr(8) - footerSize(4)
    int64_t poolLimit = 4096;          // pool-size threshold (4 KB, not 1M) that triggers a commit
    SPoolMem *pPool;

    // open the pool
    pPool = openPool();

    // start a transaction
    tdbBegin(pEnv, &txn, poolMalloc, poolFree, pPool, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED);

    for (int iData = 0; iData < nData; ++iData) {
      sprintf(key, "key%03d", iData);
      sprintf(val, "value%03d", iData);

      // note: only strlen(val) bytes are stored, not the whole buffer
      ret = tdbTbInsert(pDb, key, strlen(key), val, strlen(val), txn);
      GTEST_ASSERT_EQ(ret, 0);
      // if pool is full, commit the transaction and start a new one
      if (pPool->size >= poolLimit) {
        // commit current transaction
        tdbCommit(pEnv, txn);
        tdbPostCommit(pEnv, txn);

        // start a new transaction
        clearPool(pPool);

        tdbBegin(pEnv, &txn, poolMalloc, poolFree, pPool, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED);
      }
    }

    // commit the transaction
    tdbCommit(pEnv, txn);
    tdbPostCommit(pEnv, txn);

    // 2, delete nData/2 records

    closePool(pPool);
  }

  // Close a database
  tdbTbClose(pDb);

  // Close Env
  ret = tdbClose(pEnv);
  GTEST_ASSERT_EQ(ret, 0);

  system("ls -l ./tdb");
}
|
||||||
|
|
||||||
|
static void deleteDb(int nData) {
|
||||||
|
int ret = 0;
|
||||||
|
TDB *pEnv = NULL;
|
||||||
|
TTB *pDb = NULL;
|
||||||
|
tdb_cmpr_fn_t compFunc;
|
||||||
|
TXN *txn = NULL;
|
||||||
|
int const pageSize = 4 * 1024;
|
||||||
|
|
||||||
|
// Open Env
|
||||||
|
ret = tdbOpen("tdb", pageSize, 64, &pEnv, 0);
|
||||||
|
GTEST_ASSERT_EQ(ret, 0);
|
||||||
|
|
||||||
|
// Create a database
|
||||||
|
compFunc = tKeyCmpr;
|
||||||
|
ret = tdbTbOpen("db.db", -1, -1, compFunc, pEnv, &pDb, 0);
|
||||||
|
GTEST_ASSERT_EQ(ret, 0);
|
||||||
|
|
||||||
|
// 2, delete nData/2 records
|
||||||
|
{
|
||||||
|
char key[64];
|
||||||
|
char val[(4083 - 4 - 3 - 2) + 1]; // pSize(4096) - amSize(1) - pageHdr(8) - footerSize(4)
|
||||||
|
int64_t poolLimit = 4096; // 1M pool limit
|
||||||
|
SPoolMem *pPool;
|
||||||
|
|
||||||
|
// open the pool
|
||||||
|
pPool = openPool();
|
||||||
|
|
||||||
|
// start a transaction
|
||||||
|
tdbBegin(pEnv, &txn, poolMalloc, poolFree, pPool, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED);
|
||||||
|
|
||||||
|
for (int iData = 0; iData < nData; iData++) {
|
||||||
|
// if (iData % 2 == 0) continue;
|
||||||
|
|
||||||
|
sprintf(key, "key%03d", iData);
|
||||||
|
sprintf(val, "value%03d", iData);
|
||||||
|
|
||||||
|
{ // delete the data
|
||||||
|
ret = tdbTbDelete(pDb, key, strlen(key), txn);
|
||||||
|
GTEST_ASSERT_EQ(ret, 0);
|
||||||
|
}
|
||||||
|
// if pool is full, commit the transaction and start a new one
|
||||||
|
if (pPool->size >= poolLimit) {
|
||||||
|
// commit current transaction
|
||||||
|
tdbCommit(pEnv, txn);
|
||||||
|
tdbPostCommit(pEnv, txn);
|
||||||
|
|
||||||
|
// start a new transaction
|
||||||
|
clearPool(pPool);
|
||||||
|
|
||||||
|
tdbBegin(pEnv, &txn, poolMalloc, poolFree, pPool, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// commit the transaction
|
||||||
|
tdbCommit(pEnv, txn);
|
||||||
|
tdbPostCommit(pEnv, txn);
|
||||||
|
|
||||||
|
closePool(pPool);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close a database
|
||||||
|
tdbTbClose(pDb);
|
||||||
|
|
||||||
|
// Close Env
|
||||||
|
ret = tdbClose(pEnv);
|
||||||
|
GTEST_ASSERT_EQ(ret, 0);
|
||||||
|
|
||||||
|
system("ls -l ./tdb");
|
||||||
|
}
|
||||||
|
|
||||||
|
// Record count shared by the sequential insert/delete/recycle tests below.
static const int nDataConst = 256 * 19;
|
||||||
|
|
||||||
|
// Sequentially insert nDataConst small records into a fresh db.
// TEST(TdbPageRecycleTest, DISABLED_seq_insert) {
TEST(TdbPageRecycleTest, seq_insert) {
  clearDb("tdb");
  insertDb(nDataConst);
}
|
||||||
|
|
||||||
|
// Delete the records written by seq_insert (relies on test ordering).
// TEST(TdbPageRecycleTest, DISABLED_seq_delete) {
TEST(TdbPageRecycleTest, seq_delete) { deleteDb(nDataConst); }
|
||||||
|
|
||||||
|
// Re-insert into the same db without wiping it, so freed pages can be reused.
// TEST(TdbPageRecycleTest, DISABLED_recycly_insert) {
TEST(TdbPageRecycleTest, recycly_insert) { insertDb(nDataConst); }
|
||||||
|
|
||||||
|
// Fresh db + one overflow-sized insert; lists the directory afterwards.
// TEST(TdbPageRecycleTest, DISABLED_recycly_seq_insert_ofp) {
TEST(TdbPageRecycleTest, recycly_seq_insert_ofp) {
  clearDb("tdb");
  insertOfp();
  system("ls -l ./tdb");
}
|
||||||
|
|
||||||
|
// Delete the big record written by insertOfp() (same key, same
// strlen(key) + 1 length), commit, then drop the table and close
// everything.  Must run after insertOfp() has populated "tdb".
static void deleteOfp(void) {
  // open Env
  int ret = 0;
  int const pageSize = 4096;
  int const pageNum = 64;
  TDB *pEnv = openEnv("tdb", pageSize, pageNum);
  GTEST_ASSERT_NE(pEnv, nullptr);

  // open db
  TTB *pDb = NULL;
  tdb_cmpr_fn_t compFunc = tKeyCmpr;
  ret = tdbTbOpen("ofp_insert.db", -1, -1, compFunc, pEnv, &pDb, 0);
  GTEST_ASSERT_EQ(ret, 0);

  // open the pool
  SPoolMem *pPool = openPool();

  // start a transaction
  TXN *txn;

  tdbBegin(pEnv, &txn, poolMalloc, poolFree, pPool, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED);

  {  // delete the data (key length must match the insert: strlen + NUL)
    char const *key = "key123456789";
    ret = tdbTbDelete(pDb, key, strlen(key) + 1, txn);
    GTEST_ASSERT_EQ(ret, 0);
  }

  // commit current transaction
  tdbCommit(pEnv, txn);
  tdbPostCommit(pEnv, txn);

  closePool(pPool);

  ret = tdbTbDrop(pDb);
  GTEST_ASSERT_EQ(ret, 0);

  // Close a database
  tdbTbClose(pDb);

  // Close Env
  ret = tdbClose(pEnv);
  GTEST_ASSERT_EQ(ret, 0);
}
|
||||||
|
|
||||||
|
// Delete the overflow record inserted by the previous ofp test and list
// the directory (relies on test ordering).
// TEST(TdbPageRecycleTest, DISABLED_seq_delete_ofp) {
TEST(TdbPageRecycleTest, seq_delete_ofp) {
  deleteOfp();
  system("ls -l ./tdb");
}
|
||||||
|
|
||||||
|
// Re-insert the overflow record WITHOUT clearing the db first, so the
// pages freed by seq_delete_ofp can be recycled.
// TEST(TdbPageRecycleTest, DISABLED_recycly_seq_insert_ofp_again) {
TEST(TdbPageRecycleTest, recycly_seq_insert_ofp_again) {
  insertOfp();
  system("ls -l ./tdb");
}
|
||||||
|
|
||||||
|
// Delete the overflow record and then insert nDataConst small records in
// the SAME transaction (no commit between delete and first insert), so the
// pages freed by the delete can be recycled before the commit.
// TEST(TdbPageRecycleTest, DISABLED_recycly_seq_insert_ofp_nocommit) {
TEST(TdbPageRecycleTest, recycly_seq_insert_ofp_nocommit) {
  clearDb("tdb");
  insertOfp();
  system("ls -l ./tdb");

  // open Env
  int ret = 0;
  int const pageSize = 4096;
  int const pageNum = 64;
  TDB *pEnv = openEnv("tdb", pageSize, pageNum);
  GTEST_ASSERT_NE(pEnv, nullptr);

  // open db
  TTB *pDb = NULL;
  tdb_cmpr_fn_t compFunc = tKeyCmpr;
  ret = tdbTbOpen("ofp_insert.db", -1, -1, compFunc, pEnv, &pDb, 0);
  GTEST_ASSERT_EQ(ret, 0);

  // open the pool
  SPoolMem *pPool = openPool();

  // start a transaction
  TXN *txn;

  tdbBegin(pEnv, &txn, poolMalloc, poolFree, pPool, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED);

  {  // delete the data written by insertOfp() (key length includes the NUL)
    char const *key = "key123456789";
    ret = tdbTbDelete(pDb, key, strlen(key) + 1, txn);
    GTEST_ASSERT_EQ(ret, 0);
  }

  // 1, insert nData kv
  {
    int nData = nDataConst;
    char key[64];
    char val[(4083 - 4 - 3 - 2) + 1];  // pSize(4096) - amSize(1) - pageHdr(8) - footerSize(4)
    int64_t poolLimit = 4096;          // pool-size threshold (4 KB) that triggers a commit

    for (int iData = 0; iData < nData; ++iData) {
      sprintf(key, "key%03d", iData);
      sprintf(val, "value%03d", iData);

      ret = tdbTbInsert(pDb, key, strlen(key), val, strlen(val), txn);
      GTEST_ASSERT_EQ(ret, 0);
      // if pool is full, commit the transaction and start a new one
      if (pPool->size >= poolLimit) {
        // commit current transaction
        tdbCommit(pEnv, txn);
        tdbPostCommit(pEnv, txn);

        // start a new transaction
        clearPool(pPool);

        tdbBegin(pEnv, &txn, poolMalloc, poolFree, pPool, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED);
      }
    }
  }

  // commit current transaction
  tdbCommit(pEnv, txn);
  tdbPostCommit(pEnv, txn);

  closePool(pPool);

  // Close a database
  tdbTbClose(pDb);

  // Close Env
  ret = tdbClose(pEnv);
  GTEST_ASSERT_EQ(ret, 0);

  system("ls -l ./tdb");
}
|
||||||
|
|
||||||
|
// TEST(TdbPageRecycleTest, DISABLED_recycly_delete_interior_ofp_nocommit) {
|
||||||
|
TEST(TdbPageRecycleTest, recycly_delete_interior_ofp_nocommit) {
|
||||||
|
clearDb("tdb");
|
||||||
|
|
||||||
|
// open Env
|
||||||
|
int ret = 0;
|
||||||
|
int const pageSize = 4096;
|
||||||
|
int const pageNum = 64;
|
||||||
|
TDB *pEnv = openEnv("tdb", pageSize, pageNum);
|
||||||
|
GTEST_ASSERT_NE(pEnv, nullptr);
|
||||||
|
|
||||||
|
// open db
|
||||||
|
TTB *pDb = NULL;
|
||||||
|
tdb_cmpr_fn_t compFunc = NULL; // tKeyCmpr;
|
||||||
|
ret = tdbTbOpen("ofp_insert.db", -1, -1, compFunc, pEnv, &pDb, 0);
|
||||||
|
GTEST_ASSERT_EQ(ret, 0);
|
||||||
|
|
||||||
|
// open the pool
|
||||||
|
SPoolMem *pPool = openPool();
|
||||||
|
|
||||||
|
// start a transaction
|
||||||
|
TXN *txn;
|
||||||
|
|
||||||
|
tdbBegin(pEnv, &txn, poolMalloc, poolFree, pPool, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED);
|
||||||
|
|
||||||
|
char key[1024] = {0};
|
||||||
|
int count = sizeof(key) / sizeof(key[0]);
|
||||||
|
for (int i = 0; i < count - 1; ++i) {
|
||||||
|
key[i] = 'a';
|
||||||
|
}
|
||||||
|
|
||||||
|
// insert n ofp keys to form 2-layer btree
|
||||||
|
{
|
||||||
|
for (int i = 0; i < 7; ++i) {
|
||||||
|
// sprintf(&key[count - 2], "%c", i);
|
||||||
|
key[count - 2] = '0' + i;
|
||||||
|
|
||||||
|
ret = tdbTbInsert(pDb, key, count, NULL, NULL, txn);
|
||||||
|
GTEST_ASSERT_EQ(ret, 0);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
/*
|
||||||
|
// delete one interior key
|
||||||
|
{
|
||||||
|
sprintf(&key[count - 2], "%c", 2);
|
||||||
|
key[count - 2] = '0' + 2;
|
||||||
|
|
||||||
|
ret = tdbTbDelete(pDb, key, strlen(key) + 1, txn);
|
||||||
|
GTEST_ASSERT_EQ(ret, 0);
|
||||||
|
}
|
||||||
|
*/
|
||||||
|
// commit current transaction
|
||||||
|
tdbCommit(pEnv, txn);
|
||||||
|
tdbPostCommit(pEnv, txn);
|
||||||
|
|
||||||
|
closePool(pPool);
|
||||||
|
|
||||||
|
// Close a database
|
||||||
|
tdbTbClose(pDb);
|
||||||
|
|
||||||
|
// Close Env
|
||||||
|
ret = tdbClose(pEnv);
|
||||||
|
GTEST_ASSERT_EQ(ret, 0);
|
||||||
|
|
||||||
|
system("ls -l ./tdb");
|
||||||
|
}
|
|
@ -568,6 +568,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_SELECTED_EXPR, "Invalid SELECTed ex
|
||||||
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_GET_META_ERROR, "Fail to get table info")
|
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_GET_META_ERROR, "Fail to get table info")
|
||||||
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_NOT_UNIQUE_TABLE_ALIAS, "Not unique table/alias")
|
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_NOT_UNIQUE_TABLE_ALIAS, "Not unique table/alias")
|
||||||
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED_FUNC, "System table not allowed")
|
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED_FUNC, "System table not allowed")
|
||||||
|
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED, "System table not allowed")
|
||||||
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INTERNAL_ERROR, "Parser internal error")
|
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INTERNAL_ERROR, "Parser internal error")
|
||||||
|
|
||||||
//planner
|
//planner
|
||||||
|
@ -626,6 +627,9 @@ TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_FS_UPDATE, "Rsma fs update erro
|
||||||
TAOS_DEFINE_ERROR(TSDB_CODE_INDEX_REBUILDING, "Index is rebuilding")
|
TAOS_DEFINE_ERROR(TSDB_CODE_INDEX_REBUILDING, "Index is rebuilding")
|
||||||
TAOS_DEFINE_ERROR(TSDB_CODE_INDEX_INVALID_FILE, "Index file is invalid")
|
TAOS_DEFINE_ERROR(TSDB_CODE_INDEX_INVALID_FILE, "Index file is invalid")
|
||||||
|
|
||||||
|
//scalar
|
||||||
|
TAOS_DEFINE_ERROR(TSDB_CODE_SCALAR_CONVERT_ERROR, "Cannot convert to specific type")
|
||||||
|
|
||||||
//tmq
|
//tmq
|
||||||
TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_INVALID_MSG, "Invalid message")
|
TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_INVALID_MSG, "Invalid message")
|
||||||
TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_CONSUMER_MISMATCH, "Consumer mismatch")
|
TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_CONSUMER_MISMATCH, "Consumer mismatch")
|
||||||
|
@ -675,7 +679,7 @@ const char* tstrerror(int32_t err) {
|
||||||
if ((err & 0x00ff0000) == 0x00ff0000) {
|
if ((err & 0x00ff0000) == 0x00ff0000) {
|
||||||
int32_t code = err & 0x0000ffff;
|
int32_t code = err & 0x0000ffff;
|
||||||
// strerror can handle any invalid code
|
// strerror can handle any invalid code
|
||||||
// invalid code return Unknown error
|
// invalid code return Unknown error
|
||||||
return strerror(code);
|
return strerror(code);
|
||||||
}
|
}
|
||||||
int32_t s = 0;
|
int32_t s = 0;
|
||||||
|
|
|
@ -230,7 +230,7 @@ static void pqSwapPQNode(PriorityQueueNode* a, PriorityQueueNode* b) {
|
||||||
|
|
||||||
size_t taosPQSize(PriorityQueue* pq) { return pqContainerSize(pq); }
|
size_t taosPQSize(PriorityQueue* pq) { return pqContainerSize(pq); }
|
||||||
|
|
||||||
static void pqHeapify(PriorityQueue* pq, size_t from, size_t last) {
|
static PriorityQueueNode* pqHeapify(PriorityQueue* pq, size_t from, size_t last) {
|
||||||
size_t largest = from;
|
size_t largest = from;
|
||||||
do {
|
do {
|
||||||
from = largest;
|
from = largest;
|
||||||
|
@ -246,6 +246,7 @@ static void pqHeapify(PriorityQueue* pq, size_t from, size_t last) {
|
||||||
pqSwapPQNode(pqContainerGetEle(pq, from), pqContainerGetEle(pq, largest));
|
pqSwapPQNode(pqContainerGetEle(pq, from), pqContainerGetEle(pq, largest));
|
||||||
}
|
}
|
||||||
} while (largest != from);
|
} while (largest != from);
|
||||||
|
return pqContainerGetEle(pq, largest);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void pqBuildHeap(PriorityQueue* pq) {
|
static void pqBuildHeap(PriorityQueue* pq) {
|
||||||
|
@ -257,12 +258,13 @@ static void pqBuildHeap(PriorityQueue* pq) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static void pqReverseHeapify(PriorityQueue* pq, size_t i) {
|
static PriorityQueueNode* pqReverseHeapify(PriorityQueue* pq, size_t i) {
|
||||||
while (i > 0 && !pq->fn(pqContainerGetEle(pq, i)->data, pqContainerGetEle(pq, pqParent(i))->data, pq->param)) {
|
while (i > 0 && !pq->fn(pqContainerGetEle(pq, i)->data, pqContainerGetEle(pq, pqParent(i))->data, pq->param)) {
|
||||||
size_t parentIdx = pqParent(i);
|
size_t parentIdx = pqParent(i);
|
||||||
pqSwapPQNode(pqContainerGetEle(pq, i), pqContainerGetEle(pq, parentIdx));
|
pqSwapPQNode(pqContainerGetEle(pq, i), pqContainerGetEle(pq, parentIdx));
|
||||||
i = parentIdx;
|
i = parentIdx;
|
||||||
}
|
}
|
||||||
|
return pqContainerGetEle(pq, i);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void pqUpdate(PriorityQueue* pq, size_t i) {
|
static void pqUpdate(PriorityQueue* pq, size_t i) {
|
||||||
|
@ -290,9 +292,9 @@ PriorityQueueNode* taosPQTop(PriorityQueue* pq) {
|
||||||
return pqContainerGetEle(pq, 0);
|
return pqContainerGetEle(pq, 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
void taosPQPush(PriorityQueue* pq, const PriorityQueueNode* node) {
|
PriorityQueueNode* taosPQPush(PriorityQueue* pq, const PriorityQueueNode* node) {
|
||||||
taosArrayPush(pq->container, node);
|
taosArrayPush(pq->container, node);
|
||||||
pqReverseHeapify(pq, pqContainerSize(pq) - 1);
|
return pqReverseHeapify(pq, pqContainerSize(pq) - 1);
|
||||||
}
|
}
|
||||||
|
|
||||||
void taosPQPop(PriorityQueue* pq) {
|
void taosPQPop(PriorityQueue* pq) {
|
||||||
|
@ -324,16 +326,20 @@ void destroyBoundedQueue(BoundedQueue* q) {
|
||||||
taosMemoryFree(q);
|
taosMemoryFree(q);
|
||||||
}
|
}
|
||||||
|
|
||||||
void taosBQPush(BoundedQueue* q, PriorityQueueNode* n) {
|
PriorityQueueNode* taosBQPush(BoundedQueue* q, PriorityQueueNode* n) {
|
||||||
if (pqContainerSize(q->queue) == q->maxSize + 1) {
|
if (pqContainerSize(q->queue) == q->maxSize + 1) {
|
||||||
PriorityQueueNode* top = pqContainerGetEle(q->queue, 0);
|
PriorityQueueNode* top = pqContainerGetEle(q->queue, 0);
|
||||||
void *p = top->data;
|
if (q->queue->fn(top->data, n->data, q->queue->param)) {
|
||||||
top->data = n->data;
|
return NULL;
|
||||||
n->data = p;
|
} else {
|
||||||
if (q->queue->deleteFn) q->queue->deleteFn(n->data);
|
void* p = top->data;
|
||||||
pqHeapify(q->queue, 0, taosBQSize(q));
|
top->data = n->data;
|
||||||
|
n->data = p;
|
||||||
|
if (q->queue->deleteFn) q->queue->deleteFn(n->data);
|
||||||
|
}
|
||||||
|
return pqHeapify(q->queue, 0, taosBQSize(q));
|
||||||
} else {
|
} else {
|
||||||
taosPQPush(q->queue, n);
|
return taosPQPush(q->queue, n);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
import sys
|
import sys
|
||||||
from util.log import *
|
from util.log import *
|
||||||
from util.cases import *
|
from util.cases import *
|
||||||
from util.sql import *
|
from util.sql import *
|
||||||
|
@ -8,15 +8,15 @@ from math import inf
|
||||||
class TDTestCase:
|
class TDTestCase:
|
||||||
def caseDescription(self):
|
def caseDescription(self):
|
||||||
'''
|
'''
|
||||||
case1<shenglian zhou>: [TD-11204]Difference improvement that can ignore negative
|
case1<shenglian zhou>: [TD-11204]Difference improvement that can ignore negative
|
||||||
'''
|
'''
|
||||||
return
|
return
|
||||||
|
|
||||||
def init(self, conn, logSql, replicaVer=1):
|
def init(self, conn, logSql, replicaVer=1):
|
||||||
tdLog.debug("start to execute %s" % __file__)
|
tdLog.debug("start to execute %s" % __file__)
|
||||||
tdSql.init(conn.cursor(), False)
|
tdSql.init(conn.cursor(), False)
|
||||||
self._conn = conn
|
self._conn = conn
|
||||||
|
|
||||||
def restartTaosd(self, index=1, dbname="db"):
|
def restartTaosd(self, index=1, dbname="db"):
|
||||||
tdDnodes.stop(index)
|
tdDnodes.stop(index)
|
||||||
tdDnodes.startWithoutSleep(index)
|
tdDnodes.startWithoutSleep(index)
|
||||||
|
@ -42,17 +42,17 @@ class TDTestCase:
|
||||||
tdSql.query('show create database scd;')
|
tdSql.query('show create database scd;')
|
||||||
tdSql.checkRows(1)
|
tdSql.checkRows(1)
|
||||||
tdSql.checkData(0, 0, 'scd')
|
tdSql.checkData(0, 0, 'scd')
|
||||||
tdSql.checkData(0, 1, "CREATE DATABASE `scd` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 14400m WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 1 KEEP 5256000m,5256000m,5256000m PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0 TABLE_PREFIX 0 TABLE_SUFFIX 0 TSDB_PAGESIZE 4 WAL_RETENTION_PERIOD 0 WAL_RETENTION_SIZE 0 WAL_ROLL_PERIOD 0 WAL_SEGMENT_SIZE 0")
|
tdSql.checkData(0, 1, "CREATE DATABASE `scd` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 14400m WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 1 KEEP 5256000m,5256000m,5256000m PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0 TABLE_PREFIX 0 TABLE_SUFFIX 0 TSDB_PAGESIZE 4 WAL_RETENTION_PERIOD 0 WAL_RETENTION_SIZE 0")
|
||||||
|
|
||||||
tdSql.query('show create database scd2;')
|
tdSql.query('show create database scd2;')
|
||||||
tdSql.checkRows(1)
|
tdSql.checkRows(1)
|
||||||
tdSql.checkData(0, 0, 'scd2')
|
tdSql.checkData(0, 0, 'scd2')
|
||||||
tdSql.checkData(0, 1, "CREATE DATABASE `scd2` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 14400m WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 3 KEEP 5256000m,5256000m,5256000m PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0 TABLE_PREFIX 0 TABLE_SUFFIX 0 TSDB_PAGESIZE 4 WAL_RETENTION_PERIOD 0 WAL_RETENTION_SIZE 0 WAL_ROLL_PERIOD 0 WAL_SEGMENT_SIZE 0")
|
tdSql.checkData(0, 1, "CREATE DATABASE `scd2` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 14400m WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 3 KEEP 5256000m,5256000m,5256000m PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0 TABLE_PREFIX 0 TABLE_SUFFIX 0 TSDB_PAGESIZE 4 WAL_RETENTION_PERIOD 0 WAL_RETENTION_SIZE 0")
|
||||||
|
|
||||||
tdSql.query('show create database scd4')
|
tdSql.query('show create database scd4')
|
||||||
tdSql.checkRows(1)
|
tdSql.checkRows(1)
|
||||||
tdSql.checkData(0, 0, 'scd4')
|
tdSql.checkData(0, 0, 'scd4')
|
||||||
tdSql.checkData(0, 1, "CREATE DATABASE `scd4` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 14400m WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 13 KEEP 5256000m,5256000m,5256000m PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0 TABLE_PREFIX 0 TABLE_SUFFIX 0 TSDB_PAGESIZE 4 WAL_RETENTION_PERIOD 0 WAL_RETENTION_SIZE 0 WAL_ROLL_PERIOD 0 WAL_SEGMENT_SIZE 0")
|
tdSql.checkData(0, 1, "CREATE DATABASE `scd4` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 14400m WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 13 KEEP 5256000m,5256000m,5256000m PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0 TABLE_PREFIX 0 TABLE_SUFFIX 0 TSDB_PAGESIZE 4 WAL_RETENTION_PERIOD 0 WAL_RETENTION_SIZE 0")
|
||||||
|
|
||||||
|
|
||||||
self.restartTaosd(1, dbname='scd')
|
self.restartTaosd(1, dbname='scd')
|
||||||
|
@ -60,17 +60,17 @@ class TDTestCase:
|
||||||
tdSql.query('show create database scd;')
|
tdSql.query('show create database scd;')
|
||||||
tdSql.checkRows(1)
|
tdSql.checkRows(1)
|
||||||
tdSql.checkData(0, 0, 'scd')
|
tdSql.checkData(0, 0, 'scd')
|
||||||
tdSql.checkData(0, 1, "CREATE DATABASE `scd` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 14400m WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 1 KEEP 5256000m,5256000m,5256000m PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0 TABLE_PREFIX 0 TABLE_SUFFIX 0 TSDB_PAGESIZE 4 WAL_RETENTION_PERIOD 0 WAL_RETENTION_SIZE 0 WAL_ROLL_PERIOD 0 WAL_SEGMENT_SIZE 0")
|
tdSql.checkData(0, 1, "CREATE DATABASE `scd` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 14400m WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 1 KEEP 5256000m,5256000m,5256000m PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0 TABLE_PREFIX 0 TABLE_SUFFIX 0 TSDB_PAGESIZE 4 WAL_RETENTION_PERIOD 0 WAL_RETENTION_SIZE 0")
|
||||||
|
|
||||||
tdSql.query('show create database scd2;')
|
tdSql.query('show create database scd2;')
|
||||||
tdSql.checkRows(1)
|
tdSql.checkRows(1)
|
||||||
tdSql.checkData(0, 0, 'scd2')
|
tdSql.checkData(0, 0, 'scd2')
|
||||||
tdSql.checkData(0, 1, "CREATE DATABASE `scd2` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 14400m WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 3 KEEP 5256000m,5256000m,5256000m PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0 TABLE_PREFIX 0 TABLE_SUFFIX 0 TSDB_PAGESIZE 4 WAL_RETENTION_PERIOD 0 WAL_RETENTION_SIZE 0 WAL_ROLL_PERIOD 0 WAL_SEGMENT_SIZE 0")
|
tdSql.checkData(0, 1, "CREATE DATABASE `scd2` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 14400m WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 3 KEEP 5256000m,5256000m,5256000m PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0 TABLE_PREFIX 0 TABLE_SUFFIX 0 TSDB_PAGESIZE 4 WAL_RETENTION_PERIOD 0 WAL_RETENTION_SIZE 0")
|
||||||
|
|
||||||
tdSql.query('show create database scd4')
|
tdSql.query('show create database scd4')
|
||||||
tdSql.checkRows(1)
|
tdSql.checkRows(1)
|
||||||
tdSql.checkData(0, 0, 'scd4')
|
tdSql.checkData(0, 0, 'scd4')
|
||||||
tdSql.checkData(0, 1, "CREATE DATABASE `scd4` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 14400m WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 13 KEEP 5256000m,5256000m,5256000m PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0 TABLE_PREFIX 0 TABLE_SUFFIX 0 TSDB_PAGESIZE 4 WAL_RETENTION_PERIOD 0 WAL_RETENTION_SIZE 0 WAL_ROLL_PERIOD 0 WAL_SEGMENT_SIZE 0")
|
tdSql.checkData(0, 1, "CREATE DATABASE `scd4` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 14400m WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 13 KEEP 5256000m,5256000m,5256000m PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0 TABLE_PREFIX 0 TABLE_SUFFIX 0 TSDB_PAGESIZE 4 WAL_RETENTION_PERIOD 0 WAL_RETENTION_SIZE 0")
|
||||||
|
|
||||||
|
|
||||||
tdSql.execute('drop database scd')
|
tdSql.execute('drop database scd')
|
||||||
|
|
|
@ -117,12 +117,6 @@ endi
|
||||||
if $data23_db != 0 then # wal_retention_size
|
if $data23_db != 0 then # wal_retention_size
|
||||||
return -1
|
return -1
|
||||||
endi
|
endi
|
||||||
if $data24_db != 0 then # wal_roll_period
|
|
||||||
return -1
|
|
||||||
endi
|
|
||||||
if $data25_db != 0 then # wal_segment_size
|
|
||||||
return -1
|
|
||||||
endi
|
|
||||||
|
|
||||||
#sql show db.vgroups
|
#sql show db.vgroups
|
||||||
#if $data[0][4] == leader then
|
#if $data[0][4] == leader then
|
||||||
|
|
|
@ -129,6 +129,7 @@ sql DROP INDEX sma_index_3 ;
|
||||||
|
|
||||||
print ========== step8
|
print ========== step8
|
||||||
sql drop database if exists db;
|
sql drop database if exists db;
|
||||||
|
sleep 2000
|
||||||
sql create database db duration 300;
|
sql create database db duration 300;
|
||||||
sql use db;
|
sql use db;
|
||||||
sql create table stb1(ts timestamp, c_int int, c_bint bigint, c_sint smallint, c_tint tinyint,c_float float, c_double double, c_bool bool,c_binary binary(16), c_nchar nchar(32), c_ts timestamp,c_tint_un tinyint unsigned, c_sint_un smallint unsigned,c_int_un int unsigned, c_bint_un bigint unsigned) tags (t_int int);
|
sql create table stb1(ts timestamp, c_int int, c_bint bigint, c_sint smallint, c_tint tinyint,c_float float, c_double double, c_bool bool,c_binary binary(16), c_nchar nchar(32), c_ts timestamp,c_tint_un tinyint unsigned, c_sint_un smallint unsigned,c_int_un int unsigned, c_bint_un bigint unsigned) tags (t_int int);
|
||||||
|
|
|
@ -7,11 +7,11 @@ sql connect
|
||||||
#sql create database d1 vgroups 2
|
#sql create database d1 vgroups 2
|
||||||
sql create database d1 vgroups 2 table_prefix 3 table_suffix 2
|
sql create database d1 vgroups 2 table_prefix 3 table_suffix 2
|
||||||
sql select * from information_schema.ins_databases
|
sql select * from information_schema.ins_databases
|
||||||
print $data(d1)[27] $data(d1)[28]
|
print $data(d1)[25] $data(d1)[26]
|
||||||
if $data(d1)[27] != 3 then
|
if $data(d1)[25] != 3 then
|
||||||
return -1
|
return -1
|
||||||
endi
|
endi
|
||||||
if $data(d1)[28] != 2 then
|
if $data(d1)[26] != 2 then
|
||||||
return -1
|
return -1
|
||||||
endi
|
endi
|
||||||
|
|
||||||
|
|
|
@ -45,8 +45,6 @@ class TDTestCase:
|
||||||
"replica":1,
|
"replica":1,
|
||||||
"wal_level":1,
|
"wal_level":1,
|
||||||
"wal_fsync_period":6000,
|
"wal_fsync_period":6000,
|
||||||
"wal_roll_period":0,
|
|
||||||
"wal_segment_size":1024,
|
|
||||||
"vgroups":self.vgroups,
|
"vgroups":self.vgroups,
|
||||||
"stt_trigger":1,
|
"stt_trigger":1,
|
||||||
"tsdb_pagesize":16
|
"tsdb_pagesize":16
|
||||||
|
|
|
@ -234,6 +234,11 @@ class TDTestCase:
|
||||||
tdSql.checkData(20,6,88)
|
tdSql.checkData(20,6,88)
|
||||||
tdSql.checkData(20,7,1)
|
tdSql.checkData(20,7,1)
|
||||||
|
|
||||||
|
tdSql.query("select udf1(1) from (select 1)")
|
||||||
|
tdSql.checkData(0,0,1)
|
||||||
|
|
||||||
|
tdSql.query("select udf1(n) from (select 1 n)")
|
||||||
|
tdSql.checkData(0,0,1)
|
||||||
|
|
||||||
# aggregate functions
|
# aggregate functions
|
||||||
tdSql.query("select udf2(num1) ,udf2(num2), udf2(num3) from tb")
|
tdSql.query("select udf2(num1) ,udf2(num2), udf2(num3) from tb")
|
||||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue