commit 7d15bc670e
Merge remote-tracking branch 'origin/3.0' into enh/mian_wxy
@@ -2,7 +2,7 @@
 # taos-tools
 ExternalProject_Add(taos-tools
 GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
-GIT_TAG 4a4027c
+GIT_TAG a50de3b
 SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
 BINARY_DIR ""
 #BUILD_IN_SOURCE TRUE
@@ -47,7 +47,6 @@ Please refer to [OpenTSDB HTTP API](http://opentsdb.net/docs/build/html/api_http
 :::note

 - In JSON protocol, strings will be converted to NCHAR type and numeric values will be converted to double type.
-- Only data in array format is accepted, so an array must be used even if there is only one row.
 - The child table name is created automatically by a rule to guarantee its uniqueness. But you can configure `smlChildTableName` in taos.cfg to specify a tag value as the table name if the tag value is globally unique. For example, if a tag is called `tname` and you set `smlChildTableName=tname` in taos.cfg, when you insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000`, the child table `cpu1` is created automatically. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
 :::
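To make the `smlChildTableName` rule above concrete, here is a hedged verification sketch; the supertable `st` and tag `tname` come from the example above, and the target database is assumed to be the current one:

```sql
-- After the schemaless insert above with smlChildTableName=tname in taos.cfg,
-- the row should land in child table cpu1 instead of an auto-generated name.
SELECT DISTINCT tbname FROM st;
```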
@@ -27,7 +27,6 @@ database_option: {
 | PRECISION {'ms' | 'us' | 'ns'}
 | REPLICA value
 | RETENTIONS ingestion_duration:keep_duration ...
-| STRICT {'off' | 'on'}
 | WAL_LEVEL {1 | 2}
 | VGROUPS value
 | SINGLE_STABLE {0 | 1}
@@ -61,9 +60,6 @@ database_option: {
 - PRECISION: specifies the precision at which a database records timestamps. Enter ms for milliseconds, us for microseconds, or ns for nanoseconds. The default value is ms.
 - REPLICA: specifies the number of replicas that are made of the database. Enter 1 or 3. The default value is 1. The value of the REPLICA parameter cannot exceed the number of dnodes in the cluster.
 - RETENTIONS: specifies the retention period for data aggregated at various intervals. For example, RETENTIONS 15s:7d,1m:21d,15m:50d indicates that data aggregated every 15 seconds is retained for 7 days, data aggregated every 1 minute is retained for 21 days, and data aggregated every 15 minutes is retained for 50 days. You must enter three aggregation intervals and corresponding retention periods.
-- STRICT: specifies whether strong data consistency is enabled. The default value is off.
-  - on: Strong consistency is enabled and implemented through the Raft consensus algorithm. In this mode, an operation is considered successful once it is confirmed by half of the nodes in the cluster.
-  - off: Strong consistency is disabled. In this mode, an operation is considered successful when it is initiated by the local node.
 - WAL_LEVEL: specifies whether fsync is enabled. The default value is 1.
   - 1: WAL is enabled but fsync is disabled.
   - 2: WAL and fsync are both enabled.
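For orientation, a minimal sketch that combines the remaining options from the list above (the database name is illustrative; STRICT is intentionally absent, matching its removal in this change):

```sql
-- Millisecond precision, single replica, WAL with fsync,
-- 4 vgroups, multiple supertables allowed.
CREATE DATABASE power PRECISION 'ms' REPLICA 1 WAL_LEVEL 2 VGROUPS 4 SINGLE_STABLE 0;
```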
@@ -10,7 +10,7 @@ Because stream processing is built in to TDengine, you are no longer reliant on
 ## Create a Stream

 ```sql
-CREATE STREAM [IF NOT EXISTS] stream_name [stream_options] INTO stb_name AS subquery
+CREATE STREAM [IF NOT EXISTS] stream_name [stream_options] INTO stb_name SUBTABLE(expression) AS subquery
 stream_options: {
  TRIGGER [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time]
  WATERMARK time
@@ -30,6 +30,8 @@ subquery: SELECT [DISTINCT] select_list

 Session windows, state windows, and sliding windows are supported. When you configure a session or state window for a supertable, you must use PARTITION BY TBNAME.

+The SUBTABLE clause defines the naming rules for auto-created subtables; see the Partitions of Stream section below for details.
+
 ```sql
 window_clause: {
    SESSION(ts_col, tol_val)
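A brief sketch of the session-window constraint stated above (the stream and target names are assumptions; `meters` and its timestamp column `ts` follow the examples used elsewhere in this document):

```sql
-- A session window on a supertable requires PARTITION BY TBNAME.
CREATE STREAM meters_sessions INTO meters_sessions AS
  SELECT _wstart, count(*) FROM meters PARTITION BY TBNAME SESSION(ts, 10s);
```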
@@ -47,6 +49,47 @@ CREATE STREAM avg_vol_s INTO avg_vol AS
 SELECT _wstart, count(*), avg(voltage) FROM meters PARTITION BY tbname INTERVAL(1m) SLIDING(30s);
 ```

+## Partitions of Stream
+
+A stream can process data in multiple partitions. Partition rules can be defined with the PARTITION BY clause in stream processing. Each partition has its own timeline and windows, is processed separately, and is written to a different subtable of the target supertable.
+
+If a stream is created without a PARTITION BY clause, all data is written to one subtable.
+
+If a stream is created with a PARTITION BY clause but without a SUBTABLE clause, the subtable of each partition is given an automatically generated name.
+
+If a stream is created with both a PARTITION BY clause and a SUBTABLE clause, the name of each partition's subtable is calculated according to the SUBTABLE clause. For example:
+
+```sql
+CREATE STREAM avg_vol_s INTO avg_vol SUBTABLE(CONCAT('new-', tname)) AS SELECT _wstart, count(*), avg(voltage) FROM meters PARTITION BY tbname tname INTERVAL(1m);
+```
+
+In the PARTITION clause, 'tbname', which represents the subtable name of the source supertable, is given the alias 'tname', and 'tname' is then used in the SUBTABLE clause. As a result, each auto-created subtable is named by concatenating 'new-' with the source subtable name. Other expressions are also allowed in the SUBTABLE clause, but the output type must be varchar.
+
+If the output length exceeds the TDengine limit (192), the name is truncated. If the generated name is already occupied by another table, the creation of the new subtable and writes to it will fail.
+
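Because any varchar-valued expression is allowed in SUBTABLE, here is a second hedged sketch; it assumes `meters` carries a varchar tag `location`, which is not shown in this document:

```sql
-- Name each subtable after the partition's location tag value.
CREATE STREAM vol_by_loc INTO vol_by_loc SUBTABLE(CONCAT('loc-', loc)) AS
  SELECT _wstart, avg(voltage) FROM meters PARTITION BY location loc INTERVAL(1m);
```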
+## Filling history data
+
+Normally, a stream does not process data that was already written to the source table, or that is being written, at the time the stream is created. Adding FILL_HISTORY 1 as a stream option, however, allows the stream to process data written before and during its creation. For example:
+
+```sql
+create stream if not exists s1 fill_history 1 into st1 as select count(*) from t1 interval(10s)
+```
+
+By combining the fill_history option with a WHERE clause, a stream can process only data in a specific time range, for example only data after a past point in time (in this case, 2020-01-30):
+
+```sql
+create stream if not exists s1 fill_history 1 into st1 as select count(*) from t1 where ts > '2020-01-30' interval(10s)
+```
+
+As another example, a stream can process only data starting from some past time and ending at some future time:
+
+```sql
+create stream if not exists s1 fill_history 1 into st1 as select count(*) from t1 where ts > '2020-01-30' and ts < '2023-01-01' interval(10s)
+```
+
+If a stream is completely outdated and you no longer want it to monitor or process data, it can be dropped manually; the data it has already produced is still kept.
+
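A minimal sketch of that manual cleanup (the stream name `s1` follows the examples above; the target supertable `st1` and its data remain):

```sql
DROP STREAM IF EXISTS s1;
```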
 ## Delete a Stream

 ```sql
@@ -878,8 +878,10 @@ The source code of the sample application is under `TDengine/examples/JDBC`:

 | taos-jdbcdriver version | major changes |
 | :---------------------: | :--------------------------------------------: |
+| 3.0.3 | fix timestamp resolution error for REST connections on JDK 17+ |
 | 3.0.1 - 3.0.2 | fix occasional incorrect parsing of resultSet data. 3.0.1 is compiled on JDK 11; you are advised to use 3.0.2 in a JDK 8 environment |
 | 3.0.0 | Support for TDengine 3.0 |
+| 2.0.42 | fix wasNull interface return value in WebSocket connections |
 | 2.0.41 | fix decode method of username and password in REST connections |
 | 2.0.39 - 2.0.40 | Add REST connection/request timeout parameters |
 | 2.0.38 | JDBC REST connections add bulk pull function |
@@ -59,6 +59,7 @@ Usage of taosAdapter:
 --collectd.port int collectd server port. Env "TAOS_ADAPTER_COLLECTD_PORT" (default 6045)
 --collectd.user string collectd user. Env "TAOS_ADAPTER_COLLECTD_USER" (default "root")
 --collectd.worker int collectd write worker. Env "TAOS_ADAPTER_COLLECTD_WORKER" (default 10)
+--collectd.ttl int collectd data ttl. Env "TAOS_ADAPTER_COLLECTD_TTL" (default 0, means no ttl)
 -c, --config string config path default /etc/taos/taosadapter.toml
 --cors.allowAllOrigins cors allow all origins. Env "TAOS_ADAPTER_CORS_ALLOW_ALL_ORIGINS" (default true)
 --cors.allowCredentials cors allow credentials. Env "TAOS_ADAPTER_CORS_ALLOW_Credentials"
@@ -100,6 +101,7 @@ Usage of taosAdapter:
 --node_exporter.responseTimeout duration node_exporter response timeout. Env "TAOS_ADAPTER_NODE_EXPORTER_RESPONSE_TIMEOUT" (default 5s)
 --node_exporter.urls strings node_exporter urls. Env "TAOS_ADAPTER_NODE_EXPORTER_URLS" (default [http://localhost:9100])
 --node_exporter.user string node_exporter user. Env "TAOS_ADAPTER_NODE_EXPORTER_USER" (default "root")
+--node_exporter.ttl int node_exporter data ttl. Env "TAOS_ADAPTER_NODE_EXPORTER_TTL" (default 0, means no ttl)
 --opentsdb.enable enable opentsdb. Env "TAOS_ADAPTER_OPENTSDB_ENABLE" (default true)
 --opentsdb_telnet.batchSize int opentsdb_telnet batch size. Env "TAOS_ADAPTER_OPENTSDB_TELNET_BATCH_SIZE" (default 1)
 --opentsdb_telnet.dbs strings opentsdb_telnet db names. Env "TAOS_ADAPTER_OPENTSDB_TELNET_DBS" (default [opentsdb_telnet,collectd_tsdb,icinga2_tsdb,tcollector_tsdb])
@@ -110,6 +112,7 @@ Usage of taosAdapter:
 --opentsdb_telnet.ports ints opentsdb telnet tcp port. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PORTS" (default [6046,6047,6048,6049])
 --opentsdb_telnet.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TCP_KEEP_ALIVE"
 --opentsdb_telnet.user string opentsdb_telnet user. Env "TAOS_ADAPTER_OPENTSDB_TELNET_USER" (default "root")
+--opentsdb_telnet.ttl int opentsdb_telnet data ttl. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TTL" (default 0, means no ttl)
 --pool.idleTimeout duration Set idle connection timeout. Env "TAOS_ADAPTER_POOL_IDLE_TIMEOUT" (default 1h0m0s)
 --pool.maxConnect int max connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_CONNECT" (default 4000)
 --pool.maxIdle int max idle connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_IDLE" (default 4000)
@@ -131,6 +134,7 @@ Usage of taosAdapter:
 --statsd.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_STATSD_TCP_KEEP_ALIVE"
 --statsd.user string statsd user. Env "TAOS_ADAPTER_STATSD_USER" (default "root")
 --statsd.worker int statsd write worker. Env "TAOS_ADAPTER_STATSD_WORKER" (default 10)
+--statsd.ttl int statsd data ttl. Env "TAOS_ADAPTER_STATSD_TTL" (default 0, means no ttl)
 --taosConfigDir string load taos client config path. Env "TAOS_ADAPTER_TAOS_CONFIG_FILE"
 --version Print the version and exit
 ```
@@ -195,6 +199,7 @@ Support InfluxDB query parameters as follows.
 - `precision` The time precision used by TDengine
 - `u` TDengine user name
 - `p` TDengine password
+- `ttl` The time to live of automatically created sub-tables. This value cannot be updated. TDengine uses the ttl value carried by the first record of a sub-table to create that sub-table. For more information, please refer to [Create Table](/taos-sql/table/#create-table)

 Note: InfluxDB token authorization is not supported at present. Only Basic authorization and query parameter validation are supported.
 Example: curl --request POST http://127.0.0.1:6041/influxdb/v1/write?db=test --user "root:taosdata" --data-binary "measurement,host=host1 field1=2i,field2=2.0 1577836800000000000"
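For reference, the `ttl` carried on the first write corresponds to the TTL table option. A hedged sketch of the equivalent explicit creation, with illustrative table and tag names (TTL is expressed in days by default):

```sql
-- A subtable whose data expires after 100 days (names are illustrative only).
CREATE TABLE IF NOT EXISTS test.host1 USING test.measurement TAGS ('host1') TTL 100;
```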
@@ -204,6 +204,12 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
 - **-a/--replica <replicaNum\>** :
   Specify the number of replicas when creating the database. The default value is 1.

+- **-k/--keep-trying <NUMBER\>** :
+  Keep trying if an insert fails; the default is not to retry. Available since v3.0.9.
+
+- **-z/--trying-interval <NUMBER\>** :
+  Specify the interval between retry attempts. The value must be a positive number, and the option takes effect only when retrying (-k) is enabled. Available since v3.0.9.
+
 - **-V/--version** :
   Show version information only. Users should not use it with other parameters.
@@ -231,6 +237,10 @@ The parameters listed in this section apply to all function modes.

 `filetype` must be set to `insert` in the insertion scenario. See [General Configuration Parameters](#General Configuration Parameters)

+- **keep_trying**: Keep trying if an insert fails; the default is not to retry. Available since v3.0.9.
+
+- **trying_interval**: Specify the interval between retry attempts. The value must be a positive number, and the parameter takes effect only when keep_trying is enabled. Available since v3.0.9.
+
 #### Database related configuration parameters

 The parameters related to database creation are configured in `dbinfo` in the json configuration file, as follows. The other parameters correspond to the database parameters specified for `create database` in [../../taos-sql/database].
@@ -19,7 +19,7 @@ Users should not use taosdump to back up raw data, environment settings, hardwar

 There are two ways to install taosdump:

-- Install the taosTools official installer. Please find taosTools on the [All download links](https://www.tdengine.com/all-downloads) page, then download and install it.
+- Install the taosTools official installer. Please find taosTools on the [Release History](https://docs.taosdata.com/releases/tools/) page, then download and install it.

 - Compile taos-tools separately and install it. Please refer to the [taos-tools](https://github.com/taosdata/taos-tools) repository for details.
@@ -153,11 +153,11 @@ The parameters described in this document by the effect that they have on the sy
 | Meaning     | Execution policy for query statements |
 | Unit        | None |
 | Default     | 1 |
-| Notes       | 1: Run queries on vnodes and not on qnodes |
+| Value Range | 1: Run queries on vnodes and not on qnodes

 2: Run subtasks without scan operators on qnodes and subtasks with scan operators on vnodes.

-3: Only run scan operators on vnodes; run all other operators on qnodes.
+3: Only run scan operators on vnodes; run all other operators on qnodes. |
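Because queryPolicy takes effect on the client side, one way to confirm the value a client is actually running with is sketched below; whether queryPolicy appears in the output depends on the client version, so treat that as an assumption:

```sql
-- Lists client-side parameters and their current values.
SHOW LOCAL VARIABLES;
```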
 ### querySmaOptimize
|
@ -173,6 +173,14 @@ The parameters described in this document by the effect that they have on the sy
|
||||||
|
|
||||||
1: Enable SMA indexing and perform queries from suitable statements on precomputation results.|
|
1: Enable SMA indexing and perform queries from suitable statements on precomputation results.|
|
||||||
|
|
||||||
|
### countAlwaysReturnValue
|
||||||
|
|
||||||
|
| Attribute | Description |
|
||||||
|
| -------- | -------------------------------- |
|
||||||
|
| Applicable | Server only |
|
||||||
|
| Meaning | count()/hyperloglog() return value or not if the result data is NULL |
|
||||||
|
| Vlue Range | 0:Return empty line,1:Return 0 |
|
||||||
|
| Default | 1 |
|
||||||
|
|
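A small illustrative query for the parameter above (the table name is assumed): with the default of 1, an aggregate over an empty result produces one row containing 0; with 0, the result set is empty.

```sql
-- Matches no rows: returns a single row with 0 when countAlwaysReturnValue = 1,
-- and no rows at all when countAlwaysReturnValue = 0.
SELECT COUNT(*) FROM meters WHERE ts > NOW + 1d;
```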
 ### maxNumOfDistinctRes
@@ -307,6 +315,14 @@ The charset that takes effect is UTF-8.
 | Meaning       | All data files are stored in this directory |
 | Default Value | /var/lib/taos |

+### tempDir
+
+| Attribute  | Description |
+| ---------- | ----------- |
+| Applicable | Server only |
+| Meaning    | The directory for temporary files generated while the system is running |
+| Default    | /tmp |
+
 ### minimalTmpDirGB

 | Attribute | Description |
||||||
|
@ -336,89 +352,6 @@ The charset that takes effect is UTF-8.
|
||||||
| Value Range | 0-4096 |
|
| Value Range | 0-4096 |
|
||||||
| Default Value | 2x the CPU cores |
|
| Default Value | 2x the CPU cores |
|
||||||
|
|
||||||
## Time Parameters
|
|
||||||
|
|
||||||
### statusInterval
|
|
||||||
|
|
||||||
| Attribute | Description |
|
|
||||||
| -------- | --------------------------- |
|
|
||||||
| Applicable | Server Only |
|
|
||||||
| Meaning | the interval of dnode reporting status to mnode |
|
|
||||||
| Unit | second |
|
|
||||||
| Value Range | 1-10 |
|
|
||||||
| Default Value | 1 |
|
|
||||||
|
|
||||||
### shellActivityTimer
|
|
||||||
|
|
||||||
| Attribute | Description |
|
|
||||||
| -------- | --------------------------------- |
|
|
||||||
| Applicable | Server and Client |
|
|
||||||
| Meaning | The interval for TDengine CLI to send heartbeat to mnode |
|
|
||||||
| Unit | second |
|
|
||||||
| Value Range | 1-120 |
|
|
||||||
| Default Value | 3 |
|
|
||||||
|
|
||||||
## Performance Optimization Parameters
|
|
||||||
|
|
||||||
### numOfCommitThreads
|
|
||||||
|
|
||||||
| Attribute | Description |
|
|
||||||
| -------- | ---------------------- |
|
|
||||||
| Applicable | Server Only |
|
|
||||||
| Meaning | Maximum of threads for committing to disk |
|
|
||||||
| Default Value | |
|
|
||||||
|
|
||||||
## Compression Parameters
|
|
||||||
|
|
||||||
### compressMsgSize
|
|
||||||
|
|
||||||
| Attribute | Description |
|
|
||||||
| ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
|
||||||
| Applicable | Server Only |
|
|
||||||
| Meaning | The threshold for message size to compress the message. | Set the value to 64330 bytes for good message compression. |
|
|
||||||
| Unit | bytes |
|
|
||||||
| Value Range | 0: already compress; >0: compress when message exceeds it; -1: always uncompress |
|
|
||||||
| Default Value | -1 |
|
|
||||||
|
|
||||||
### compressColData
|
|
||||||
|
|
||||||
| Attribute | Description |
|
|
||||||
| -------- | --------------------------------------------------------------------------------------- |
|
|
||||||
| Applicable | Server Only |
|
|
||||||
| Meaning | The threshold for size of column data to trigger compression for the query result |
|
|
||||||
| Unit | bytes |
|
|
||||||
| Value Range | 0: always compress; >0: only compress when the size of any column data exceeds the threshold; -1: always uncompress |
|
|
||||||
| Default Value | -1 |
|
|
||||||
| Default Value | -1 |
|
|
||||||
| Note | available from version 2.3.0.0 | |
|
|
||||||
|
|
||||||
## Continuous Query Parameters |
|
|
||||||
|
|
||||||
### minSlidingTime
|
|
||||||
|
|
||||||
| Attribute | Description |
|
|
||||||
| ------------- | -------------------------------------------------------- |
|
|
||||||
| Applicable | Server Only |
|
|
||||||
| Meaning | Minimum sliding time of time window |
|
|
||||||
| Unit | millisecond or microsecond , depending on time precision |
|
|
||||||
| Value Range | 10-1000000 |
|
|
||||||
| Default Value | 10 |
|
|
||||||
|
|
||||||
### minIntervalTime
|
|
||||||
|
|
||||||
| Attribute | Description |
|
|
||||||
| ------------- | --------------------------- |
|
|
||||||
| Applicable | Server Only |
|
|
||||||
| Meaning | Minimum size of time window |
|
|
||||||
| Unit | millisecond |
|
|
||||||
| Value Range | 1-1000000 |
|
|
||||||
| Default Value | 10 |
|
|
||||||
|
|
||||||
:::info
|
|
||||||
To prevent system resource from being exhausted by multiple concurrent streams, a random delay is applied on each stream automatically. `maxFirstStreamCompDelay` is the maximum delay time before a continuous query is started the first time. `streamCompDelayRatio` is the ratio for calculating delay time, with the size of the time window as base. `maxStreamCompDelay` is the maximum delay time. The actual delay time is a random time not bigger than `maxStreamCompDelay`. If a continuous query fails, `retryStreamComDelay` is the delay time before retrying it, also not bigger than `maxStreamCompDelay`.
|
|
||||||
|
|
||||||
:::
|
|
||||||
|
|
||||||
## Log Parameters
|
## Log Parameters
|
||||||
|
|
||||||
### logDir
|
### logDir
|
||||||
|
@@ -665,6 +598,18 @@ To prevent system resource from being exhausted by multiple concurrent streams,
 | Value Range | 0: not consistent; 1: consistent. |
 | Default     | 1 |

+## Compress Parameters
+
+### compressMsgSize
+
+| Attribute   | Description |
+| ----------- | ----------- |
+| Applicable  | Both Client and Server side |
+| Meaning     | Whether RPC messages are compressed |
+| Value Range | -1: no message is compressed; 0: all messages are compressed; N (N>0): only messages larger than N bytes are compressed |
+| Default     | -1 |
+
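To check the value in effect on a server node, a hedged sketch (the dnode id 1 is an assumption; `SHOW LOCAL VARIABLES` covers the client side):

```sql
-- Lists server-side parameters on dnode 1, expected to include compressMsgSize.
SHOW DNODE 1 VARIABLES;
```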
 ## Other Parameters

 ### enableCoreFile
@@ -686,172 +631,60 @@ To prevent system resource from being exhausted by multiple concurrent streams,
 | Value Range   | 0: disable UDF; 1: enabled UDF |
 | Default Value | 1 |

-## Parameter Comparison of TDengine 2.x and 3.0
-
-| # | **Parameter** | **In 2.x** | **In 3.0** |
-| --- | :-----------------: | --------------- | --------------- |
-| 1 | firstEp | Yes | Yes |
-| 2 | secondEp | Yes | Yes |
-| 3 | fqdn | Yes | Yes |
-| 4 | serverPort | Yes | Yes |
-| 5 | maxShellConns | Yes | Yes |
-| 6 | monitor | Yes | Yes |
-| 7 | monitorFqdn | No | Yes |
-| 8 | monitorPort | No | Yes |
-| 9 | monitorInterval | Yes | Yes |
-| 10 | monitorMaxLogs | No | Yes |
-| 11 | monitorComp | No | Yes |
-| 12 | telemetryReporting | Yes | Yes |
-| 13 | telemetryInterval | No | Yes |
-| 14 | telemetryServer | No | Yes |
-| 15 | telemetryPort | No | Yes |
-| 16 | queryPolicy | No | Yes |
-| 17 | querySmaOptimize | No | Yes |
-| 18 | queryRsmaTolerance | No | Yes |
-| 19 | queryBufferSize | Yes | Yes |
-| 20 | maxNumOfDistinctRes | Yes | Yes |
-| 21 | minSlidingTime | Yes | Yes |
-| 22 | minIntervalTime | Yes | Yes |
-| 23 | countAlwaysReturnValue | Yes | Yes |
-| 24 | dataDir | Yes | Yes |
-| 25 | minimalDataDirGB | Yes | Yes |
-| 26 | supportVnodes | No | Yes |
-| 27 | tempDir | Yes | Yes |
-| 28 | minimalTmpDirGB | Yes | Yes |
-| 29 | compressMsgSize | Yes | Yes |
-| 30 | compressColData | Yes | Yes |
-| 31 | smlChildTableName | Yes | Yes |
-| 32 | smlTagName | Yes | Yes |
-| 33 | smlDataFormat | No | Yes |
-| 34 | statusInterval | Yes | Yes |
-| 35 | shellActivityTimer | Yes | Yes |
-| 36 | transPullupInterval | No | Yes |
-| 37 | mqRebalanceInterval | No | Yes |
-| 38 | ttlUnit | No | Yes |
-| 39 | ttlPushInterval | No | Yes |
-| 40 | numOfTaskQueueThreads | No | Yes |
-| 41 | numOfRpcThreads | No | Yes |
-| 42 | numOfCommitThreads | Yes | Yes |
-| 43 | numOfMnodeReadThreads | No | Yes |
-| 44 | numOfVnodeQueryThreads | No | Yes |
-| 45 | numOfVnodeStreamThreads | No | Yes |
-| 46 | numOfVnodeFetchThreads | No | Yes |
-| 47 | numOfVnodeRsmaThreads | No | Yes |
-| 48 | numOfQnodeQueryThreads | No | Yes |
-| 49 | numOfQnodeFetchThreads | No | Yes |
-| 50 | numOfSnodeSharedThreads | No | Yes |
-| 51 | numOfSnodeUniqueThreads | No | Yes |
-| 52 | rpcQueueMemoryAllowed | No | Yes |
-| 53 | logDir | Yes | Yes |
-| 54 | minimalLogDirGB | Yes | Yes |
-| 55 | numOfLogLines | Yes | Yes |
-| 56 | asyncLog | Yes | Yes |
-| 57 | logKeepDays | Yes | Yes |
-| 60 | debugFlag | Yes | Yes |
-| 61 | tmrDebugFlag | Yes | Yes |
-| 62 | uDebugFlag | Yes | Yes |
-| 63 | rpcDebugFlag | Yes | Yes |
-| 64 | jniDebugFlag | Yes | Yes |
-| 65 | qDebugFlag | Yes | Yes |
-| 66 | cDebugFlag | Yes | Yes |
-| 67 | dDebugFlag | Yes | Yes |
-| 68 | vDebugFlag | Yes | Yes |
-| 69 | mDebugFlag | Yes | Yes |
-| 70 | wDebugFlag | Yes | Yes |
-| 71 | sDebugFlag | Yes | Yes |
-| 72 | tsdbDebugFlag | Yes | Yes |
-| 73 | tqDebugFlag | No | Yes |
-| 74 | fsDebugFlag | Yes | Yes |
-| 75 | udfDebugFlag | No | Yes |
-| 76 | smaDebugFlag | No | Yes |
-| 77 | idxDebugFlag | No | Yes |
-| 78 | tdbDebugFlag | No | Yes |
-| 79 | metaDebugFlag | No | Yes |
-| 80 | timezone | Yes | Yes |
-| 81 | locale | Yes | Yes |
-| 82 | charset | Yes | Yes |
-| 83 | udf | Yes | Yes |
-| 84 | enableCoreFile | Yes | Yes |
-| 85 | arbitrator | Yes | No |
-| 86 | numOfThreadsPerCore | Yes | No |
-| 87 | numOfMnodes | Yes | No |
-| 88 | vnodeBak | Yes | No |
-| 89 | balance | Yes | No |
-| 90 | balanceInterval | Yes | No |
-| 91 | offlineThreshold | Yes | No |
-| 92 | role | Yes | No |
-| 93 | dnodeNopLoop | Yes | No |
-| 94 | keepTimeOffset | Yes | No |
-| 95 | rpcTimer | Yes | No |
-| 96 | rpcMaxTime | Yes | No |
-| 97 | rpcForceTcp | Yes | No |
-| 98 | tcpConnTimeout | Yes | No |
-| 99 | syncCheckInterval | Yes | No |
-| 100 | maxTmrCtrl | Yes | No |
-| 101 | monitorReplica | Yes | No |
-| 102 | smlTagNullName | Yes | No |
-| 103 | keepColumnName | Yes | No |
-| 104 | ratioOfQueryCores | Yes | No |
-| 105 | maxStreamCompDelay | Yes | No |
-| 106 | maxFirstStreamCompDelay | Yes | No |
-| 107 | retryStreamCompDelay | Yes | No |
-| 108 | streamCompDelayRatio | Yes | No |
-| 109 | maxVgroupsPerDb | Yes | No |
-| 110 | maxTablesPerVnode | Yes | No |
-| 111 | minTablesPerVnode | Yes | No |
-| 112 | tableIncStepPerVnode | Yes | No |
-| 113 | cache | Yes | No |
-| 114 | blocks | Yes | No |
-| 115 | days | Yes | No |
-| 116 | keep | Yes | No |
-| 117 | minRows | Yes | No |
-| 118 | maxRows | Yes | No |
-| 119 | quorum | Yes | No |
-| 120 | comp | Yes | No |
-| 121 | walLevel | Yes | No |
-| 122 | fsync | Yes | No |
-| 123 | replica | Yes | No |
-| 124 | partitions | Yes | No |
-| 125 | quorum | Yes | No |
-| 126 | update | Yes | No |
-| 127 | cachelast | Yes | No |
-| 128 | maxSQLLength | Yes | No |
-| 129 | maxWildCardsLength | Yes | No |
-| 130 | maxRegexStringLen | Yes | No |
-| 131 | maxNumOfOrderedRes | Yes | No |
-| 132 | maxConnections | Yes | No |
-| 133 | mnodeEqualVnodeNum | Yes | No |
-| 134 | http | Yes | No |
-| 135 | httpEnableRecordSql | Yes | No |
-| 136 | httpMaxThreads | Yes | No |
-| 137 | restfulRowLimit | Yes | No |
-| 138 | httpDbNameMandatory | Yes | No |
-| 139 | httpKeepAlive | Yes | No |
-| 140 | enableRecordSql | Yes | No |
-| 141 | maxBinaryDisplayWidth | Yes | No |
-| 142 | stream | Yes | No |
-| 143 | retrieveBlockingModel | Yes | No |
-| 144 | tsdbMetaCompactRatio | Yes | No |
-| 145 | defaultJSONStrType | Yes | No |
-| 146 | walFlushSize | Yes | No |
-| 147 | keepTimeOffset | Yes | No |
-| 148 | flowctrl | Yes | No |
-| 149 | slaveQuery | Yes | No |
-| 150 | adjustMaster | Yes | No |
-| 151 | topicBinaryLen | Yes | No |
-| 152 | telegrafUseFieldNum | Yes | No |
-| 153 | deadLockKillQuery | Yes | No |
-| 154 | clientMerge | Yes | No |
-| 155 | sdbDebugFlag | Yes | No |
-| 156 | odbcDebugFlag | Yes | No |
-| 157 | httpDebugFlag | Yes | No |
-| 158 | monDebugFlag | Yes | No |
-| 159 | cqDebugFlag | Yes | No |
-| 160 | shortcutFlag | Yes | No |
-| 161 | probeSeconds | Yes | No |
-| 162 | probeKillSeconds | Yes | No |
-| 163 | probeInterval | Yes | No |
-| 164 | lossyColumns | Yes | No |
-| 165 | fPrecision | Yes | No |
-| 166 | dPrecision | Yes | No |
-| 167 | maxRange | Yes | No |
-| 168 | range | Yes | No |
+## 3.0 Parameters
+
+| # | **Parameter** | **Applicable to 2.x** | **Applicable to 3.0** | Current behavior in 3.0 |
+| --- | :---------------------: | --------------- | --------------- | ------------------------------------------------- |
+| 1 | firstEp | Yes | Yes | |
+| 2 | secondEp | Yes | Yes | |
+| 3 | fqdn | Yes | Yes | |
+| 4 | serverPort | Yes | Yes | |
+| 5 | maxShellConns | Yes | Yes | |
+| 6 | monitor | Yes | Yes | |
+| 7 | monitorFqdn | No | Yes | |
+| 8 | monitorPort | No | Yes | |
+| 9 | monitorInterval | Yes | Yes | |
+| 10 | queryPolicy | No | Yes | |
+| 11 | querySmaOptimize | No | Yes | |
+| 12 | maxNumOfDistinctRes | Yes | Yes | |
+| 15 | countAlwaysReturnValue | Yes | Yes | |
+| 16 | dataDir | Yes | Yes | |
+| 17 | minimalDataDirGB | Yes | Yes | |
+| 18 | supportVnodes | No | Yes | |
+| 19 | tempDir | Yes | Yes | |
+| 20 | minimalTmpDirGB | Yes | Yes | |
+| 21 | smlChildTableName | Yes | Yes | |
+| 22 | smlTagName | Yes | Yes | |
+| 23 | smlDataFormat | No | Yes | |
+| 24 | statusInterval | Yes | Yes | |
+| 25 | logDir | Yes | Yes | |
+| 26 | minimalLogDirGB | Yes | Yes | |
+| 27 | numOfLogLines | Yes | Yes | |
+| 28 | asyncLog | Yes | Yes | |
+| 29 | logKeepDays | Yes | Yes | |
+| 30 | debugFlag | Yes | Yes | |
+| 31 | tmrDebugFlag | Yes | Yes | |
+| 32 | uDebugFlag | Yes | Yes | |
+| 33 | rpcDebugFlag | Yes | Yes | |
+| 34 | jniDebugFlag | Yes | Yes | |
+| 35 | qDebugFlag | Yes | Yes | |
+| 36 | cDebugFlag | Yes | Yes | |
+| 37 | dDebugFlag | Yes | Yes | |
+| 38 | vDebugFlag | Yes | Yes | |
+| 39 | mDebugFlag | Yes | Yes | |
+| 40 | wDebugFlag | Yes | Yes | |
+| 41 | sDebugFlag | Yes | Yes | |
+| 42 | tsdbDebugFlag | Yes | Yes | |
+| 43 | tqDebugFlag | No | Yes | |
+| 44 | fsDebugFlag | Yes | Yes | |
+| 45 | udfDebugFlag | No | Yes | |
+| 46 | smaDebugFlag | No | Yes | |
+| 47 | idxDebugFlag | No | Yes | |
+| 48 | tdbDebugFlag | No | Yes | |
+| 49 | metaDebugFlag | No | Yes | |
+| 50 | timezone | Yes | Yes | |
+| 51 | locale | Yes | Yes | |
+| 52 | charset | Yes | Yes | |
+| 53 | udf | Yes | Yes | |
+| 54 | enableCoreFile | Yes | Yes | |
@@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://w

 import Release from "/components/ReleaseV3";

+## 3.0.2.0
+
+<Release type="tdengine" version="3.0.2.0" />
+
 ## 3.0.1.8

 <Release type="tdengine" version="3.0.1.8" />
@@ -10,6 +10,10 @@ For other historical version installers, please visit [here](https://www.taosdat

 import Release from "/components/ReleaseV3";

+## 2.3.2
+
+<Release type="tools" version="2.3.2" />
+
 ## 2.3.0

 <Release type="tools" version="2.3.0" />
@@ -8,7 +8,7 @@ conn.execute("CREATE DATABASE test")
 # change database. same as execute "USE db"
 conn.select_db("test")
 conn.execute("CREATE STABLE weather(ts TIMESTAMP, temperature FLOAT) TAGS (location INT)")
-affected_row: int = conn.execute("INSERT INTO t1 USING weather TAGS(1) VALUES (now, 23.5) (now+1m, 23.5) (now+2m 24.4)")
+affected_row: int = conn.execute("INSERT INTO t1 USING weather TAGS(1) VALUES (now, 23.5) (now+1m, 23.5) (now+2m, 24.4)")
 print("affected_row", affected_row)
 # output:
 # affected_row 3
@@ -47,7 +47,6 @@ The OpenTSDB JSON protocol uses one JSON string to represent one or more rows of data
 :::note

 - For the JSON protocol, TDengine does not automatically convert all tags to NCHAR type; strings are converted to NCHAR type, and numeric values are converted to DOUBLE type.
-- TDengine only accepts JSON strings in **array format**; even a single row of data must be written as an array.
 - By default, the generated child table name is a unique ID produced by rule. You can also specify a tag value as the child table name by configuring the smlChildTableName parameter in taos.cfg; the tag value should be globally unique. For example, given a tag named tname and the configuration smlChildTableName=tname, inserting `"tags": { "host": "web02","dc": "lga","tname":"cpu1"}` creates the child table cpu1. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row that auto-creates the table is used and the other rows' tag_sets are ignored.
 :::
@@ -68,39 +68,38 @@ TDengine currently supports timestamp, numeric, character, and Boolean types, and the corresponding Java
 ### Installing the Connector

 <Tabs defaultValue="maven">
 <TabItem value="maven" label="Install via Maven">

-taos-jdbcdriver has been published to the [Sonatype Repository](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver)
-repository, and all major repositories are synchronized with it.
+taos-jdbcdriver has been published to the [Sonatype Repository](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver) repository, and all major repositories are synchronized with it.

 - [sonatype](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver)
 - [mvnrepository](https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver)
 - [maven.aliyun](https://maven.aliyun.com/mvn/search)

 In a Maven project, add the following dependency to pom.xml:

 ```xml-dtd
 <dependency>
  <groupId>com.taosdata.jdbc</groupId>
  <artifactId>taos-jdbcdriver</artifactId>
  <version>3.0.0</version>
 </dependency>
 ```

 </TabItem>
 <TabItem value="source" label="Build from source">

 You can also download the TDengine source code and build the latest version of the Java connector yourself.

 ```shell
 git clone https://github.com/taosdata/taos-connector-jdbc.git
 cd taos-connector-jdbc
 mvn clean install -Dmaven.test.skip=true
 ```

 After compilation, a jar package named taos-jdbcdriver-3.0.*-dist.jar is generated in the target directory, and the compiled jar file is automatically placed in the local Maven repository.

 </TabItem>
 </Tabs>

 ## Establishing a Connection
@@ -111,125 +110,117 @@ The JDBC URL for TDengine follows this format:
 There are slight differences between native connections and REST connections when establishing a connection.

 <Tabs defaultValue="rest">
 <TabItem value="native" label="Native connection">

 ```java
 Class.forName("com.taosdata.jdbc.TSDBDriver");
 String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata";
 Connection conn = DriverManager.getConnection(jdbcUrl);
 ```

 The example above uses TSDBDriver with a JDBC native connection to connect to the database test on host taosdemo.com, port 6030 (the default TDengine port). The URL
 specifies the user name (user) as root and the password (password) as taosdata.

 **Note**: With a JDBC native connection, taos-jdbcdriver depends on the client driver (libtaos.so on Linux, taos.dll on Windows, libtaos.dylib on macOS).

 The configuration parameters in the URL are as follows:

 - user: TDengine user name; default 'root'.
 - password: user password; default 'taosdata'.
 - cfgdir: directory of the client configuration file; default /etc/taos on Linux, C:/TDengine/cfg on Windows.
 - charset: character set used by the client; defaults to the system character set.
 - locale: client locale; defaults to the current system locale.
 - timezone: time zone used by the client; defaults to the current system time zone.
 - batchfetch: true fetches result sets in batches when executing queries; false fetches result sets row by row. Default: true. Batch fetching retrieves a batch of data per round trip, which can markedly improve performance when the result set is large.
-- batchErrorIgnore: true: when Statement.executeBatch runs and one SQL statement in the middle fails, the following SQL statements continue to execute; false: none of the statements
-after the failed SQL statement are executed. Default: false.
+- batchErrorIgnore: true: when Statement.executeBatch runs and one SQL statement in the middle fails, the following SQL statements continue to execute; false: none of the statements after the failed SQL statement are executed. Default: false.

 For the usage of JDBC native connections, see the [video tutorial](https://www.taosdata.com/blog/2020/11/11/1955.html).

 **Establishing a connection using the TDengine client driver configuration file**

 When using a JDBC native connection to connect to a TDengine cluster, you can use the TDengine client driver configuration file and specify parameters such as firstEp and secondEp in it, as follows:

 1. Do not specify hostname and port in the Java application

 ```java
 public Connection getConn() throws Exception{
  Class.forName("com.taosdata.jdbc.TSDBDriver");
  String jdbcUrl = "jdbc:TAOS://:/test?user=root&password=taosdata";
  Properties connProps = new Properties();
  connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
  connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
  connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
  Connection conn = DriverManager.getConnection(jdbcUrl, connProps);
  return conn;
 }
 ```

 2. Specify firstEp and secondEp in the configuration file

 ```shell
 # first fully qualified domain name (FQDN) for TDengine system
 firstEp cluster_node1:6030

 # second fully qualified domain name (FQDN) for TDengine system, for cluster only
 secondEp cluster_node2:6030

 # default system charset
 # charset UTF-8

 # system locale
 # locale en_US.UTF-8
 ```

-In the example above, JDBC uses the client configuration file to connect to the database test on host cluster_node1, port 6030. When the firstEp node in the cluster fails, JDBC tries to use secondEp
-to connect to the cluster.
+In the example above, JDBC uses the client configuration file to connect to the database test on host cluster_node1, port 6030. When the firstEp node in the cluster fails, JDBC tries to use secondEp to connect to the cluster.

 In TDengine, as long as one of the firstEp and secondEp nodes is valid, the connection to the cluster can be established.

-> **Note**: Here the configuration file refers to the one on the machine where the application calling the JDBC connector runs: /etc/taos/taos.cfg by default on Linux,
-C://TDengine/cfg/taos.cfg by default on Windows.
+> **Note**: Here the configuration file refers to the one on the machine where the application calling the JDBC connector runs: /etc/taos/taos.cfg by default on Linux, C://TDengine/cfg/taos.cfg by default on Windows.

 </TabItem>
 <TabItem value="rest" label="REST connection">

 ```java
 Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
 String jdbcUrl = "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata";
 Connection conn = DriverManager.getConnection(jdbcUrl);
 ```

-The example above uses RestfulDriver with a JDBC REST connection to connect to the database test on host taosdemo.com, port 6041. The URL specifies the user name (user) as
-root and the password (password) as taosdata.
+The example above uses RestfulDriver with a JDBC REST connection to connect to the database test on host taosdemo.com, port 6041. The URL specifies the user name (user) as root and the password (password) as taosdata.

 A JDBC REST connection does not depend on the client driver. Compared with a JDBC native connection, you only need to:

 1. specify driverClass as "com.taosdata.jdbc.rs.RestfulDriver";
 2. start jdbcUrl with "jdbc:TAOS-RS://";
 3. use 6041 as the connection port.

 The configuration parameters in the URL are as follows:

 - user: TDengine user name; default 'root'.
 - password: user password; default 'taosdata'.
-- batchfetch: true fetches result sets in batches when executing queries; false fetches result sets row by row. Default: false. Row-by-row fetching transfers data over HTTP. JDBC REST
-connections support batch fetching; in that mode taos-jdbcdriver and TDengine transfer data over a WebSocket connection, which, compared with HTTP, lets JDBC REST connections support large-volume queries and improves query performance.
-- charset: when batch fetching is enabled, specifies the character set for parsing string data.
-- batchErrorIgnore: true: when Statement.executeBatch runs and one SQL statement in the middle fails, the following SQL statements continue to execute; false: none of the statements
-after the failed SQL statement are executed. Default: false.
-- httpConnectTimeout: connection timeout in ms; default 5000.
-- httpSocketTimeout: socket timeout in ms; default 5000. Takes effect only when batchfetch is false.
-- messageWaitTimeout: message timeout in ms; default 3000. Takes effect only when batchfetch is true.
-- useSSL: whether SSL is used for the connection.
+- batchfetch: true fetches result sets in batches when executing queries; false fetches result sets row by row. Default: false. Row-by-row fetching transfers data over HTTP. JDBC REST connections support batch fetching; in that mode taos-jdbcdriver and TDengine transfer data over a WebSocket connection, which, compared with HTTP, lets JDBC REST connections support large-volume queries and improves query performance.
+- charset: when batch fetching is enabled, specifies the character set for parsing string data.
+- batchErrorIgnore: true: when Statement.executeBatch runs and one SQL statement in the middle fails, the following SQL statements continue to execute; false: none of the statements after the failed SQL statement are executed. Default: false.
+- httpConnectTimeout: connection timeout in ms; default 5000.
+- httpSocketTimeout: socket timeout in ms; default 5000. Takes effect only when batchfetch is false.
+- messageWaitTimeout: message timeout in ms; default 3000. Takes effect only when batchfetch is true.
+- useSSL: whether SSL is used for the connection.

 **Note**: Some configuration items (such as locale and timezone) do not take effect in REST connections.

 :::note

 - Unlike native connections, the REST interface is stateless. When using a JDBC REST connection, you need to specify the database name of tables and supertables in SQL. For example:

 ```sql
 INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('California.SanFrancisco') VALUES(now, 24.6);
 ```

-- If dbname is specified in the URL, the JDBC REST connection uses /rest/sql/dbname as the URL for RESTful requests by default, and dbname does not need to be specified in SQL. For example, with the URL
-jdbc:TAOS-RS://127.0.0.1:6041/test, the following SQL can be executed: insert into t1 using weather(ts, temperature)
-tags('California.SanFrancisco') values(now, 24.6);
+- If dbname is specified in the URL, the JDBC REST connection uses /rest/sql/dbname as the URL for RESTful requests by default, and dbname does not need to be specified in SQL. For example, with the URL jdbc:TAOS-RS://127.0.0.1:6041/test, the following SQL can be executed: insert into t1 using weather(ts, temperature) tags('California.SanFrancisco') values(now, 24.6);

 :::

 </TabItem>
 </Tabs>

 ### Obtaining a connection by specifying the URL and Properties
@@ -890,8 +881,10 @@ public static void main(String[] args) throws Exception {

 | taos-jdbcdriver version | major changes |
 | :------------------: | :----------------------------: |
+| 3.0.3 | fix timestamp parsing errors for REST connections on JDK 17+ |
 | 3.0.1 - 3.0.2 | fix result set parsing errors in some cases; 3.0.1 is built with JDK 11, and 3.0.2 is recommended for JDK 8 environments |
 | 3.0.0 | support for TDengine 3.0 |
+| 2.0.42 | fix the wasNull interface return value in WebSocket connections |
 | 2.0.41 | fix the encoding of user name and password in REST connections |
 | 2.0.39 - 2.0.40 | add REST connection/request timeout settings |
 | 2.0.38 | add bulk fetch to JDBC REST connections |
@@ -928,7 +921,7 @@ public static void main(String[] args) throws Exception {
 **Cause**: taos-jdbcdriver 3.0.1 must be used in a JDK 11+ environment.

-**Solution**: switch to taos-jdbcdriver 3.0.2.
+**Solution**: switch to taos-jdbcdriver 3.0.2 or later.

 For other questions, see the [FAQ](../../../train-faq/faq)
@@ -27,7 +27,6 @@ database_option: {
 | PRECISION {'ms' | 'us' | 'ns'}
 | REPLICA value
 | RETENTIONS ingestion_duration:keep_duration ...
-| STRICT {'off' | 'on'}
 | WAL_LEVEL {1 | 2}
 | VGROUPS value
 | SINGLE_STABLE {0 | 1}
@@ -61,9 +60,6 @@ database_option: {
 - PRECISION: the timestamp precision of the database. ms means milliseconds, us microseconds, and ns nanoseconds; the default is ms.
 - REPLICA: the number of database replicas, 1 or 3; the default is 1. In a cluster, the number of replicas must be less than or equal to the number of dnodes.
 - RETENTIONS: the aggregation intervals and retention periods of data. For example, RETENTIONS 15s:7d,1m:21d,15m:50d means that raw data collected every 15 seconds is kept for 7 days, data aggregated every 1 minute is kept for 21 days, and data aggregated every 15 minutes is kept for 50 days. Exactly three retention levels are supported.
-- STRICT: the consistency requirement for data replication; the default is off.
-  - on: strong consistency, running the standard Raft protocol; a commit succeeds once half of the nodes confirm it.
-  - off: weak consistency; a local commit returns success immediately.
 - WAL_LEVEL: the WAL level; the default is 1.
   - 1: write WAL but do not execute fsync.
   - 2: write WAL and execute fsync.
@@ -8,7 +8,7 @@ description: Detailed SQL syntax for stream processing
 ## Creating a Stream

 ```sql
-CREATE STREAM [IF NOT EXISTS] stream_name [stream_options] INTO stb_name AS subquery
+CREATE STREAM [IF NOT EXISTS] stream_name [stream_options] INTO stb_name SUBTABLE(expression) AS subquery
 stream_options: {
  TRIGGER [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time]
  WATERMARK time
@@ -28,6 +28,9 @@ subquery: SELECT select_list
 
 Session windows, state windows, and sliding windows are supported. When used with a supertable, session windows and state windows must be combined with PARTITION BY tbname (see the sketch after this hunk).
 
+The SUBTABLE clause defines the naming rule for the subtables created by the stream; see the partition section of stream processing below.
+
 ```sql
 window_clause: {
   SESSION(ts_col, tol_val)
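To make the PARTITION BY requirement concrete, here is a minimal hedged sketch; the stream and output names are hypothetical, and the supertable meters with columns ts and voltage is assumed for illustration only:

```sql
-- assumes a supertable `meters` with timestamp column `ts` and metric `voltage`
CREATE STREAM sess_stream INTO sess_out AS
  SELECT _wstart, COUNT(*), AVG(voltage)
  FROM meters
  PARTITION BY tbname        -- required when a SESSION/STATE window runs on a supertable
  SESSION(ts, 10s);          -- rows within 10s of one another share a window
```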
@@ -49,11 +52,43 @@ SELECT _wstart, count(*), avg(voltage) FROM meters PARTITION BY tbname INTERVAL(
 
 ## Stream Processing Partitions
 
-You can use PARTITION BY TBNAME or PARTITION BY tag to compute a stream in multiple partitions. Each partition has an independent timeline and time windows; the partitions aggregate separately and are written to different subtables of the destination table.
+You can use PARTITION BY TBNAME, a tag, an ordinary column, or an expression to compute a stream in multiple partitions. Each partition has an independent timeline and time windows; the partitions aggregate separately and are written to different subtables of the destination table.
 
-Without the PARTITION BY option, all data is written into a single subtable.
+Without a PARTITION BY clause, all data is written into a single subtable.
 
-The supertable created by stream processing has a unique tag column groupId, and each partition is assigned a unique groupId. Consistent with schemaless writing, the subtable name is computed via MD5 and the subtable is created automatically.
+When a stream is created without a SUBTABLE clause, the supertable created by stream processing has a unique tag column groupId, and each partition is assigned a unique groupId. Consistent with schemaless writing, the subtable name is computed via MD5 and the subtable is created automatically.
+
+If the CREATE STREAM statement contains a SUBTABLE clause, a custom table name can be generated for the subtable of each partition, for example:
+
+```sql
+CREATE STREAM avg_vol_s INTO avg_vol SUBTABLE(CONCAT('new-', tname)) AS SELECT _wstart, count(*), avg(voltage) FROM meters PARTITION BY tbname tname INTERVAL(1m);
+```
+
+In the PARTITION clause, the alias tname is defined for tbname. Aliases defined in the PARTITION clause can be used in expressions in the SUBTABLE clause; in the example above, the subtables newly created by the stream are named by concatenating the prefix 'new-' with the original table name.
+
+Note that if the subtable name exceeds the TDengine length limit, it is truncated. If the generated subtable name already exists in another supertable, creating the new subtable and writing its data will fail, because subtable names in TDengine are unique.
+
+## Stream Processing of Historical Data
+
+Normally, stream processing does not handle data that was written into the source table before the stream was created. To process such data, set the fill_history 1 option when creating the stream; the stream then processes data written before, during, and after its creation. For example:
+
+```sql
+create stream if not exists s1 fill_history 1 into st1 as select count(*) from t1 interval(10s)
+```
+
+Combined with the fill_history 1 option, you can process only the data in a specific historical time range, for example, only data after a given moment (January 30, 2020):
+
+```sql
+create stream if not exists s1 fill_history 1 into st1 as select count(*) from t1 where ts > '2020-01-30' interval(10s)
+```
+
+Or process only the data within a time period; the end time may be in the future:
+
+```sql
+create stream if not exists s1 fill_history 1 into st1 as select count(*) from t1 where ts > '2020-01-30' and ts < '2023-01-01' interval(10s)
+```
+
+If the stream task has fully expired and you no longer want it to detect or process data, you can delete it manually; the data it has already computed is retained.
+
 ## Deleting Stream Processing
@@ -59,6 +59,7 @@ Usage of taosAdapter:
       --collectd.port int                        collectd server port. Env "TAOS_ADAPTER_COLLECTD_PORT" (default 6045)
       --collectd.user string                     collectd user. Env "TAOS_ADAPTER_COLLECTD_USER" (default "root")
       --collectd.worker int                      collectd write worker. Env "TAOS_ADAPTER_COLLECTD_WORKER" (default 10)
+      --collectd.ttl int                         collectd data ttl. Env "TAOS_ADAPTER_COLLECTD_TTL" (default 0, means no ttl)
   -c, --config string                            config path default /etc/taos/taosadapter.toml
       --cors.allowAllOrigins                     cors allow all origins. Env "TAOS_ADAPTER_CORS_ALLOW_ALL_ORIGINS" (default true)
       --cors.allowCredentials                    cors allow credentials. Env "TAOS_ADAPTER_CORS_ALLOW_Credentials"
@@ -100,6 +101,7 @@ Usage of taosAdapter:
       --node_exporter.responseTimeout duration   node_exporter response timeout. Env "TAOS_ADAPTER_NODE_EXPORTER_RESPONSE_TIMEOUT" (default 5s)
       --node_exporter.urls strings               node_exporter urls. Env "TAOS_ADAPTER_NODE_EXPORTER_URLS" (default [http://localhost:9100])
       --node_exporter.user string                node_exporter user. Env "TAOS_ADAPTER_NODE_EXPORTER_USER" (default "root")
+      --node_exporter.ttl int                    node_exporter data ttl. Env "TAOS_ADAPTER_NODE_EXPORTER_TTL" (default 0, means no ttl)
       --opentsdb.enable                          enable opentsdb. Env "TAOS_ADAPTER_OPENTSDB_ENABLE" (default true)
       --opentsdb_telnet.batchSize int            opentsdb_telnet batch size. Env "TAOS_ADAPTER_OPENTSDB_TELNET_BATCH_SIZE" (default 1)
       --opentsdb_telnet.dbs strings              opentsdb_telnet db names. Env "TAOS_ADAPTER_OPENTSDB_TELNET_DBS" (default [opentsdb_telnet,collectd_tsdb,icinga2_tsdb,tcollector_tsdb])
@@ -110,6 +112,7 @@ Usage of taosAdapter:
       --opentsdb_telnet.ports ints               opentsdb telnet tcp port. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PORTS" (default [6046,6047,6048,6049])
       --opentsdb_telnet.tcpKeepAlive             enable tcp keep alive. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TCP_KEEP_ALIVE"
       --opentsdb_telnet.user string              opentsdb_telnet user. Env "TAOS_ADAPTER_OPENTSDB_TELNET_USER" (default "root")
+      --opentsdb_telnet.ttl int                  opentsdb_telnet data ttl. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TTL" (default 0, means no ttl)
       --pool.idleTimeout duration                Set idle connection timeout. Env "TAOS_ADAPTER_POOL_IDLE_TIMEOUT" (default 1h0m0s)
       --pool.maxConnect int                      max connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_CONNECT" (default 4000)
       --pool.maxIdle int                         max idle connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_IDLE" (default 4000)
@@ -131,6 +134,7 @@ Usage of taosAdapter:
       --statsd.tcpKeepAlive                      enable tcp keep alive. Env "TAOS_ADAPTER_STATSD_TCP_KEEP_ALIVE"
       --statsd.user string                       statsd user. Env "TAOS_ADAPTER_STATSD_USER" (default "root")
       --statsd.worker int                        statsd write worker. Env "TAOS_ADAPTER_STATSD_WORKER" (default 10)
+      --statsd.ttl int                           statsd data ttl. Env "TAOS_ADAPTER_STATSD_TTL" (default 0, means no ttl)
       --taosConfigDir string                     load taos client config path. Env "TAOS_ADAPTER_TAOS_CONFIG_FILE"
       --version                                  Print the version and exit
 ```
@@ -195,6 +199,7 @@ AllowWebSockets
 - `precision` the time precision used by TDengine
 - `u` TDengine user name
 - `p` TDengine password
+- `ttl` the lifetime of automatically created subtables, determined by the TTL parameter of the first record of the subtable; it cannot be updated afterwards. See the TTL parameter in the [table creation documentation](taos-sql/table/#创建表) for more information.
 
 Note: InfluxDB token authorization is not supported at present; only Basic authorization and query-parameter validation are supported.
 Example: curl --request POST http://127.0.0.1:6041/influxdb/v1/write?db=test --user "root:taosdata" --data-binary "measurement,host=host1 field1=2i,field2=2.0 1577836800000000000"
@@ -204,6 +204,10 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
 - **-a/--replica <replicaNum\>** :
   the number of replicas of the database to be created; the default value is 1.
 
+- **-k/--keep-trying <NUMBER\>** : the number of retries after a failure; no retry by default. Requires v3.0.9 or later.
+
+- **-z/--trying-interval <NUMBER\>** : the retry interval in milliseconds, effective only when retries are enabled with -k. Requires v3.0.9 or later.
+
 - **-V/--version** :
   show version information and exit. Cannot be combined with other parameters.
@@ -231,6 +235,10 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
 
 In insert scenarios `filetype` must be set to `insert`; for this and other general parameters, see [General configuration parameters](#通用配置参数)
 
+- **keep_trying** : the number of retries after a failure; no retry by default. Requires v3.0.9 or later.
+
+- **trying_interval** : the retry interval in milliseconds, effective only when retries are enabled with keep_trying. Requires v3.0.9 or later.
+
 #### Database-related configuration parameters
 
 The parameters for database creation are configured in `dbinfo` in the JSON configuration file; the specific parameters are listed below. All other parameters correspond to the database parameters of `create database` in TDengine; see [Database](../../taos-sql/database)
@@ -22,7 +22,7 @@ taosdump is a logical backup tool; it should not be used to back up any raw data
 
 There are two ways to install taosdump:
 
-- Install the official taosTools package: find taosTools on the [all downloads](https://www.taosdata.com/all-downloads) page, then download and install it.
+- Install the official taosTools package: find taosTools on the [release history](https://docs.taosdata.com/releases/tools/) page, then download and install it.
 
 - Compile taos-tools separately and install it; see the [taos-tools](https://github.com/taosdata/taos-tools) repository for details.
@@ -134,15 +134,6 @@ taos --dump-config
 | value range | 1-200000 |
 | default | 30 |
 
-### telemetryReporting
-
-| attribute | description |
-| -------- | ---------------------------------------- |
-| applies to | server only |
-| meaning | whether TDengine is allowed to collect and report basic usage information |
-| value range | 0: not allowed; 1: allowed |
-| default | 1 |
-
 ## Query Parameters
 
 ### queryPolicy
@@ -191,6 +182,15 @@ taos --dump-config
 | value range | 0 means the function name is included; 1 means it is not included. |
 | default | 0 |
 
+### countAlwaysReturnValue
+
+| attribute | description |
+| -------- | -------------------------------- |
+| applies to | server only |
+| meaning | whether the count/hyperloglog functions return a value when the data set is empty or NULL |
+| value range | 0: return an empty row; 1: return 0 |
+| default | 1 |
+
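A quick way to observe the effect — a hedged sketch using a hypothetical table t with no rows in the queried range:

```sql
-- hypothetical empty table for illustration
CREATE TABLE t (ts TIMESTAMP, v INT);

-- with countAlwaysReturnValue = 1 (the default) this returns a single row containing 0;
-- with countAlwaysReturnValue = 0 it returns an empty result set
SELECT COUNT(*) FROM t WHERE ts > NOW;
```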
 ## Locale Parameters
 
 ### timezone
@@ -306,12 +306,20 @@ The valid value of charset is UTF-8.
 | meaning | data file directory; all data files are written to this directory |
 | default | /var/lib/taos |
 
+### tempDir
+
+| attribute | description |
+| -------- | ------------------------------------------ |
+| applies to | server only |
+| meaning | the directory in which temporary files are generated during system operation |
+| default | /tmp |
+
 ### minimalTmpDirGB
 
 | attribute | description |
 | -------- | ------------------------------------------------ |
 | applies to | server and client |
-| meaning | stop writing temporary files when the disk size of the log folder is less than this value |
+| meaning | the minimum space to be reserved in the temporary file directory specified by tempDir |
 | unit | GB |
 | default | 1.0 |
@@ -320,7 +328,7 @@ The valid value of charset is UTF-8.
 | attribute | description |
 | -------- | ------------------------------------------------ |
 | applies to | server only |
-| meaning | stop writing time-series data when the disk size of the log folder is less than this value |
+| meaning | the minimum space to be reserved in the time-series data storage directory specified by dataDir |
 | unit | GB |
 | default | 2.0 |
@@ -335,27 +343,7 @@ The valid value of charset is UTF-8.
 | value range | 0-4096 |
 | default | twice the number of CPU cores |
 
 ## Time Parameters
 
-### statusInterval
-
-| attribute | description |
-| -------- | --------------------------- |
-| applies to | server only |
-| meaning | the interval at which a dnode reports its status to the mnode |
-| unit | seconds |
-| value range | 1-10 |
-| default | 1 |
-
-### shellActivityTimer
-
-| attribute | description |
-| -------- | --------------------------------- |
-| applies to | server and client |
-| meaning | the interval at which the shell client sends heartbeats to the mnode |
-| unit | seconds |
-| value range | 1-120 |
-| default | 3 |
-
 ## Performance Tuning
@@ -367,28 +355,6 @@ The valid value of charset is UTF-8.
 | meaning | maximum number of write threads |
 | default | |
 
-## Compression Parameters
-
-### compressMsgSize
-
-| attribute | description |
-| -------- | ---------------------------------------------------------------------------------------------------------------------------------------------- |
-| applies to | server only |
-| meaning | the threshold above which messages exchanged between client and server are compressed. To compress messages, a setting of 64330 bytes is recommended, i.e. only message bodies larger than 64330 bytes are compressed. |
-| unit | bytes |
-| value range | 0: compress all messages; >0: compress only messages larger than this value; -1: no compression |
-| default | -1 |
-
-### compressColData
-
-| attribute | description |
-| -------- | --------------------------------------------------------------------------------------- |
-| applies to | server only |
-| meaning | the threshold for column-wise compression of server-side query results exchanged between client and server |
-| unit | bytes |
-| value range | 0: compress all query results; >0: compress only when some column of the result exceeds this value; -1: no compression |
-| default | -1 |
-
 ## Log Parameters
 
 ### logDir
@@ -613,7 +579,7 @@ The valid value of charset is UTF-8.
 | attribute | description |
 | -------- | ------------------------- |
 | applies to | client only |
-| meaning | custom subtable name for schemaless writes |
+| meaning | the key of the custom subtable name for schemaless writes |
 | type | string |
 | default | none |
@@ -656,12 +622,18 @@ The valid value of charset is UTF-8.
 | value range | 0: do not start; 1: start |
 | default | 1 |
 
-## Comparison of 2.X and 3.0 Configuration Parameters
+## Compression Parameters
 
-:::note
-For parameters that apply to 2.x but have been deprecated in 3.0, the current behavior is specially noted
-
-:::
+### compressMsgSize
+
+| attribute | description |
+| -------- | ----------------------------- |
+| applies to | server and client |
+| meaning | whether RPC messages are compressed |
+| value range | -1: no messages are compressed; 0: all messages are compressed; N (N>0): only messages larger than N bytes are compressed |
+| default | -1 |
+
+## List of Configuration Parameters Valid in 3.0
 
 | # | **parameter** | **applies to 2.X** | **applies to 3.0** | current behavior in 3.0 |
 | --- | :---------------------: | --------------- | --------------- | ------------------------------------------------- |
@@ -674,159 +646,134 @@ The valid value of charset is UTF-8.
 | 7 | monitorFqdn | No | Yes | |
 | 8 | monitorPort | No | Yes | |
 | 9 | monitorInterval | Yes | Yes | |
-| 10 | monitorMaxLogs | No | Yes | |
-| 11 | monitorComp | No | Yes | |
-| 12 | telemetryReporting | Yes | Yes | |
-| 13 | telemetryInterval | No | Yes | |
-| 14 | telemetryServer | No | Yes | |
-| 15 | telemetryPort | No | Yes | |
-| 16 | queryPolicy | No | Yes | |
-| 17 | querySmaOptimize | No | Yes | |
-| 18 | queryRsmaTolerance | No | Yes | |
-| 19 | queryBufferSize | Yes | Yes | |
-| 20 | maxNumOfDistinctRes | Yes | Yes | |
-| 21 | minSlidingTime | Yes | Yes | |
-| 22 | minIntervalTime | Yes | Yes | |
-| 23 | countAlwaysReturnValue | Yes | Yes | |
-| 24 | dataDir | Yes | Yes | |
-| 25 | minimalDataDirGB | Yes | Yes | |
-| 26 | supportVnodes | No | Yes | |
-| 27 | tempDir | Yes | Yes | |
-| 28 | minimalTmpDirGB | Yes | Yes | |
-| 29 | compressMsgSize | Yes | Yes | |
-| 30 | compressColData | Yes | Yes | |
-| 31 | smlChildTableName | Yes | Yes | |
-| 32 | smlTagName | Yes | Yes | |
-| 33 | smlDataFormat | No | Yes | |
-| 34 | statusInterval | Yes | Yes | |
-| 35 | shellActivityTimer | Yes | Yes | |
-| 36 | transPullupInterval | No | Yes | |
-| 37 | mqRebalanceInterval | No | Yes | |
-| 38 | ttlUnit | No | Yes | |
-| 39 | ttlPushInterval | No | Yes | |
-| 40 | numOfTaskQueueThreads | No | Yes | |
-| 41 | numOfRpcThreads | No | Yes | |
-| 42 | numOfCommitThreads | Yes | Yes | |
-| 43 | numOfMnodeReadThreads | No | Yes | |
-| 44 | numOfVnodeQueryThreads | No | Yes | |
-| 45 | numOfVnodeStreamThreads | No | Yes | |
-| 46 | numOfVnodeFetchThreads | No | Yes | |
-| 47 | numOfVnodeRsmaThreads | No | Yes | |
-| 48 | numOfQnodeQueryThreads | No | Yes | |
-| 49 | numOfQnodeFetchThreads | No | Yes | |
-| 50 | numOfSnodeSharedThreads | No | Yes | |
-| 51 | numOfSnodeUniqueThreads | No | Yes | |
-| 52 | rpcQueueMemoryAllowed | No | Yes | |
-| 53 | logDir | Yes | Yes | |
-| 54 | minimalLogDirGB | Yes | Yes | |
-| 55 | numOfLogLines | Yes | Yes | |
-| 56 | asyncLog | Yes | Yes | |
-| 57 | logKeepDays | Yes | Yes | |
-| 60 | debugFlag | Yes | Yes | |
-| 61 | tmrDebugFlag | Yes | Yes | |
-| 62 | uDebugFlag | Yes | Yes | |
-| 63 | rpcDebugFlag | Yes | Yes | |
-| 64 | jniDebugFlag | Yes | Yes | |
-| 65 | qDebugFlag | Yes | Yes | |
-| 66 | cDebugFlag | Yes | Yes | |
-| 67 | dDebugFlag | Yes | Yes | |
-| 68 | vDebugFlag | Yes | Yes | |
-| 69 | mDebugFlag | Yes | Yes | |
-| 70 | wDebugFlag | Yes | Yes | |
-| 71 | sDebugFlag | Yes | Yes | |
-| 72 | tsdbDebugFlag | Yes | Yes | |
-| 73 | tqDebugFlag | No | Yes | |
-| 74 | fsDebugFlag | Yes | Yes | |
-| 75 | udfDebugFlag | No | Yes | |
-| 76 | smaDebugFlag | No | Yes | |
-| 77 | idxDebugFlag | No | Yes | |
-| 78 | tdbDebugFlag | No | Yes | |
-| 79 | metaDebugFlag | No | Yes | |
-| 80 | timezone | Yes | Yes | |
-| 81 | locale | Yes | Yes | |
-| 82 | charset | Yes | Yes | |
-| 83 | udf | Yes | Yes | |
-| 84 | enableCoreFile | Yes | Yes | |
-| 85 | arbitrator | Yes | No | leader election via the Raft protocol |
-| 86 | numOfThreadsPerCore | Yes | No | other parameters set the sizes of the various thread pools |
-| 87 | numOfMnodes | Yes | No | mnodes are created dynamically via the create mnode command |
-| 88 | vnodeBak | Yes | No | behavior in 3.0 unknown |
-| 89 | balance | Yes | No | load balancing is implemented by split/merge vgroups |
-| 90 | balanceInterval | Yes | No | obsolete along with the balance parameter |
-| 91 | offlineThreshold | Yes | No | behavior in 3.0 unknown |
-| 92 | role | Yes | No | whether vnodes can be created is determined by supportVnode |
-| 93 | dnodeNopLoop | Yes | No | parameter not found in the 2.6 documentation |
-| 94 | keepTimeOffset | Yes | No | parameter not found in the 2.6 documentation |
-| 95 | rpcTimer | Yes | No | behavior in 3.0 unknown |
-| 96 | rpcMaxTime | Yes | No | behavior in 3.0 unknown |
-| 97 | rpcForceTcp | Yes | No | TCP by default |
-| 98 | tcpConnTimeout | Yes | No | behavior in 3.0 unknown |
-| 99 | syncCheckInterval | Yes | No | behavior in 3.0 unknown |
-| 100 | maxTmrCtrl | Yes | No | behavior in 3.0 unknown |
-| 101 | monitorReplica | Yes | No | multiple replicas are managed by the Raft protocol |
-| 102 | smlTagNullName | Yes | No | behavior in 3.0 unknown |
-| 103 | keepColumnName | Yes | No | behavior in 3.0 unknown |
-| 104 | ratioOfQueryCores | Yes | No | determined by the thread-pool configuration parameters |
-| 105 | maxStreamCompDelay | Yes | No | behavior in 3.0 unknown |
-| 106 | maxFirstStreamCompDelay | Yes | No | behavior in 3.0 unknown |
-| 107 | retryStreamCompDelay | Yes | No | behavior in 3.0 unknown |
-| 108 | streamCompDelayRatio | Yes | No | behavior in 3.0 unknown |
-| 109 | maxVgroupsPerDb | Yes | No | the actual number of vgroups is set by the vgroups parameter of create db |
-| 110 | maxTablesPerVnode | Yes | No | all tables in a DB are distributed approximately evenly across vgroups |
-| 111 | minTablesPerVnode | Yes | No | all tables in a DB are distributed approximately evenly across vgroups |
-| 112 | tableIncStepPerVnode | Yes | No | all tables in a DB are distributed approximately evenly across vgroups |
-| 113 | cache | Yes | No | replaced by buffer (cache\*blocks) |
-| 114 | blocks | Yes | No | replaced by buffer (cache\*blocks) |
-| 115 | days | Yes | No | replaced by the duration parameter of create db |
-| 116 | keep | Yes | No | replaced by the keep parameter of create db |
-| 117 | minRows | Yes | No | replaced by the minRows parameter of create db |
-| 118 | maxRows | Yes | No | replaced by the maxRows parameter of create db |
-| 119 | quorum | Yes | No | determined by the Raft protocol |
-| 120 | comp | Yes | No | replaced by the comp parameter of create db |
-| 121 | walLevel | Yes | No | replaced by the wal_level parameter of create db |
-| 122 | fsync | Yes | No | replaced by the wal_fsync_period parameter of create db |
-| 123 | replica | Yes | No | replaced by the replica parameter of create db |
-| 124 | partitions | Yes | No | behavior in 3.0 unknown |
-| 125 | update | Yes | No | partial-column updates are allowed |
-| 126 | cachelast | Yes | No | replaced by the cacheModel parameter of create db |
-| 127 | maxSQLLength | Yes | No | the SQL length limit is 1 MB and needs no parameter control |
-| 128 | maxWildCardsLength | Yes | No | behavior in 3.0 unknown |
-| 129 | maxRegexStringLen | Yes | No | behavior in 3.0 unknown |
-| 130 | maxNumOfOrderedRes | Yes | No | behavior in 3.0 unknown |
-| 131 | maxConnections | Yes | No | depends on system configuration and processing capacity; see the Note below |
-| 132 | mnodeEqualVnodeNum | Yes | No | behavior in 3.0 unknown |
-| 133 | http | Yes | No | the http service is provided by taosAdapter |
-| 134 | httpEnableRecordSql | Yes | No | taosd no longer provides an http service |
-| 135 | httpMaxThreads | Yes | No | taosd no longer provides an http service |
-| 136 | restfulRowLimit | Yes | No | taosd no longer provides an http service |
-| 137 | httpDbNameMandatory | Yes | No | taosd no longer provides an http service |
-| 138 | httpKeepAlive | Yes | No | taosd no longer provides an http service |
-| 139 | enableRecordSql | Yes | No | behavior in 3.0 unknown |
-| 140 | maxBinaryDisplayWidth | Yes | No | behavior in 3.0 unknown |
-| 141 | stream | Yes | No | continuous queries are enabled by default |
-| 142 | retrieveBlockingModel | Yes | No | behavior in 3.0 unknown |
-| 143 | tsdbMetaCompactRatio | Yes | No | behavior in 3.0 unknown |
-| 144 | defaultJSONStrType | Yes | No | behavior in 3.0 unknown |
-| 145 | walFlushSize | Yes | No | behavior in 3.0 unknown |
-| 146 | keepTimeOffset | Yes | No | behavior in 3.0 unknown |
-| 147 | flowctrl | Yes | No | behavior in 3.0 unknown |
-| 148 | slaveQuery | Yes | No | behavior in 3.0 unknown: can a slave vnode process queries? |
-| 149 | adjustMaster | Yes | No | behavior in 3.0 unknown |
-| 150 | topicBinaryLen | Yes | No | behavior in 3.0 unknown |
-| 151 | telegrafUseFieldNum | Yes | No | behavior in 3.0 unknown |
-| 152 | deadLockKillQuery | Yes | No | behavior in 3.0 unknown |
-| 153 | clientMerge | Yes | No | behavior in 3.0 unknown |
-| 154 | sdbDebugFlag | Yes | No | see the DebugFlag family of parameters in 3.0 |
-| 155 | odbcDebugFlag | Yes | No | see the DebugFlag family of parameters in 3.0 |
-| 156 | httpDebugFlag | Yes | No | see the DebugFlag family of parameters in 3.0 |
-| 157 | monDebugFlag | Yes | No | see the DebugFlag family of parameters in 3.0 |
-| 158 | cqDebugFlag | Yes | No | see the DebugFlag family of parameters in 3.0 |
-| 159 | shortcutFlag | Yes | No | see the DebugFlag family of parameters in 3.0 |
-| 160 | probeSeconds | Yes | No | behavior in 3.0 unknown |
-| 161 | probeKillSeconds | Yes | No | behavior in 3.0 unknown |
-| 162 | probeInterval | Yes | No | behavior in 3.0 unknown |
-| 163 | lossyColumns | Yes | No | behavior in 3.0 unknown |
-| 164 | fPrecision | Yes | No | behavior in 3.0 unknown |
-| 165 | dPrecision | Yes | No | behavior in 3.0 unknown |
-| 166 | maxRange | Yes | No | behavior in 3.0 unknown |
-| 167 | range | Yes | No | behavior in 3.0 unknown |
+| 10 | queryPolicy | No | Yes | |
+| 11 | querySmaOptimize | No | Yes | |
+| 12 | maxNumOfDistinctRes | Yes | Yes | |
+| 15 | countAlwaysReturnValue | Yes | Yes | |
+| 16 | dataDir | Yes | Yes | |
+| 17 | minimalDataDirGB | Yes | Yes | |
+| 18 | supportVnodes | No | Yes | |
+| 19 | tempDir | Yes | Yes | |
+| 20 | minimalTmpDirGB | Yes | Yes | |
+| 21 | smlChildTableName | Yes | Yes | |
+| 22 | smlTagName | Yes | Yes | |
+| 23 | smlDataFormat | No | Yes | |
+| 24 | statusInterval | Yes | Yes | |
+| 25 | logDir | Yes | Yes | |
+| 26 | minimalLogDirGB | Yes | Yes | |
+| 27 | numOfLogLines | Yes | Yes | |
+| 28 | asyncLog | Yes | Yes | |
+| 29 | logKeepDays | Yes | Yes | |
+| 30 | debugFlag | Yes | Yes | |
+| 31 | tmrDebugFlag | Yes | Yes | |
+| 32 | uDebugFlag | Yes | Yes | |
+| 33 | rpcDebugFlag | Yes | Yes | |
+| 34 | jniDebugFlag | Yes | Yes | |
+| 35 | qDebugFlag | Yes | Yes | |
+| 36 | cDebugFlag | Yes | Yes | |
+| 37 | dDebugFlag | Yes | Yes | |
+| 38 | vDebugFlag | Yes | Yes | |
+| 39 | mDebugFlag | Yes | Yes | |
+| 40 | wDebugFlag | Yes | Yes | |
+| 41 | sDebugFlag | Yes | Yes | |
+| 42 | tsdbDebugFlag | Yes | Yes | |
+| 43 | tqDebugFlag | No | Yes | |
+| 44 | fsDebugFlag | Yes | Yes | |
+| 45 | udfDebugFlag | No | Yes | |
+| 46 | smaDebugFlag | No | Yes | |
+| 47 | idxDebugFlag | No | Yes | |
+| 48 | tdbDebugFlag | No | Yes | |
+| 49 | metaDebugFlag | No | Yes | |
+| 50 | timezone | Yes | Yes | |
+| 51 | locale | Yes | Yes | |
+| 52 | charset | Yes | Yes | |
+| 53 | udf | Yes | Yes | |
+| 54 | enableCoreFile | Yes | Yes | |
+
+## Parameters Deprecated from 2.x to 3.0
+
+| # | **parameter** | **applies to 2.X** | **applies to 3.0** | current behavior in 3.0 |
+| --- | :---------------------: | --------------- | --------------- | ------------------------------------------------- |
+| 1 | arbitrator | Yes | No | leader election via the Raft protocol |
+| 2 | numOfThreadsPerCore | Yes | No | other parameters set the sizes of the various thread pools |
+| 3 | numOfMnodes | Yes | No | mnodes are created dynamically via the create mnode command |
+| 4 | vnodeBak | Yes | No | behavior in 3.0 unknown |
+| 5 | balance | Yes | No | load balancing is implemented by split/merge vgroups |
+| 6 | balanceInterval | Yes | No | obsolete along with the balance parameter |
+| 7 | offlineThreshold | Yes | No | behavior in 3.0 unknown |
+| 8 | role | Yes | No | whether vnodes can be created is determined by supportVnode |
+| 9 | dnodeNopLoop | Yes | No | parameter not found in the 2.6 documentation |
+| 10 | keepTimeOffset | Yes | No | parameter not found in the 2.6 documentation |
+| 11 | rpcTimer | Yes | No | behavior in 3.0 unknown |
+| 12 | rpcMaxTime | Yes | No | behavior in 3.0 unknown |
+| 13 | rpcForceTcp | Yes | No | TCP by default |
+| 14 | tcpConnTimeout | Yes | No | behavior in 3.0 unknown |
+| 15 | syncCheckInterval | Yes | No | behavior in 3.0 unknown |
+| 16 | maxTmrCtrl | Yes | No | behavior in 3.0 unknown |
+| 17 | monitorReplica | Yes | No | multiple replicas are managed by the Raft protocol |
+| 18 | smlTagNullName | Yes | No | behavior in 3.0 unknown |
+| 19 | keepColumnName | Yes | No | behavior in 3.0 unknown |
+| 20 | ratioOfQueryCores | Yes | No | determined by the thread-pool configuration parameters |
+| 21 | maxStreamCompDelay | Yes | No | behavior in 3.0 unknown |
+| 22 | maxFirstStreamCompDelay | Yes | No | behavior in 3.0 unknown |
+| 23 | retryStreamCompDelay | Yes | No | behavior in 3.0 unknown |
+| 24 | streamCompDelayRatio | Yes | No | behavior in 3.0 unknown |
+| 25 | maxVgroupsPerDb | Yes | No | the actual number of vgroups is set by the vgroups parameter of create db |
+| 26 | maxTablesPerVnode | Yes | No | all tables in a DB are distributed approximately evenly across vgroups |
+| 27 | minTablesPerVnode | Yes | No | all tables in a DB are distributed approximately evenly across vgroups |
+| 28 | tableIncStepPerVnode | Yes | No | all tables in a DB are distributed approximately evenly across vgroups |
+| 29 | cache | Yes | No | replaced by buffer (cache\*blocks) |
+| 30 | blocks | Yes | No | replaced by buffer (cache\*blocks) |
+| 31 | days | Yes | No | replaced by the duration parameter of create db |
+| 32 | keep | Yes | No | replaced by the keep parameter of create db |
+| 33 | minRows | Yes | No | replaced by the minRows parameter of create db |
+| 34 | maxRows | Yes | No | replaced by the maxRows parameter of create db |
+| 35 | quorum | Yes | No | determined by the Raft protocol |
+| 36 | comp | Yes | No | replaced by the comp parameter of create db |
+| 37 | walLevel | Yes | No | replaced by the wal_level parameter of create db |
+| 38 | fsync | Yes | No | replaced by the wal_fsync_period parameter of create db |
+| 39 | replica | Yes | No | replaced by the replica parameter of create db |
+| 40 | partitions | Yes | No | behavior in 3.0 unknown |
+| 41 | update | Yes | No | partial-column updates are allowed |
+| 42 | cachelast | Yes | No | replaced by the cacheModel parameter of create db |
+| 43 | maxSQLLength | Yes | No | the SQL length limit is 1 MB and needs no parameter control |
+| 44 | maxWildCardsLength | Yes | No | behavior in 3.0 unknown |
+| 45 | maxRegexStringLen | Yes | No | behavior in 3.0 unknown |
+| 46 | maxNumOfOrderedRes | Yes | No | behavior in 3.0 unknown |
+| 47 | maxConnections | Yes | No | depends on system configuration and processing capacity; see the Note below |
+| 48 | mnodeEqualVnodeNum | Yes | No | behavior in 3.0 unknown |
+| 49 | http | Yes | No | the http service is provided by taosAdapter |
+| 50 | httpEnableRecordSql | Yes | No | taosd no longer provides an http service |
+| 51 | httpMaxThreads | Yes | No | taosd no longer provides an http service |
+| 52 | restfulRowLimit | Yes | No | taosd no longer provides an http service |
+| 53 | httpDbNameMandatory | Yes | No | taosd no longer provides an http service |
+| 54 | httpKeepAlive | Yes | No | taosd no longer provides an http service |
+| 55 | enableRecordSql | Yes | No | behavior in 3.0 unknown |
+| 56 | maxBinaryDisplayWidth | Yes | No | behavior in 3.0 unknown |
+| 57 | stream | Yes | No | continuous queries are enabled by default |
+| 58 | retrieveBlockingModel | Yes | No | behavior in 3.0 unknown |
+| 59 | tsdbMetaCompactRatio | Yes | No | behavior in 3.0 unknown |
+| 60 | defaultJSONStrType | Yes | No | behavior in 3.0 unknown |
+| 61 | walFlushSize | Yes | No | behavior in 3.0 unknown |
+| 62 | keepTimeOffset | Yes | No | behavior in 3.0 unknown |
+| 63 | flowctrl | Yes | No | behavior in 3.0 unknown |
+| 64 | slaveQuery | Yes | No | behavior in 3.0 unknown: can a slave vnode process queries? |
+| 65 | adjustMaster | Yes | No | behavior in 3.0 unknown |
+| 66 | topicBinaryLen | Yes | No | behavior in 3.0 unknown |
+| 67 | telegrafUseFieldNum | Yes | No | behavior in 3.0 unknown |
+| 68 | deadLockKillQuery | Yes | No | behavior in 3.0 unknown |
+| 69 | clientMerge | Yes | No | behavior in 3.0 unknown |
+| 70 | sdbDebugFlag | Yes | No | see the DebugFlag family of parameters in 3.0 |
+| 71 | odbcDebugFlag | Yes | No | see the DebugFlag family of parameters in 3.0 |
+| 72 | httpDebugFlag | Yes | No | see the DebugFlag family of parameters in 3.0 |
+| 73 | monDebugFlag | Yes | No | see the DebugFlag family of parameters in 3.0 |
+| 74 | cqDebugFlag | Yes | No | see the DebugFlag family of parameters in 3.0 |
+| 75 | shortcutFlag | Yes | No | see the DebugFlag family of parameters in 3.0 |
+| 76 | probeSeconds | Yes | No | behavior in 3.0 unknown |
+| 77 | probeKillSeconds | Yes | No | behavior in 3.0 unknown |
+| 78 | probeInterval | Yes | No | behavior in 3.0 unknown |
+| 79 | lossyColumns | Yes | No | behavior in 3.0 unknown |
+| 80 | fPrecision | Yes | No | behavior in 3.0 unknown |
+| 81 | dPrecision | Yes | No | behavior in 3.0 unknown |
+| 82 | maxRange | Yes | No | behavior in 3.0 unknown |
+| 83 | range | Yes | No | behavior in 3.0 unknown |
@@ -10,11 +10,14 @@ For the installation packages of TDengine 2.x releases, please visit [here](https://www.taosdata.com/all-downloads).
 
 import Release from "/components/ReleaseV3";
 
+## 3.0.2.0
+
+<Release type="tdengine" version="3.0.2.0" />
+
 ## 3.0.1.8
 
 <Release type="tdengine" version="3.0.1.8" />
 
 ## 3.0.1.7
 
 <Release type="tdengine" version="3.0.1.7" />
@@ -10,6 +10,10 @@ The download links of the installation packages for each taosTools release are as follows:
 
 import Release from "/components/ReleaseV3";
 
+## 2.3.2
+
+<Release type="tools" version="2.3.2" />
+
 ## 2.3.0
 
 <Release type="tools" version="2.3.0" />
@@ -267,76 +267,78 @@
 #define TK_BY            249
 #define TK_SESSION       250
 #define TK_STATE_WINDOW  251
-#define TK_SLIDING       252
-#define TK_FILL          253
-#define TK_VALUE         254
-#define TK_NONE          255
-#define TK_PREV          256
-#define TK_LINEAR        257
-#define TK_NEXT          258
-#define TK_HAVING        259
-#define TK_RANGE         260
-#define TK_EVERY         261
-#define TK_ORDER         262
-#define TK_SLIMIT        263
-#define TK_SOFFSET       264
-#define TK_LIMIT         265
-#define TK_OFFSET        266
-#define TK_ASC           267
-#define TK_NULLS         268
-#define TK_ABORT         269
-#define TK_AFTER         270
-#define TK_ATTACH        271
-#define TK_BEFORE        272
-#define TK_BEGIN         273
-#define TK_BITAND        274
-#define TK_BITNOT        275
-#define TK_BITOR         276
-#define TK_BLOCKS        277
-#define TK_CHANGE        278
-#define TK_COMMA         279
-#define TK_COMPACT       280
-#define TK_CONCAT        281
-#define TK_CONFLICT      282
-#define TK_COPY          283
-#define TK_DEFERRED      284
-#define TK_DELIMITERS    285
-#define TK_DETACH        286
-#define TK_DIVIDE        287
-#define TK_DOT           288
-#define TK_EACH          289
-#define TK_FAIL          290
-#define TK_FILE          291
-#define TK_FOR           292
-#define TK_GLOB          293
-#define TK_ID            294
-#define TK_IMMEDIATE     295
-#define TK_IMPORT        296
-#define TK_INITIALLY     297
-#define TK_INSTEAD       298
-#define TK_ISNULL        299
-#define TK_KEY           300
-#define TK_MODULES       301
-#define TK_NK_BITNOT     302
-#define TK_NK_SEMI       303
-#define TK_NOTNULL       304
-#define TK_OF            305
-#define TK_PLUS          306
-#define TK_PRIVILEGE     307
-#define TK_RAISE         308
-#define TK_REPLACE       309
-#define TK_RESTRICT      310
-#define TK_ROW           311
-#define TK_SEMI          312
-#define TK_STAR          313
-#define TK_STATEMENT     314
-#define TK_STRING        315
-#define TK_TIMES         316
-#define TK_UPDATE        317
-#define TK_VALUES        318
-#define TK_VARIABLE      319
-#define TK_VIEW          320
-#define TK_WAL           321
+#define TK_EVENT_WINDOW  252
+#define TK_START         253
+#define TK_SLIDING       254
+#define TK_FILL          255
+#define TK_VALUE         256
+#define TK_NONE          257
+#define TK_PREV          258
+#define TK_LINEAR        259
+#define TK_NEXT          260
+#define TK_HAVING        261
+#define TK_RANGE         262
+#define TK_EVERY         263
+#define TK_ORDER         264
+#define TK_SLIMIT        265
+#define TK_SOFFSET       266
+#define TK_LIMIT         267
+#define TK_OFFSET        268
+#define TK_ASC           269
+#define TK_NULLS         270
+#define TK_ABORT         271
+#define TK_AFTER         272
+#define TK_ATTACH        273
+#define TK_BEFORE        274
+#define TK_BEGIN         275
+#define TK_BITAND        276
+#define TK_BITNOT        277
+#define TK_BITOR         278
+#define TK_BLOCKS        279
+#define TK_CHANGE        280
+#define TK_COMMA         281
+#define TK_COMPACT       282
+#define TK_CONCAT        283
+#define TK_CONFLICT      284
+#define TK_COPY          285
+#define TK_DEFERRED      286
+#define TK_DELIMITERS    287
+#define TK_DETACH        288
+#define TK_DIVIDE        289
+#define TK_DOT           290
+#define TK_EACH          291
+#define TK_FAIL          292
+#define TK_FILE          293
+#define TK_FOR           294
+#define TK_GLOB          295
+#define TK_ID            296
+#define TK_IMMEDIATE     297
+#define TK_IMPORT        298
+#define TK_INITIALLY     299
+#define TK_INSTEAD       300
+#define TK_ISNULL        301
+#define TK_KEY           302
+#define TK_MODULES       303
+#define TK_NK_BITNOT     304
+#define TK_NK_SEMI       305
+#define TK_NOTNULL       306
+#define TK_OF            307
+#define TK_PLUS          308
+#define TK_PRIVILEGE     309
+#define TK_RAISE         310
+#define TK_REPLACE       311
+#define TK_RESTRICT      312
+#define TK_ROW           313
+#define TK_SEMI          314
+#define TK_STAR          315
+#define TK_STATEMENT     316
+#define TK_STRING        317
+#define TK_TIMES         318
+#define TK_UPDATE        319
+#define TK_VALUES        320
+#define TK_VARIABLE      321
+#define TK_VIEW          322
+#define TK_WAL           323
 
 #define TK_NK_SPACE      600
 #define TK_NK_COMMENT    601
@@ -112,6 +112,7 @@ typedef enum ENodeType {
   QUERY_NODE_COLUMN_REF,
   QUERY_NODE_WHEN_THEN,
   QUERY_NODE_CASE_WHEN,
+  QUERY_NODE_EVENT_WINDOW,
 
   // Statement nodes are used in parser and planner module.
   QUERY_NODE_SET_OPERATOR = 100,
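For orientation — a hedged sketch, since the SQL surface is not shown in this commit — the event-window node types wired up below correspond to queries of roughly this shape; the table t and its column c are assumed for illustration only:

```sql
-- assumed event-window syntax: a window opens when the start condition
-- first holds and closes when the end condition holds
SELECT _wstart, _wend, COUNT(*)
FROM t
EVENT_WINDOW START WITH c > 10 END WITH c < 5;
```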
@@ -265,7 +266,9 @@ typedef enum ENodeType {
   QUERY_NODE_PHYSICAL_PLAN_DELETE,
   QUERY_NODE_PHYSICAL_SUBPLAN,
   QUERY_NODE_PHYSICAL_PLAN,
-  QUERY_NODE_PHYSICAL_PLAN_TABLE_COUNT_SCAN
+  QUERY_NODE_PHYSICAL_PLAN_TABLE_COUNT_SCAN,
+  QUERY_NODE_PHYSICAL_PLAN_MERGE_EVENT,
+  QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT
 } ENodeType;
 
 /**
@@ -185,7 +185,12 @@ typedef struct SMergeLogicNode {
   bool groupSort;
 } SMergeLogicNode;
 
-typedef enum EWindowType { WINDOW_TYPE_INTERVAL = 1, WINDOW_TYPE_SESSION, WINDOW_TYPE_STATE } EWindowType;
+typedef enum EWindowType {
+  WINDOW_TYPE_INTERVAL = 1,
+  WINDOW_TYPE_SESSION,
+  WINDOW_TYPE_STATE,
+  WINDOW_TYPE_EVENT
+} EWindowType;
 
 typedef enum EWindowAlgorithm {
   INTERVAL_ALGO_HASH = 1,
@@ -212,6 +217,8 @@ typedef struct SWindowLogicNode {
   SNode*  pTspk;
   SNode*  pTsEnd;
   SNode*  pStateExpr;
+  SNode*  pStartCond;
+  SNode*  pEndCond;
   int8_t  triggerType;
   int64_t watermark;
   int64_t deleteMark;
@@ -498,6 +505,14 @@ typedef struct SStateWinodwPhysiNode {
 
 typedef SStateWinodwPhysiNode SStreamStateWinodwPhysiNode;
 
+typedef struct SEventWinodwPhysiNode {
+  SWinodwPhysiNode window;
+  SNode*           pStartCond;
+  SNode*           pEndCond;
+} SEventWinodwPhysiNode;
+
+typedef SEventWinodwPhysiNode SStreamEventWinodwPhysiNode;
+
 typedef struct SSortPhysiNode {
   SPhysiNode node;
   SNodeList* pExprs;  // these are expression list of order_by_clause and parameter expression of aggregate function
@@ -223,6 +223,13 @@ typedef struct SIntervalWindowNode {
   SNode* pFill;
 } SIntervalWindowNode;
 
+typedef struct SEventWindowNode {
+  ENodeType type;  // QUERY_NODE_EVENT_WINDOW
+  SNode*    pCol;  // timestamp primary key
+  SNode*    pStartCond;
+  SNode*    pEndCond;
+} SEventWindowNode;
+
 typedef enum EFillMode {
   FILL_MODE_NONE = 1,
   FILL_MODE_VALUE,
@@ -295,6 +295,13 @@ static int32_t stateWindowNodeCopy(const SStateWindowNode* pSrc, SStateWindowNode* pDst) {
   return TSDB_CODE_SUCCESS;
 }
 
+static int32_t eventWindowNodeCopy(const SEventWindowNode* pSrc, SEventWindowNode* pDst) {
+  CLONE_NODE_FIELD(pCol);
+  CLONE_NODE_FIELD(pStartCond);
+  CLONE_NODE_FIELD(pEndCond);
+  return TSDB_CODE_SUCCESS;
+}
+
 static int32_t sessionWindowNodeCopy(const SSessionWindowNode* pSrc, SSessionWindowNode* pDst) {
   CLONE_NODE_FIELD_EX(pCol, SColumnNode*);
   CLONE_NODE_FIELD_EX(pGap, SValueNode*);
@@ -462,6 +469,8 @@ static int32_t logicWindowCopy(const SWindowLogicNode* pSrc, SWindowLogicNode* pDst) {
   CLONE_NODE_FIELD(pTspk);
   CLONE_NODE_FIELD(pTsEnd);
   CLONE_NODE_FIELD(pStateExpr);
+  CLONE_NODE_FIELD(pStartCond);
+  CLONE_NODE_FIELD(pEndCond);
   COPY_SCALAR_FIELD(triggerType);
   COPY_SCALAR_FIELD(watermark);
   COPY_SCALAR_FIELD(deleteMark);
@@ -709,6 +718,9 @@ SNode* nodesCloneNode(const SNode* pNode) {
     case QUERY_NODE_STATE_WINDOW:
       code = stateWindowNodeCopy((const SStateWindowNode*)pNode, (SStateWindowNode*)pDst);
       break;
+    case QUERY_NODE_EVENT_WINDOW:
+      code = eventWindowNodeCopy((const SEventWindowNode*)pNode, (SEventWindowNode*)pDst);
+      break;
     case QUERY_NODE_SESSION_WINDOW:
       code = sessionWindowNodeCopy((const SSessionWindowNode*)pNode, (SSessionWindowNode*)pDst);
       break;
@@ -85,6 +85,8 @@ const char* nodesNodeName(ENodeType type) {
       return "WhenThen";
     case QUERY_NODE_CASE_WHEN:
       return "CaseWhen";
+    case QUERY_NODE_EVENT_WINDOW:
+      return "EventWindow";
     case QUERY_NODE_SET_OPERATOR:
       return "SetOperator";
     case QUERY_NODE_SELECT_STMT:
@@ -233,6 +235,10 @@ const char* nodesNodeName(ENodeType type) {
       return "PhysiLastRowScan";
     case QUERY_NODE_PHYSICAL_PLAN_TABLE_COUNT_SCAN:
       return "PhysiTableCountScan";
+    case QUERY_NODE_PHYSICAL_PLAN_MERGE_EVENT:
+      return "PhysiMergeEventWindow";
+    case QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT:
+      return "PhysiStreamEventWindow";
     case QUERY_NODE_PHYSICAL_PLAN_PROJECT:
       return "PhysiProject";
     case QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN:
@@ -2272,6 +2278,37 @@ static int32_t jsonToPhysiStateWindowNode(const SJson* pJson, void* pObj) {
   return code;
 }
 
+static const char* jkEventWindowPhysiPlanStartCond = "StartCond";
+static const char* jkEventWindowPhysiPlanEndCond = "EndCond";
+
+static int32_t physiEventWindowNodeToJson(const void* pObj, SJson* pJson) {
+  const SEventWinodwPhysiNode* pNode = (const SEventWinodwPhysiNode*)pObj;
+
+  int32_t code = physiWindowNodeToJson(pObj, pJson);
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tjsonAddObject(pJson, jkEventWindowPhysiPlanStartCond, nodeToJson, pNode->pStartCond);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tjsonAddObject(pJson, jkEventWindowPhysiPlanEndCond, nodeToJson, pNode->pEndCond);
+  }
+
+  return code;
+}
+
+static int32_t jsonToPhysiEventWindowNode(const SJson* pJson, void* pObj) {
+  SEventWinodwPhysiNode* pNode = (SEventWinodwPhysiNode*)pObj;
+
+  int32_t code = jsonToPhysiWindowNode(pJson, pObj);
+  if (TSDB_CODE_SUCCESS == code) {
+    code = jsonToNodeObject(pJson, jkEventWindowPhysiPlanStartCond, &pNode->pStartCond);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = jsonToNodeObject(pJson, jkEventWindowPhysiPlanEndCond, &pNode->pEndCond);
+  }
+
+  return code;
+}
+
 static const char* jkPartitionPhysiPlanExprs = "Exprs";
 static const char* jkPartitionPhysiPlanPartitionKeys = "PartitionKeys";
 static const char* jkPartitionPhysiPlanTargets = "Targets";
@@ -3660,6 +3697,36 @@ static int32_t jsonToSessionWindowNode(const SJson* pJson, void* pObj) {
   return code;
 }
 
+static const char* jkEventWindowTsPrimaryKey = "TsPrimaryKey";
+static const char* jkEventWindowStartCond = "StartCond";
+static const char* jkEventWindowEndCond = "EndCond";
+
+static int32_t eventWindowNodeToJson(const void* pObj, SJson* pJson) {
+  const SEventWindowNode* pNode = (const SEventWindowNode*)pObj;
+
+  int32_t code = tjsonAddObject(pJson, jkEventWindowTsPrimaryKey, nodeToJson, pNode->pCol);
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tjsonAddObject(pJson, jkEventWindowStartCond, nodeToJson, pNode->pStartCond);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tjsonAddObject(pJson, jkEventWindowEndCond, nodeToJson, pNode->pEndCond);
+  }
+  return code;
+}
+
+static int32_t jsonToEventWindowNode(const SJson* pJson, void* pObj) {
+  SEventWindowNode* pNode = (SEventWindowNode*)pObj;
+
+  int32_t code = jsonToNodeObject(pJson, jkEventWindowTsPrimaryKey, &pNode->pCol);
+  if (TSDB_CODE_SUCCESS == code) {
+    code = jsonToNodeObject(pJson, jkEventWindowStartCond, &pNode->pStartCond);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = jsonToNodeObject(pJson, jkEventWindowEndCond, &pNode->pEndCond);
+  }
+  return code;
+}
+
 static const char* jkIntervalWindowInterval = "Interval";
 static const char* jkIntervalWindowOffset = "Offset";
 static const char* jkIntervalWindowSliding = "Sliding";
@@ -4615,6 +4682,8 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) {
       return whenThenNodeToJson(pObj, pJson);
     case QUERY_NODE_CASE_WHEN:
       return caseWhenNodeToJson(pObj, pJson);
+    case QUERY_NODE_EVENT_WINDOW:
+      return eventWindowNodeToJson(pObj, pJson);
     case QUERY_NODE_SET_OPERATOR:
       return setOperatorToJson(pObj, pJson);
     case QUERY_NODE_SELECT_STMT:
@@ -4712,6 +4781,9 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) {
     case QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE:
     case QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE:
       return physiStateWindowNodeToJson(pObj, pJson);
+    case QUERY_NODE_PHYSICAL_PLAN_MERGE_EVENT:
+    case QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT:
+      return physiEventWindowNodeToJson(pObj, pJson);
     case QUERY_NODE_PHYSICAL_PLAN_PARTITION:
       return physiPartitionNodeToJson(pObj, pJson);
     case QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION:
@@ -4787,6 +4859,8 @@ static int32_t jsonToSpecificNode(const SJson* pJson, void* pObj) {
       return jsonToWhenThenNode(pJson, pObj);
     case QUERY_NODE_CASE_WHEN:
       return jsonToCaseWhenNode(pJson, pObj);
+    case QUERY_NODE_EVENT_WINDOW:
+      return jsonToEventWindowNode(pJson, pObj);
     case QUERY_NODE_SET_OPERATOR:
       return jsonToSetOperator(pJson, pObj);
     case QUERY_NODE_SELECT_STMT:
@@ -4871,6 +4945,9 @@ static int32_t jsonToSpecificNode(const SJson* pJson, void* pObj) {
     case QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE:
     case QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE:
       return jsonToPhysiStateWindowNode(pJson, pObj);
+    case QUERY_NODE_PHYSICAL_PLAN_MERGE_EVENT:
+    case QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT:
+      return jsonToPhysiEventWindowNode(pJson, pObj);
     case QUERY_NODE_PHYSICAL_PLAN_PARTITION:
       return jsonToPhysiPartitionNode(pJson, pObj);
     case QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION:
@@ -2927,6 +2927,46 @@ static int32_t msgToPhysiStateWindowNode(STlvDecoder* pDecoder, void* pObj) {
   return code;
 }
 
+enum { PHY_EVENT_CODE_WINDOW = 1, PHY_EVENT_CODE_START_COND, PHY_EVENT_CODE_END_COND };
+
+static int32_t physiEventWindowNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+  const SEventWinodwPhysiNode* pNode = (const SEventWinodwPhysiNode*)pObj;
+
+  int32_t code = tlvEncodeObj(pEncoder, PHY_EVENT_CODE_WINDOW, physiWindowNodeToMsg, &pNode->window);
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeObj(pEncoder, PHY_EVENT_CODE_START_COND, nodeToMsg, pNode->pStartCond);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeObj(pEncoder, PHY_EVENT_CODE_END_COND, nodeToMsg, pNode->pEndCond);
+  }
+
+  return code;
+}
+
+static int32_t msgToPhysiEventWindowNode(STlvDecoder* pDecoder, void* pObj) {
+  SEventWinodwPhysiNode* pNode = (SEventWinodwPhysiNode*)pObj;
+
+  int32_t code = TSDB_CODE_SUCCESS;
+  STlv*   pTlv = NULL;
+  tlvForEach(pDecoder, pTlv, code) {
+    switch (pTlv->type) {
+      case PHY_EVENT_CODE_WINDOW:
+        code = tlvDecodeObjFromTlv(pTlv, msgToPhysiWindowNode, &pNode->window);
+        break;
+      case PHY_EVENT_CODE_START_COND:
+        code = msgToNodeFromTlv(pTlv, (void**)&pNode->pStartCond);
+        break;
+      case PHY_EVENT_CODE_END_COND:
+        code = msgToNodeFromTlv(pTlv, (void**)&pNode->pEndCond);
+        break;
+      default:
+        break;
+    }
+  }
+
+  return code;
+}
+
 enum { PHY_PARTITION_CODE_BASE_NODE = 1, PHY_PARTITION_CODE_EXPR, PHY_PARTITION_CODE_KEYS, PHY_PARTITION_CODE_TARGETS };
 
 static int32_t physiPartitionNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
@@ -3698,6 +3738,10 @@ static int32_t specificNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
     case QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE:
       code = physiStateWindowNodeToMsg(pObj, pEncoder);
       break;
+    case QUERY_NODE_PHYSICAL_PLAN_MERGE_EVENT:
+    case QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT:
+      code = physiEventWindowNodeToMsg(pObj, pEncoder);
+      break;
     case QUERY_NODE_PHYSICAL_PLAN_PARTITION:
      code = physiPartitionNodeToMsg(pObj, pEncoder);
      break;
@@ -3837,6 +3881,10 @@ static int32_t msgToSpecificNode(STlvDecoder* pDecoder, void* pObj) {
     case QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE:
       code = msgToPhysiStateWindowNode(pDecoder, pObj);
       break;
+    case QUERY_NODE_PHYSICAL_PLAN_MERGE_EVENT:
+    case QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT:
+      code = msgToPhysiEventWindowNode(pDecoder, pObj);
+      break;
     case QUERY_NODE_PHYSICAL_PLAN_PARTITION:
       code = msgToPhysiPartitionNode(pDecoder, pObj);
       break;
@@ -165,6 +165,17 @@ static EDealRes dispatchExpr(SNode* pNode, ETraversalOrder order, FNodeWalker walker,
       }
       break;
     }
+    case QUERY_NODE_EVENT_WINDOW: {
+      SEventWindowNode* pEvent = (SEventWindowNode*)pNode;
+      res = walkExpr(pEvent->pCol, order, walker, pContext);
+      if (DEAL_RES_ERROR != res && DEAL_RES_END != res) {
+        res = walkExpr(pEvent->pStartCond, order, walker, pContext);
+      }
+      if (DEAL_RES_ERROR != res && DEAL_RES_END != res) {
+        res = walkExpr(pEvent->pEndCond, order, walker, pContext);
+      }
+      break;
+    }
     default:
       break;
   }
@@ -329,6 +340,17 @@ static EDealRes rewriteExpr(SNode** pRawNode, ETraversalOrder order, FNodeRewriter rewriter,
       }
       break;
     }
+    case QUERY_NODE_EVENT_WINDOW: {
+      SEventWindowNode* pEvent = (SEventWindowNode*)pNode;
+      res = rewriteExpr(&pEvent->pCol, order, rewriter, pContext);
+      if (DEAL_RES_ERROR != res && DEAL_RES_END != res) {
+        res = rewriteExpr(&pEvent->pStartCond, order, rewriter, pContext);
+      }
+      if (DEAL_RES_ERROR != res && DEAL_RES_END != res) {
+        res = rewriteExpr(&pEvent->pEndCond, order, rewriter, pContext);
+      }
+      break;
+    }
     default:
       break;
   }
@@ -299,6 +299,8 @@ SNode* nodesMakeNode(ENodeType type) {
       return makeNode(type, sizeof(SWhenThenNode));
     case QUERY_NODE_CASE_WHEN:
       return makeNode(type, sizeof(SCaseWhenNode));
+    case QUERY_NODE_EVENT_WINDOW:
+      return makeNode(type, sizeof(SEventWindowNode));
     case QUERY_NODE_SET_OPERATOR:
       return makeNode(type, sizeof(SSetOperator));
     case QUERY_NODE_SELECT_STMT:
@@ -535,6 +537,10 @@ SNode* nodesMakeNode(ENodeType type) {
       return makeNode(type, sizeof(SStateWinodwPhysiNode));
     case QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE:
       return makeNode(type, sizeof(SStreamStateWinodwPhysiNode));
+    case QUERY_NODE_PHYSICAL_PLAN_MERGE_EVENT:
+      return makeNode(type, sizeof(SEventWinodwPhysiNode));
+    case QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT:
+      return makeNode(type, sizeof(SStreamEventWinodwPhysiNode));
     case QUERY_NODE_PHYSICAL_PLAN_PARTITION:
       return makeNode(type, sizeof(SPartitionPhysiNode));
     case QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION:
@@ -765,16 +771,23 @@ void nodesDestroyNode(SNode* pNode) {
     case QUERY_NODE_COLUMN_REF:  // no pointer field
       break;
     case QUERY_NODE_WHEN_THEN: {
-      SWhenThenNode* pStmt = (SWhenThenNode*)pNode;
-      nodesDestroyNode(pStmt->pWhen);
-      nodesDestroyNode(pStmt->pThen);
+      SWhenThenNode* pWhenThen = (SWhenThenNode*)pNode;
+      nodesDestroyNode(pWhenThen->pWhen);
+      nodesDestroyNode(pWhenThen->pThen);
       break;
     }
     case QUERY_NODE_CASE_WHEN: {
-      SCaseWhenNode* pStmt = (SCaseWhenNode*)pNode;
-      nodesDestroyNode(pStmt->pCase);
-      nodesDestroyNode(pStmt->pElse);
-      nodesDestroyList(pStmt->pWhenThenList);
+      SCaseWhenNode* pCaseWhen = (SCaseWhenNode*)pNode;
+      nodesDestroyNode(pCaseWhen->pCase);
+      nodesDestroyNode(pCaseWhen->pElse);
+      nodesDestroyList(pCaseWhen->pWhenThenList);
+      break;
+    }
+    case QUERY_NODE_EVENT_WINDOW: {
+      SEventWindowNode* pEvent = (SEventWindowNode*)pNode;
+      nodesDestroyNode(pEvent->pCol);
+      nodesDestroyNode(pEvent->pStartCond);
+      nodesDestroyNode(pEvent->pEndCond);
       break;
     }
     case QUERY_NODE_SET_OPERATOR: {
@@ -1232,6 +1245,14 @@ void nodesDestroyNode(SNode* pNode) {
       nodesDestroyNode(pPhyNode->pStateKey);
       break;
     }
+    case QUERY_NODE_PHYSICAL_PLAN_MERGE_EVENT:
+    case QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT: {
+      SEventWinodwPhysiNode* pPhyNode = (SEventWinodwPhysiNode*)pNode;
+      destroyWinodwPhysiNode((SWinodwPhysiNode*)pPhyNode);
+      nodesDestroyNode(pPhyNode->pStartCond);
+      nodesDestroyNode(pPhyNode->pEndCond);
+      break;
+    }
     case QUERY_NODE_PHYSICAL_PLAN_PARTITION: {
       destroyPartitionPhysiNode((SPartitionPhysiNode*)pNode);
       break;
@@ -116,6 +116,7 @@ SNode* createLimitNode(SAstCreateContext* pCxt, const SToken* pLimit, const STok
 SNode* createOrderByExprNode(SAstCreateContext* pCxt, SNode* pExpr, EOrder order, ENullOrder nullOrder);
 SNode* createSessionWindowNode(SAstCreateContext* pCxt, SNode* pCol, SNode* pGap);
 SNode* createStateWindowNode(SAstCreateContext* pCxt, SNode* pExpr);
+SNode* createEventWindowNode(SAstCreateContext* pCxt, SNode* pStartCond, SNode* pEndCond);
 SNode* createIntervalWindowNode(SAstCreateContext* pCxt, SNode* pInterval, SNode* pOffset, SNode* pSliding,
                                 SNode* pFill);
 SNode* createFillNode(SAstCreateContext* pCxt, EFillMode mode, SNode* pValues);
@@ -964,6 +964,8 @@ twindow_clause_opt(A) ::=
 twindow_clause_opt(A) ::=
   INTERVAL NK_LP duration_literal(B) NK_COMMA duration_literal(C) NK_RP
   sliding_opt(D) fill_opt(E).                                          { A = createIntervalWindowNode(pCxt, releaseRawExprNode(pCxt, B), releaseRawExprNode(pCxt, C), D, E); }
+twindow_clause_opt(A) ::=
+  EVENT_WINDOW START WITH search_condition(B) END WITH search_condition(C).    { A = createEventWindowNode(pCxt, B, C); }

 sliding_opt(A) ::= .                                                   { A = NULL; }
 sliding_opt(A) ::= SLIDING NK_LP duration_literal(B) NK_RP.            { A = releaseRawExprNode(pCxt, B); }
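For reference, the new grammar rule accepts queries of the shape exercised by the plan tests added later in this commit:

    SELECT COUNT(*) FROM t1
      EVENT_WINDOW START WITH c1 > 10 END WITH c2 = 'abc';

Each window opens at the first row satisfying the START WITH condition and closes at the next row satisfying END WITH.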
@@ -605,6 +605,20 @@ SNode* createStateWindowNode(SAstCreateContext* pCxt, SNode* pExpr) {
   return (SNode*)state;
 }

+SNode* createEventWindowNode(SAstCreateContext* pCxt, SNode* pStartCond, SNode* pEndCond) {
+  CHECK_PARSER_STATUS(pCxt);
+  SEventWindowNode* pEvent = (SEventWindowNode*)nodesMakeNode(QUERY_NODE_EVENT_WINDOW);
+  CHECK_OUT_OF_MEM(pEvent);
+  pEvent->pCol = createPrimaryKeyCol(pCxt, NULL);
+  if (NULL == pEvent->pCol) {
+    nodesDestroyNode((SNode*)pEvent);
+    CHECK_OUT_OF_MEM(NULL);
+  }
+  pEvent->pStartCond = pStartCond;
+  pEvent->pEndCond = pEndCond;
+  return (SNode*)pEvent;
+}
+
 SNode* createIntervalWindowNode(SAstCreateContext* pCxt, SNode* pInterval, SNode* pOffset, SNode* pSliding,
                                 SNode* pFill) {
   CHECK_PARSER_STATUS(pCxt);
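Note that createEventWindowNode attaches the primary-key timestamp column itself (via createPrimaryKeyCol), so an event window is always delimited along the table's timestamp order while the start and end conditions are free to reference ordinary columns. Assuming the _wstart/_wend pseudocolumns behave here as they do for the other window types, a query like the following would report each window's time span (illustrative sketch only, not taken from the commit):

    SELECT _wstart, _wend, COUNT(*) FROM t1
      EVENT_WINDOW START WITH c1 >= 100 END WITH c1 < 100;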
@@ -90,6 +90,7 @@ static SKeyword keywordTable[] = {
     {"EXISTS", TK_EXISTS},
     {"EXPIRED", TK_EXPIRED},
     {"EXPLAIN", TK_EXPLAIN},
+    {"EVENT_WINDOW", TK_EVENT_WINDOW},
     {"EVERY", TK_EVERY},
     {"FILE", TK_FILE},
     {"FILL", TK_FILL},
@@ -195,15 +196,16 @@ static SKeyword keywordTable[] = {
     {"SNODES", TK_SNODES},
     {"SOFFSET", TK_SOFFSET},
     {"SPLIT", TK_SPLIT},
-    {"STT_TRIGGER", TK_STT_TRIGGER},
     {"STABLE", TK_STABLE},
     {"STABLES", TK_STABLES},
+    {"START", TK_START},
     {"STATE", TK_STATE},
     {"STATE_WINDOW", TK_STATE_WINDOW},
     {"STORAGE", TK_STORAGE},
     {"STREAM", TK_STREAM},
     {"STREAMS", TK_STREAMS},
     {"STRICT", TK_STRICT},
+    {"STT_TRIGGER", TK_STT_TRIGGER},
     {"SUBSCRIBE", TK_SUBSCRIBE},
     {"SUBSCRIPTIONS", TK_SUBSCRIPTIONS},
     {"SUBTABLE", TK_SUBTABLE},
@@ -3147,6 +3147,15 @@ static int32_t translateSessionWindow(STranslateContext* pCxt, SSelectStmt* pSel
   return TSDB_CODE_SUCCESS;
 }

+static int32_t translateEventWindow(STranslateContext* pCxt, SSelectStmt* pSelect) {
+  if (QUERY_NODE_TEMP_TABLE == nodeType(pSelect->pFromTable) &&
+      !isGlobalTimeLineQuery(((STempTableNode*)pSelect->pFromTable)->pSubquery)) {
+    return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TIMELINE_QUERY,
+                                   "EVENT_WINDOW requires valid time series input");
+  }
+  return TSDB_CODE_SUCCESS;
+}
+
 static int32_t translateSpecificWindow(STranslateContext* pCxt, SSelectStmt* pSelect) {
   switch (nodeType(pSelect->pWindow)) {
     case QUERY_NODE_STATE_WINDOW:
@@ -3155,6 +3164,8 @@ static int32_t translateSpecificWindow(STranslateContext* pCxt, SSelectStmt* pSe
       return translateSessionWindow(pCxt, pSelect);
     case QUERY_NODE_INTERVAL_WINDOW:
       return translateIntervalWindow(pCxt, pSelect);
+    case QUERY_NODE_EVENT_WINDOW:
+      return translateEventWindow(pCxt, pSelect);
     default:
       break;
   }
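The check above only fires for subquery sources that lose the global timeline. As a sketch, and assuming isGlobalTimeLineQuery accepts a subquery ordered by the timestamp column while rejecting one ordered by an ordinary column:

    -- presumably accepted: the subquery preserves a global timeline
    SELECT COUNT(*) FROM (SELECT * FROM t1 ORDER BY ts)
      EVENT_WINDOW START WITH c1 > 10 END WITH c1 <= 10;

    -- presumably rejected with "EVENT_WINDOW requires valid time series input"
    SELECT COUNT(*) FROM (SELECT * FROM t1 ORDER BY c1)
      EVENT_WINDOW START WITH c1 > 10 END WITH c1 <= 10;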
(File diff suppressed because it is too large.)
@@ -814,6 +814,29 @@ static int32_t createWindowLogicNodeByInterval(SLogicPlanContext* pCxt, SInterva
   return createWindowLogicNodeFinalize(pCxt, pSelect, pWindow, pLogicNode);
 }

+static int32_t createWindowLogicNodeByEvent(SLogicPlanContext* pCxt, SEventWindowNode* pEvent, SSelectStmt* pSelect,
+                                            SLogicNode** pLogicNode) {
+  SWindowLogicNode* pWindow = (SWindowLogicNode*)nodesMakeNode(QUERY_NODE_LOGIC_PLAN_WINDOW);
+  if (NULL == pWindow) {
+    return TSDB_CODE_OUT_OF_MEMORY;
+  }
+
+  pWindow->winType = WINDOW_TYPE_EVENT;
+  pWindow->node.groupAction = getGroupAction(pCxt, pSelect);
+  pWindow->node.requireDataOrder =
+      pCxt->pPlanCxt->streamQuery ? DATA_ORDER_LEVEL_IN_BLOCK : getRequireDataOrder(true, pSelect);
+  pWindow->node.resultDataOrder =
+      pCxt->pPlanCxt->streamQuery ? DATA_ORDER_LEVEL_GLOBAL : pWindow->node.requireDataOrder;
+  pWindow->pStartCond = nodesCloneNode(pEvent->pStartCond);
+  pWindow->pEndCond = nodesCloneNode(pEvent->pEndCond);
+  pWindow->pTspk = nodesCloneNode(pEvent->pCol);
+  if (NULL == pWindow->pStartCond || NULL == pWindow->pEndCond || NULL == pWindow->pTspk) {
+    nodesDestroyNode((SNode*)pWindow);
+    return TSDB_CODE_OUT_OF_MEMORY;
+  }
+  return createWindowLogicNodeFinalize(pCxt, pSelect, pWindow, pLogicNode);
+}
+
 static int32_t createWindowLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect, SLogicNode** pLogicNode) {
   if (NULL == pSelect->pWindow) {
     return TSDB_CODE_SUCCESS;
@@ -826,6 +849,8 @@ static int32_t createWindowLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSele
       return createWindowLogicNodeBySession(pCxt, (SSessionWindowNode*)pSelect->pWindow, pSelect, pLogicNode);
     case QUERY_NODE_INTERVAL_WINDOW:
       return createWindowLogicNodeByInterval(pCxt, (SIntervalWindowNode*)pSelect->pWindow, pSelect, pLogicNode);
+    case QUERY_NODE_EVENT_WINDOW:
+      return createWindowLogicNodeByEvent(pCxt, (SEventWindowNode*)pSelect->pWindow, pSelect, pLogicNode);
     default:
       break;
   }
@@ -1297,6 +1297,33 @@ static int32_t createStateWindowPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pC
   return code;
 }

+static int32_t createEventWindowPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren,
+                                          SWindowLogicNode* pWindowLogicNode, SPhysiNode** pPhyNode) {
+  SEventWinodwPhysiNode* pEvent = (SEventWinodwPhysiNode*)makePhysiNode(
+      pCxt, (SLogicNode*)pWindowLogicNode,
+      (pCxt->pPlanCxt->streamQuery ? QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT : QUERY_NODE_PHYSICAL_PLAN_MERGE_EVENT));
+  if (NULL == pEvent) {
+    return TSDB_CODE_OUT_OF_MEMORY;
+  }
+
+  SDataBlockDescNode* pChildTupe = (((SPhysiNode*)nodesListGetNode(pChildren, 0))->pOutputDataBlockDesc);
+  int32_t code = setNodeSlotId(pCxt, pChildTupe->dataBlockId, -1, pWindowLogicNode->pStartCond, &pEvent->pStartCond);
+  if (TSDB_CODE_SUCCESS == code) {
+    code = setNodeSlotId(pCxt, pChildTupe->dataBlockId, -1, pWindowLogicNode->pEndCond, &pEvent->pEndCond);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = createWindowPhysiNodeFinalize(pCxt, pChildren, &pEvent->window, pWindowLogicNode);
+  }
+
+  if (TSDB_CODE_SUCCESS == code) {
+    *pPhyNode = (SPhysiNode*)pEvent;
+  } else {
+    nodesDestroyNode((SNode*)pEvent);
+  }
+
+  return code;
+}
+
 static int32_t createWindowPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren, SWindowLogicNode* pWindowLogicNode,
                                      SPhysiNode** pPhyNode) {
   switch (pWindowLogicNode->winType) {
@@ -1306,6 +1333,8 @@ static int32_t createWindowPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildr
       return createSessionWindowPhysiNode(pCxt, pChildren, pWindowLogicNode, pPhyNode);
     case WINDOW_TYPE_STATE:
       return createStateWindowPhysiNode(pCxt, pChildren, pWindowLogicNode, pPhyNode);
+    case WINDOW_TYPE_EVENT:
+      return createEventWindowPhysiNode(pCxt, pChildren, pWindowLogicNode, pPhyNode);
     default:
       break;
   }
@@ -729,6 +729,18 @@ static int32_t stbSplSplitState(SSplitContext* pCxt, SStableSplitInfo* pInfo) {
   }
 }

+static int32_t stbSplSplitEventForStream(SSplitContext* pCxt, SStableSplitInfo* pInfo) {
+  return TSDB_CODE_PLAN_INTERNAL_ERROR;
+}
+
+static int32_t stbSplSplitEvent(SSplitContext* pCxt, SStableSplitInfo* pInfo) {
+  if (pCxt->pPlanCxt->streamQuery) {
+    return stbSplSplitEventForStream(pCxt, pInfo);
+  } else {
+    return stbSplSplitSessionOrStateForBatch(pCxt, pInfo);
+  }
+}
+
 static bool stbSplIsPartTableWinodw(SWindowLogicNode* pWindow) {
   return stbSplHasPartTbname(stbSplGetPartKeys((SLogicNode*)nodesListGetNode(pWindow->node.pChildren, 0)));
 }
@@ -741,6 +753,8 @@ static int32_t stbSplSplitWindowForCrossTable(SSplitContext* pCxt, SStableSplitI
       return stbSplSplitSession(pCxt, pInfo);
     case WINDOW_TYPE_STATE:
       return stbSplSplitState(pCxt, pInfo);
+    case WINDOW_TYPE_EVENT:
+      return stbSplSplitEvent(pCxt, pInfo);
     default:
       break;
   }
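Since stbSplSplitEventForStream simply returns TSDB_CODE_PLAN_INTERNAL_ERROR, cross-table event windows are only plannable for batch queries in this commit, which reuse the session/state split path; a stream plan over a super table would presumably fail until dedicated stream support lands. The batch super-table case is covered by the new plan test:

    SELECT COUNT(*) FROM st1
      EVENT_WINDOW START WITH c1 > 10 END WITH c2 = 'abc';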
@@ -197,6 +197,15 @@ static int32_t adjustStateDataRequirement(SWindowLogicNode* pWindow, EDataOrderL
   return TSDB_CODE_SUCCESS;
 }

+static int32_t adjustEventDataRequirement(SWindowLogicNode* pWindow, EDataOrderLevel requirement) {
+  if (requirement <= pWindow->node.resultDataOrder) {
+    return TSDB_CODE_SUCCESS;
+  }
+  pWindow->node.resultDataOrder = requirement;
+  pWindow->node.requireDataOrder = requirement;
+  return TSDB_CODE_SUCCESS;
+}
+
 static int32_t adjustWindowDataRequirement(SWindowLogicNode* pWindow, EDataOrderLevel requirement) {
   switch (pWindow->winType) {
     case WINDOW_TYPE_INTERVAL:
@@ -205,6 +214,8 @@ static int32_t adjustWindowDataRequirement(SWindowLogicNode* pWindow, EDataOrder
       return adjustSessionDataRequirement(pWindow, requirement);
     case WINDOW_TYPE_STATE:
       return adjustStateDataRequirement(pWindow, requirement);
+    case WINDOW_TYPE_EVENT:
+      return adjustEventDataRequirement(pWindow, requirement);
     default:
       break;
   }
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "planTestUtil.h"
+#include "planner.h"
+
+using namespace std;
+
+class PlanEventTest : public PlannerTestBase {};
+
+TEST_F(PlanEventTest, basic) {
+  useDb("root", "test");
+
+  run("SELECT COUNT(*) FROM t1 EVENT_WINDOW START WITH c1 > 10 END WITH c2 = 'abc'");
+}
+
+TEST_F(PlanEventTest, stable) {
+  useDb("root", "test");
+
+  run("SELECT COUNT(*) FROM st1 EVENT_WINDOW START WITH c1 > 10 END WITH c2 = 'abc'");
+}
@@ -354,6 +354,10 @@ int32_t taosGetCpuInfo(char *cpuModel, int32_t maxLen, float *numOfCores) {
     code = 0;
     done |= 1;
   }
+  int endPos = strlen(cpuModel)-1;
+  if (cpuModel[endPos] == '\n') {
+    cpuModel[endPos] = '\0';
+  }
   taosCloseCmd(&pCmd);

   pCmd = taosOpenCmd("sysctl -n machdep.cpu.core_count");
@@ -56,7 +56,7 @@ class TDTestCase:

     def run(self):
         binPath = self.getPath()
-        cmd = "%s -F 7 -H 9 -n 10 -t 2 -x -y -M -C -d newtest -l 5 -A binary,nchar\(31\) -b tinyint,binary\(23\),bool,nchar -w 29 -E -m $%%^*" %binPath
+        cmd = "%s -F 7 -n 10 -t 2 -x -y -M -C -d newtest -l 5 -A binary,nchar\(31\) -b tinyint,binary\(23\),bool,nchar -w 29 -E -m $%%^*" %binPath
         tdLog.info("%s" % cmd)
         os.system("%s" % cmd)
         tdSql.execute("use newtest")
@@ -53,7 +53,7 @@ class TDTestCase:

     def run(self):
         binPath = self.getPath()
-        cmd = "%s -F abc -P abc -I abc -T abc -H abc -i abc -S abc -B abc -r abc -t abc -n abc -l abc -w abc -w 16385 -R abc -O abc -a abc -n 2 -t 2 -r 1 -y" %binPath
+        cmd = "%s -F abc -P abc -I abc -T abc -i abc -S abc -B abc -r abc -t abc -n abc -l abc -w abc -w 16385 -R abc -O abc -a abc -n 2 -t 2 -r 1 -y" %binPath
         tdLog.info("%s" % cmd)
         os.system("%s" % cmd)
         tdSql.query("select count(*) from test.meters")
@@ -122,6 +122,9 @@ function runSimCases() {

 function runPythonCases() {
     echo "=== Run python cases ==="
+
+    cd $TDENGINE_DIR/tests/parallel_test
+    sed -i '/compatibility.py/d' cases.task

     cd $TDENGINE_DIR/tests/system-test
     runCasesOneByOne ../parallel_test/cases.task system-test
@@ -552,7 +552,7 @@ sql use test4;
 sql create table st (ts timestamp, c1 tinyint, c2 smallint) tags (t1 tinyint) ;
 sql create table t1 using st tags (-81) ;
 sql create table t2 using st tags (-81) ;
-sql create stream if not exists streams4 trigger window_close into streamt4 as select _wstart AS start, min(c1),count(c1) from t1 state_window(c1);
+sql create stream if not exists streams4 trigger window_close into streamt4 as select _wstart AS startts, min(c1),count(c1) from t1 state_window(c1);

 sql insert into t1 (ts, c1) values (1668073288209, 11);
 sql insert into t1 (ts, c1) values (1668073288210, 11);
@@ -567,7 +567,7 @@ loop7:

 sleep 200

-sql select * from streamt4 order by start;
+sql select * from streamt4 order by startts;

 $loop_count = $loop_count + 1
 if $loop_count == 20 then
@@ -606,7 +606,7 @@ loop8:

 sleep 200

-sql select * from streamt4 order by start;
+sql select * from streamt4 order by startts;

 $loop_count = $loop_count + 1
 if $loop_count == 20 then
@@ -640,7 +640,7 @@ loop8:

 sleep 200

-sql select * from streamt4 order by start;
+sql select * from streamt4 order by startts;

 $loop_count = $loop_count + 1
 if $loop_count == 20 then
@@ -679,7 +679,7 @@ loop9:

 sleep 200

-sql select * from streamt4 order by start;
+sql select * from streamt4 order by startts;

 $loop_count = $loop_count + 1
 if $loop_count == 20 then
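The alias renames in this and the following test files follow from the keyword-table change above: START is now a reserved word (TK_START), so `_wstart AS start` no longer parses and the alias is renamed to `startts`. A minimal before/after, taken from the stream definition in this script:

    -- before (alias "start" now collides with the new START keyword):
    create stream if not exists streams4 trigger window_close into streamt4 as select _wstart AS start, min(c1), count(c1) from t1 state_window(c1);
    -- after:
    create stream if not exists streams4 trigger window_close into streamt4 as select _wstart AS startts, min(c1), count(c1) from t1 state_window(c1);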
@@ -136,7 +136,7 @@ class TDTestCase:
         tdSql.query("use source_db")
         tdSql.query("create table if not exists source_db.stb (ts timestamp, k int) tags (a int);")
         tdSql.query("create table source_db.ct1 using source_db.stb tags(1000);create table source_db.ct2 using source_db.stb tags(2000);create table source_db.ct3 using source_db.stb tags(3000);")
-        tdSql.query("create stream s1 into source_db.output_stb as select _wstart AS start, min(k), max(k), sum(k) from source_db.stb interval(10m);")
+        tdSql.query("create stream s1 into source_db.output_stb as select _wstart AS startts, min(k), max(k), sum(k) from source_db.stb interval(10m);")


         #TD-19944 -Q=3
@@ -108,7 +108,7 @@ class TDTestCase:


         # create stream
-        tdSql.execute('''create stream current_stream into stream_max_stable_1 as select _wstart as start, _wend as wend, max(q_int) as max_int, min(q_bigint) as min_int from stable_1 where ts is not null interval (5s);''')
+        tdSql.execute('''create stream current_stream into stream_max_stable_1 as select _wstart as startts, _wend as wend, max(q_int) as max_int, min(q_bigint) as min_int from stable_1 where ts is not null interval (5s);''')

         # insert data
         for i in range(num_random*n):
@@ -187,20 +187,20 @@ class TDTestCase:

         sleep(5)
         # stream data check
-        tdSql.query("select start,wend,max_int from stream_max_stable_1 ;")
+        tdSql.query("select startts,wend,max_int from stream_max_stable_1 ;")
         tdSql.checkRows(20)
         tdSql.query("select sum(max_int) from stream_max_stable_1 ;")
         stream_data_1 = tdSql.queryResult[0][0]
         tdSql.query("select sum(min_int) from stream_max_stable_1 ;")
         stream_data_2 = tdSql.queryResult[0][0]
-        tdSql.query("select sum(max_int),sum(min_int) from (select _wstart as start, _wend as wend, max(q_int) as max_int, min(q_bigint) as min_int from stable_1 where ts is not null interval (5s));")
+        tdSql.query("select sum(max_int),sum(min_int) from (select _wstart as startts, _wend as wend, max(q_int) as max_int, min(q_bigint) as min_int from stable_1 where ts is not null interval (5s));")
         sql_data_1 = tdSql.queryResult[0][0]
         sql_data_2 = tdSql.queryResult[0][1]

         self.stream_value_check(stream_data_1,sql_data_1)
         self.stream_value_check(stream_data_2,sql_data_2)

-        tdSql.query("select sum(max_int),sum(min_int) from (select _wstart as start, _wend as wend, max(q_int) as max_int, min(q_bigint) as min_int from stable_1 interval (5s));")
+        tdSql.query("select sum(max_int),sum(min_int) from (select _wstart as startts, _wend as wend, max(q_int) as max_int, min(q_bigint) as min_int from stable_1 interval (5s));")
         sql_data_1 = tdSql.queryResult[0][0]
         sql_data_2 = tdSql.queryResult[0][1]
@@ -37,7 +37,9 @@ static int horizontalPrintWebsocket(WS_RES* wres, double* execute_time) {
   const void* data = NULL;
   int rows;
   ws_fetch_block(wres, &data, &rows);
-  *execute_time += (double)(ws_take_timing(wres)/1E6);
+  if (wres) {
+    *execute_time += (double)(ws_take_timing(wres)/1E6);
+  }
   if (!rows) {
     return 0;
   }
@@ -77,7 +79,9 @@ static int verticalPrintWebsocket(WS_RES* wres, double* pexecute_time) {
   int rows = 0;
   const void* data = NULL;
   ws_fetch_block(wres, &data, &rows);
-  *pexecute_time += (double)(ws_take_timing(wres)/1E6);
+  if (wres) {
+    *pexecute_time += (double)(ws_take_timing(wres)/1E6);
+  }
   if (!rows) {
     return 0;
   }
@@ -129,7 +133,9 @@ static int dumpWebsocketToFile(const char* fname, WS_RES* wres, double* pexecute
   int rows = 0;
   const void* data = NULL;
   ws_fetch_block(wres, &data, &rows);
-  *pexecute_time += (double)(ws_take_timing(wres)/1E6);
+  if (wres) {
+    *pexecute_time += (double)(ws_take_timing(wres)/1E6);
+  }
   if (!rows) {
     taosCloseFile(&pFile);
     return 0;
@@ -223,17 +229,23 @@ void shellRunSingleCommandWebsocketImp(char *command) {
       if (code == TSDB_CODE_WS_SEND_TIMEOUT || code == TSDB_CODE_WS_RECV_TIMEOUT) {
         fprintf(stderr, "Hint: use -t to increase the timeout in seconds\n");
       } else if (code == TSDB_CODE_WS_INTERNAL_ERRO || code == TSDB_CODE_WS_CLOSED) {
-        fprintf(stderr, "TDengine server is disconnected, will try to reconnect\n");
         shell.ws_conn = NULL;
       }
       ws_free_result(res);
-      if (reconnectNum == 0) continue;
+      if (reconnectNum == 0) {
+        continue;
+      } else {
+        fprintf(stderr, "TDengine server is disconnected, will try to reconnect\n");
+      }
       return;
     }
     break;
   }

-  double execute_time = ws_take_timing(res)/1E6;
+  double execute_time = 0;
+  if (res) {
+    execute_time = ws_take_timing(res)/1E6;
+  }

   if (shellRegexMatch(command, "^\\s*use\\s+[a-zA-Z0-9_]+\\s*;\\s*$", REG_EXTENDED | REG_ICASE)) {
     fprintf(stdout, "Database changed.\r\n\r\n");