Merge branch '3.0' into fix/TD-26189-libs3

This commit is contained in:
Minglei Jin 2023-10-23 16:31:21 +08:00
commit 80fb38f172
349 changed files with 13752 additions and 13400 deletions

SECURITY.md Normal file
View File

@ -0,0 +1,5 @@
# Security Policy
## Reporting a Vulnerability
Please submit CVE reports to https://github.com/taosdata/TDengine/security/advisories.

View File

@ -93,6 +93,8 @@ ELSE()
set(VAR_TSZ "TSZ" CACHE INTERNAL "global variant tsz" )
ENDIF()
# force set all platform to JEMALLOC_ENABLED = false
SET(JEMALLOC_ENABLED OFF)
IF (TD_WINDOWS)
MESSAGE("${Yellow} set compiler flag for Windows! ${ColourReset}")
SET(COMMON_FLAGS "/w /D_WIN32 /DWIN32 /Zi /MTd")
@ -116,8 +118,6 @@ IF (TD_WINDOWS)
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COMMON_FLAGS}")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMMON_FLAGS}")
SET(JEMALLOC_ENABLED OFF)
ELSE ()
IF (${TD_DARWIN})
set(CMAKE_MACOSX_RPATH 0)

View File

@ -2,7 +2,7 @@
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "3.2.0.0.alpha")
SET(TD_VER_NUMBER "3.2.1.0.alpha")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)

View File

@ -428,7 +428,7 @@ if(${BUILD_WITH_COS})
INCLUDE_DIRECTORIES($ENV{HOME}/.cos-local.1/include)
#MESSAGE("$ENV{HOME}/.cos-local.1/include")
set(CMAKE_BUILD_TYPE debug)
set(CMAKE_BUILD_TYPE Release)
set(ORIG_CMAKE_PROJECT_NAME ${CMAKE_PROJECT_NAME})
set(CMAKE_PROJECT_NAME cos_c_sdk)

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -4,11 +4,11 @@ description: This document introduces the major features, competitive advantages
toc_max_heading_level: 2
---
TDengine is an [open source](https://tdengine.com/tdengine/open-source-time-series-database/), [high-performance](https://tdengine.com/tdengine/high-performance-time-series-database/), [cloud native](https://tdengine.com/tdengine/cloud-native-time-series-database/) [time-series database](https://tdengine.com/tsdb/) optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. Its code, including its cluster feature, is open source under GNU AGPL v3.0. Besides the database engine, it provides [caching](../develop/cache), [stream processing](../develop/stream), [data subscription](../develop/tmq) and other functionalities to reduce the system complexity and cost of development and operation.
TDengine is a big data platform designed and optimized for IoT (Internet of Things) and Industrial Internet. It can safely and effectively converge, store, process and distribute the high volumes of data (TB or even PB scale) generated every day by numerous devices and data acquisition units, monitor and alert on business operation status in real time, and provide real-time business insights. The core component of TDengine is TDengine OSS, a high-performance, open-source, cloud-native and simplified time-series database.
This section introduces the major features, competitive advantages, typical use-cases and benchmarks to help you get a high level overview of TDengine.
## Major Features
## Major Features of TDengine OSS
The major features are listed below:
@ -132,3 +132,9 @@ As a high-performance, scalable and SQL supported time-series database, TDengine
- [Introduction to Time-Series Database](https://tdengine.com/tsdb/)
- [Introduction to TDengine competitive advantages](https://tdengine.com/tdengine/)
## Products
TDengine offers two commercial products: TDengine Enterprise and TDengine Cloud. For details, please refer to:
- [TDengine Enterprise](https://www.taosdata.com/tdengine-pro)
- [TDengine Cloud](https://cloud.taosdata.com/?utm_source=menu&utm_medium=webcn)

View File

@ -221,7 +221,7 @@ curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive
&& tar -xzf php-tdengine.tar.gz -C php-tdengine --strip-components=1
```
> Version number `v1.0.2` is only an example; it can be replaced with any newer version. Please check available versions at [TDengine PHP Connector Releases](https://github.com/Yurunsoft/php-tdengine/releases).
> Version number `v1.0.2` is only an example; it can be replaced with any newer version.
**Non-Swoole Environment:**

View File

@ -55,7 +55,7 @@ At most 4096 columns are allowed in a STable. If there are more than 4096 metrics
## Create Table
A specific table needs to be created for each data collection point. Similar to RDBMS, a table name and schema are required to create a table. Additionally, one or more tags can be created for each table. To create a table, a STable needs to be used as a template and values need to be specified for the tags. For example, for the meters in [Table 1](/tdinternal/arch#model_table1), the table can be created using the SQL statement below.
A specific table needs to be created for each data collection point. Similar to RDBMS, a table name and schema are required to create a table. Additionally, one or more tags can be created for each table. To create a table, a STable needs to be used as a template and values need to be specified for the tags. For example, for the smart meters table, the table can be created using the SQL statement below.
```sql
CREATE TABLE d1001 USING meters TAGS ("California.SanFrancisco", 2);

View File

@ -38,7 +38,10 @@ meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0
- All the data in `tag_set` will be converted to NCHAR type automatically
- Each value in `field_set` must be self-descriptive about its data type. For example, 1.2f32 means a value 1.2 of float type. Without the "f" type suffix, it will be treated as type double
- Multiple kinds of precision can be used for the `timestamp` field. Time precision can be from nanosecond (ns) to hour (h)
- The child table name is created automatically by a rule that guarantees its uniqueness. But you can configure `smlChildTableName` in taos.cfg to specify a tag value as the table name if the tag value is unique globally. For example, if a tag is called `tname` and you set `smlChildTableName=tname` in taos.cfg, when you insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000`, the child table `cpu1` will be created automatically. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored
- The rules for child table names (see the Python sketch after this note):
  - The child table name is created automatically by a rule that guarantees its uniqueness.
  - You can configure `smlAutoChildTableNameDelimiter` in taos.cfg to join tag values with a delimiter into the table name. For example, if you set `smlAutoChildTableNameDelimiter=-` in taos.cfg and insert `st,t0=cpu1,t1=4 c1=3 1626006833639000000`, the child table will be named `cpu1-4`
  - You can configure `smlChildTableName` in taos.cfg to specify a tag value as the table name if the tag value is unique globally. For example, if a tag is called `tname` and you set `smlChildTableName=tname` in taos.cfg, when you insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000`, the child table `cpu1` will be created automatically. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored
- It is assumed that the order of field_set in a supertable is consistent, meaning that the first record contains all fields and subsequent records store fields in the same order. If the order is not consistent, set smlDataFormat in taos.cfg to false. Otherwise, data will be written out of order and a database error will occur. (smlDataFormat defaults to false since version 3.0.1.3 and has been deprecated since 3.0.3.0)
:::
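As a rough illustration of these naming rules, the following minimal sketch uses the Python connector's schemaless API. It is a sketch only: the connection parameters and the `test` database are assumptions, and it presumes the client-side taos.cfg sets `smlAutoChildTableNameDelimiter=-` as described above.

```python
import taos

# Assumes a local TDengine instance whose client-side taos.cfg sets
# smlAutoChildTableNameDelimiter=- (an assumption for this sketch).
conn = taos.connect(host="localhost", user="root", password="taosdata")
conn.execute("CREATE DATABASE IF NOT EXISTS test")
conn.select_db("test")

# One InfluxDB line-protocol record; with the delimiter configured,
# the child table is expected to be named cpu1-4.
lines = ["st,t0=cpu1,t1=4 c1=3 1626006833639000000"]
conn.schemaless_insert(lines, taos.SmlProtocol.LINE_PROTOCOL, taos.SmlPrecision.NOT_CONFIGURED)
```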

View File

@ -33,7 +33,10 @@ For example:
meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3
```
- The child table name is created automatically by a rule that guarantees its uniqueness. But you can configure `smlChildTableName` in taos.cfg to specify a tag value as the table name if the tag value is unique globally. For example, if a tag is called `tname` and you set `smlChildTableName=tname` in taos.cfg, when you insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000`, the child table `cpu1` will be created automatically. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
- The rules for child table names:
  - The child table name is created automatically by a rule that guarantees its uniqueness.
  - You can configure `smlAutoChildTableNameDelimiter` in taos.cfg to join tag values with a delimiter into the table name. For example, if you set `smlAutoChildTableNameDelimiter=-` in taos.cfg and insert `st,t0=cpu1,t1=4 c1=3 1626006833639000000`, the child table will be named `cpu1-4`
  - You can configure `smlChildTableName` in taos.cfg to specify a tag value as the table name if the tag value is unique globally. For example, if a tag is called `tname` and you set `smlChildTableName=tname` in taos.cfg, when you insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000`, the child table `cpu1` will be created automatically. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored
Please refer to [OpenTSDB Telnet API](http://opentsdb.net/docs/build/html/api_telnet/put.html) for more details.

View File

@ -48,7 +48,10 @@ Please refer to [OpenTSDB HTTP API](http://opentsdb.net/docs/build/html/api_http
:::note
- In JSON protocol, strings will be converted to NCHAR type and numeric values will be converted to double type.
- The child table name is created automatically by a rule that guarantees its uniqueness. But you can configure `smlChildTableName` in taos.cfg to specify a tag value as the table name if the tag value is unique globally. For example, if a tag is called `tname` and you set `smlChildTableName=tname` in taos.cfg, when you insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000`, the child table `cpu1` will be created automatically. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
- The rules for child table names:
  - The child table name is created automatically by a rule that guarantees its uniqueness.
  - You can configure `smlAutoChildTableNameDelimiter` in taos.cfg to join tag values with a delimiter into the table name. For example, if you set `smlAutoChildTableNameDelimiter=-` in taos.cfg and insert `st,t0=cpu1,t1=4 c1=3 1626006833639000000`, the child table will be named `cpu1-4`
  - You can configure `smlChildTableName` in taos.cfg to specify a tag value as the table name if the tag value is unique globally. For example, if a tag is called `tname` and you set `smlChildTableName=tname` in taos.cfg, when you insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000`, the child table `cpu1` will be created automatically. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored
:::

View File

@ -352,10 +352,11 @@ You configure the following parameters when creating a consumer:
| `td.connect.port` | string | Port of the server side | |
| `group.id` | string | Consumer group ID; consumers with the same ID are in the same group | **Required**. Maximum length: 192. Each topic can create up to 100 consumer groups. |
| `client.id` | string | Client ID | Maximum length: 192. |
| `auto.offset.reset` | enum | Initial offset for the consumer group | `earliest`: subscribe from the earliest data, this is the default behavior; `latest`: subscribe from the latest data; or `none`: can't subscribe without committed offset|
| `auto.offset.reset` | enum | Initial offset for the consumer group | `earliest`: subscribe from the earliest data, the default behavior in versions earlier than 3.2.0.0; `latest`: subscribe from the latest data, the default behavior in version 3.2.0.0 and later; or `none`: can't subscribe without a committed offset |
| `enable.auto.commit` | boolean | Commit automatically; true: the user application doesn't need to commit explicitly; false: the user application needs to handle commits itself | Default value is true |
| `auto.commit.interval.ms` | integer | Interval for automatic commits, in milliseconds | |
| `msg.with.table.name` | boolean | Specify whether to deserialize table names from messages | Default value: false |
| `msg.with.table.name` | boolean | Specify whether to deserialize table names from messages. Not applicable when subscribing to columns (in a column subscription, tbname can be written as a column in the subquery statement). (This parameter has been deprecated since version 3.2.0.0 and is always treated as true.) | Default value: false |
| `enable.replay` | boolean | Specify whether the data replay function is enabled | Default value: false |
The method of specifying these parameters depends on the language used:
@ -371,7 +372,7 @@ tmq_conf_set(conf, "auto.commit.interval.ms", "1000");
tmq_conf_set(conf, "group.id", "cgrpName");
tmq_conf_set(conf, "td.connect.user", "root");
tmq_conf_set(conf, "td.connect.pass", "taosdata");
tmq_conf_set(conf, "auto.offset.reset", "earliest");
tmq_conf_set(conf, "auto.offset.reset", "latest");
tmq_conf_set(conf, "msg.with.table.name", "true");
tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
@ -401,7 +402,7 @@ properties.setProperty("group.id", "cgrpName");
properties.setProperty("bootstrap.servers", "127.0.0.1:6030");
properties.setProperty("td.connect.user", "root");
properties.setProperty("td.connect.pass", "taosdata");
properties.setProperty("auto.offset.reset", "earliest");
properties.setProperty("auto.offset.reset", "latest");
properties.setProperty("msg.with.table.name", "true");
properties.setProperty("value.deserializer", "com.taos.example.MetersDeserializer");
@ -421,7 +422,7 @@ public class MetersDeserializer extends ReferenceDeserializer<Meters> {
```go
conf := &tmq.ConfigMap{
"group.id": "test",
"auto.offset.reset": "earliest",
"auto.offset.reset": "latest",
"td.connect.ip": "127.0.0.1",
"td.connect.user": "root",
"td.connect.pass": "taosdata",
@ -441,7 +442,7 @@ consumer, err := NewConsumer(conf)
let mut dsn: Dsn = "taos://".parse()?;
dsn.set("group.id", "group1");
dsn.set("client.id", "test");
dsn.set("auto.offset.reset", "earliest");
dsn.set("auto.offset.reset", "latest");
let tmq = TmqBuilder::from_dsn(dsn)?;
@ -458,7 +459,19 @@ from taos.tmq import Consumer
# Syntax: `consumer = Consumer(configs)`
#
# Example:
consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
consumer = Consumer(
{
"group.id": "local",
"client.id": "1",
"enable.auto.commit": "true",
"auto.commit.interval.ms": "1000",
"td.connect.ip": "127.0.0.1",
"td.connect.user": "root",
"td.connect.pass": "taosdata",
"auto.offset.reset": "latest",
"msg.with.table.name": "true",
}
)
```
</TabItem>
@ -475,7 +488,7 @@ let consumer = taos.consumer({
'group.id': 'tg2',
'td.connect.user': 'root',
'td.connect.pass': 'taosdata',
'auto.offset.reset': 'earliest',
'auto.offset.reset': 'latest',
'msg.with.table.name': 'true',
'td.connect.ip': '127.0.0.1',
'td.connect.port': '6030'
@ -498,7 +511,7 @@ var cfg = new ConsumerConfig
GourpId = "TDengine-TMQ-C#",
TDConnectUser = "root",
TDConnectPasswd = "taosdata",
AutoOffsetReset = "earliest"
AutoOffsetReset = "latest"
MsgWithTableName = "true",
TDConnectIp = "127.0.0.1",
TDConnectPort = "6030"
@ -514,6 +527,24 @@ var consumer = new ConsumerBuilder(cfg).Build();
A consumer group is automatically created when multiple consumers are configured with the same consumer group ID.
Data replay function description:
- The subscription component supports replay, which replays data according to the time at which it was originally written (a Python sketch follows this list).
  For example, assume the following three records were written at these times:
  ```sql
  2023/09/22 00:00:00.000
  2023/09/22 00:00:05.000
  2023/09/22 00:00:08.000
  ```
  During replay, after the first record is obtained, the second record is returned 5 seconds later, and the third record is returned 3 seconds after the second.
- Only column subscriptions support data replay.
- Replay requires a single, independent timeline:
  - For a subtable or normal-table subscription, only one vnode holds the data, so a single timeline is guaranteed.
  - If subscribing to a supertable, the database must have only one vnode; otherwise an error is reported (because data subscribed across multiple vnodes is not on the same timeline).
- Supertable and database subscriptions (non-column subscriptions) do not support replay.
- The `enable.replay` parameter controls this feature: `true` enables subscription replay; the default is `false` (replay disabled).
- Replay does not support progress saving, so when replay is enabled, auto commit is automatically disabled.
- Because replay itself takes processing time, the replay timing is accurate only to within tens of milliseconds.
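As a sketch of how replay might be enabled from Python (the `Consumer` configuration style follows the Python example earlier in this section; the topic name `topic_meters` and the connection parameters are assumptions):

```python
from taos.tmq import Consumer

# enable.replay turns replay on; auto commit is automatically disabled
# while replaying because replay does not support progress saving.
consumer = Consumer(
    {
        "group.id": "replay_group",
        "td.connect.ip": "127.0.0.1",
        "td.connect.user": "root",
        "td.connect.pass": "taosdata",
        "auto.offset.reset": "earliest",
        "enable.replay": "true",
    }
)
consumer.subscribe(["topic_meters"])  # must be a column subscription on a single timeline

for _ in range(10):
    message = consumer.poll(1)  # messages arrive paced by their original write times
    if message is None:
        continue
    err = message.error()
    if err is not None:
        raise err
    for block in message.value():
        print(block.fetchall())
```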
## Subscribe to a Topic
A single consumer can subscribe to multiple topics.

View File

@ -12,7 +12,7 @@ The FQDN of all hosts must be setup properly. For e.g. FQDNs may have to be conf
### Step 1
If any previous version of TDengine has been installed and configured on any host, the installation needs to be removed and the data needs to be cleaned up. For details about uninstalling, please refer to [Install and Uninstall](../../operation/pkg-install). To clean up the data, please use `rm -rf /var/lib/taos/*` assuming the `dataDir` is configured as `/var/lib/taos`.
If any previous version of TDengine has been installed and configured on any host, the installation needs to be removed and the data needs to be cleaned up. To clean up the data, please use `rm -rf /var/lib/taos/*` assuming the `dataDir` is configured as `/var/lib/taos`.
:::note
FQDN information is written to file. If you have started TDengine without configuring or changing the FQDN, ensure that data is backed up or no longer needed before running the `rm -rf /var/lib/taos/*` command.

View File

@ -24,7 +24,7 @@ SELECT [hints] [DISTINCT] select_list
hints: /*+ [hint([hint_param_list])] [hint([hint_param_list])] */
hint:
BATCH_SCAN | NO_BATCH_SCAN
BATCH_SCAN | NO_BATCH_SCAN | SORT_FOR_GROUP
select_list:
select_expr [, select_expr] ...
@ -87,15 +87,17 @@ Hints are a means of user control over query optimization for individual stateme
The list of currently supported Hints is as follows:
| **Hint** | **Params** | **Comment** | **Scope** |
| :-----------: | -------------- | -------------------------- | -------------------------- |
| BATCH_SCAN | None | Batch table scan | JOIN statement for STable |
| NO_BATCH_SCAN | None | Sequential table scan | JOIN statement for STable |
| **Hint** | **Params** | **Comment** | **Scope** |
| :-----------: | -------------- | -------------------------- | -----------------------------------|
| BATCH_SCAN | None | Batch table scan | JOIN statement for STable |
| NO_BATCH_SCAN | None | Sequential table scan | JOIN statement for STable |
| SORT_FOR_GROUP| None | Use sort for partitioning | PARTITION BY list containing a normal column |
For example:
```sql
SELECT /*+ BATCH_SCAN() */ a.ts FROM stable1 a, stable2 b where a.tag0 = b.tag0 and a.ts = b.ts;
SELECT /*+ SORT_FOR_GROUP() */ count(*), c1 FROM stable1 PARTITION BY c1;
```
## Lists

View File

@ -54,6 +54,7 @@ LIKE is used together with wildcards to match strings. Its usage is described as
MATCH and NMATCH are used together with regular expressions to match strings. Their usage is described as follows:
- Use POSIX regular expression syntax. For more information, see Regular Expressions.
- The `MATCH` operator returns true when the regular expression is matched. The `NMATCH` operator returns true when the regular expression is not matched.
- Regular expressions can be used only against table names (i.e. `tbname`) and tags/columns of binary/nchar types.
- The maximum length of a regular expression string is 128 bytes. The configuration parameter `maxRegexStringLen` can be used to set the maximum allowed regular expression length. It's a client-side configuration parameter and takes effect after the client is restarted.
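For instance, assuming the `meters` supertable used in earlier examples, a `MATCH` filter on subtable names might look like the following sketch (Python connector; the connection parameters and the `test` database are assumptions):

```python
import taos

conn = taos.connect(host="localhost", user="root", password="taosdata", database="test")
cursor = conn.cursor()
# POSIX regular expression on table names: matches subtables d1000 through d1009
cursor.execute(r"SELECT tbname, location FROM meters WHERE tbname MATCH '^d100[0-9]$'")
for row in cursor.fetchall():
    print(row)
```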

View File

@ -180,6 +180,7 @@ The following list shows all reserved keywords:
- MAX_DELAY
- BWLIMIT
- MAXROWS
- MAX_SPEED
- MERGE
- META
- MINROWS

View File

@ -26,75 +26,85 @@ This document introduces the tables of INFORMATION_SCHEMA and their structure.
## INS_DNODES
Provides information about dnodes. Similar to SHOW DNODES.
Provides information about dnodes. Similar to SHOW DNODES. Users whose SYSINFO attribute is 0 can't view this table.
| # | **Column** | **Data Type** | **Description** |
| --- | :------------: | ------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- |
| 1 | vnodes | SMALLINT | Current number of vnodes on the dnode. It should be noted that `vnodes` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 2 | support_vnodes | SMALLINT | Maximum number of vnodes on the dnode |
| 3 | status | BINARY(10) | Current status |
| 4 | note | BINARY(256) | Reason for going offline or other information |
| 3 | status | VARCHAR(10) | Current status |
| 4 | note | VARCHAR(256) | Reason for going offline or other information |
| 5 | id | SMALLINT | Dnode ID |
| 6 | endpoint | BINARY(134) | Dnode endpoint |
| 6 | endpoint | VARCHAR(134) | Dnode endpoint |
| 7 | create | TIMESTAMP | Creation time |
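The INFORMATION_SCHEMA tables are read with ordinary SQL. A minimal sketch with the Python connector (the connection parameters are assumptions):

```python
import taos

conn = taos.connect(host="localhost", user="root", password="taosdata")
cursor = conn.cursor()
# `vnodes` is a TDengine keyword, so it is escaped with backquotes as noted above.
cursor.execute("SELECT id, endpoint, `vnodes`, support_vnodes, status FROM information_schema.ins_dnodes")
for row in cursor.fetchall():
    print(row)
```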
## INS_MNODES
Provides information about mnodes. Similar to SHOW MNODES.
Provides information about mnodes. Similar to SHOW MNODES. Users whose SYSINFO attribute is 0 can't view this table.
| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------- | ------------------------------------------ |
| 1 | id | SMALLINT | Mnode ID |
| 2 | endpoint | BINARY(134) | Mnode endpoint |
| 3 | role | BINARY(10) | Current role |
| 2 | endpoint | VARCHAR(134) | Mnode endpoint |
| 3 | role | VARCHAR(10) | Current role |
| 4 | role_time | TIMESTAMP | Time at which the current role was assumed |
| 5 | create_time | TIMESTAMP | Creation time |
## INS_QNODES
Provides information about qnodes. Similar to SHOW QNODES.
Provides information about qnodes. Similar to SHOW QNODES. Users whose SYSINFO attribute is 0 can't view this table.
| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------- | --------------- |
| 1 | id | SMALLINT | Qnode ID |
| 2 | endpoint | BINARY(134) | Qnode endpoint |
| 2 | endpoint | VARCHAR(134) | Qnode endpoint |
| 3 | create_time | TIMESTAMP | Creation time |
## INS_SNODES
Provides information about snodes. Similar to SHOW SNODES. Users whose SYSINFO attribute is 0 can't view this table.
| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------- | --------------- |
| 1 | id | SMALLINT | Snode ID |
| 2 | endpoint | VARCHAR(134) | Snode endpoint |
| 3 | create_time | TIMESTAMP | Creation time |
## INS_CLUSTER
Provides information about the cluster.
Provides information about the cluster. Users whose SYSINFO attribute is 0 can't view this table.
| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------- | --------------- |
| 1 | id | BIGINT | Cluster ID |
| 2 | name | BINARY(134) | Cluster name |
| 2 | name | VARCHAR(134) | Cluster name |
| 3 | create_time | TIMESTAMP | Creation time |
## INS_DATABASES
Provides information about user-created databases. Similar to SHOW DATABASES.
| # | **Column** | **Data Type** | **Description** |
| # | **Column** | **Data Type** | **Description** |
| --- | :------------------: | ---------------- | ------------------------------------------------ |
| 1| name| BINARY(32)| Database name |
| 1 | name | VARCHAR(64) | Database name |
| 2 | create_time | TIMESTAMP | Creation time |
| 3 | ntables | INT | Number of standard tables and subtables (not including supertables) |
| 4 | vgroups | INT | Number of vgroups. It should be noted that `vgroups` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 6 | replica | INT | Number of replicas. It should be noted that `replica` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 7 | strict | BINARY(4) | Obsoleted |
| 8 | duration | INT | Duration for storage of single files. It should be noted that `duration` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 9 | keep | INT | Data retention period. It should be noted that `keep` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 7 | strict | VARCHAR(4) | Obsoleted |
| 8 | duration | VARCHAR(10) | Duration for storage of single files. It should be noted that `duration` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 9 | keep | VARCHAR(32) | Data retention period. It should be noted that `keep` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 10 | buffer | INT | Write cache size per vnode, in MB. It should be noted that `buffer` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 11 | pagesize | INT | Page size for vnode metadata storage engine, in KB. It should be noted that `pagesize` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 12 | pages | INT | Number of pages per vnode metadata storage engine. It should be noted that `pages` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 13 | minrows | INT | Minimum number of records per file block. It should be noted that `minrows` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 14 | maxrows | INT | Maximum number of records per file block. It should be noted that `maxrows` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 15 | comp | INT | Compression method. It should be noted that `comp` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 16 | precision | BINARY(2) | Time precision. It should be noted that `precision` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 17 | status | BINARY(10) | Current database status |
| 18 | retentions | BINARY (60) | Aggregation interval and retention period. It should be noted that `retentions` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 16 | precision | VARCHAR(2) | Time precision. It should be noted that `precision` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 17 | status | VARCHAR(10) | Current database status |
| 18 | retentions | VARCHAR(60) | Aggregation interval and retention period. It should be noted that `retentions` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 19 | single_stable | BOOL | Whether the database can contain multiple supertables. It should be noted that `single_stable` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 20 | cachemodel | BINARY(60) | Caching method for the newest data. It should be noted that `cachemodel` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 20 | cachemodel | VARCHAR(60) | Caching method for the newest data. It should be noted that `cachemodel` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 21 | cachesize | INT | Memory per vnode used for caching the newest data. It should be noted that `cachesize` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 22 | wal_level | INT | WAL level. It should be noted that `wal_level` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 23 | wal_fsync_period | INT | Interval at which WAL is written to disk. It should be noted that `wal_fsync_period` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
@ -111,15 +121,15 @@ Provides information about user-defined functions.
| # | **Column** | **Data Type** | **Description** |
| --- | :-----------: | ------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| 1 | name | BINARY(64) | Function name |
| 2 | comment | BINARY(255) | Function description. It should be noted that `comment` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 1 | name | VARCHAR(64) | Function name |
| 2 | comment | VARCHAR(255) | Function description. It should be noted that `comment` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 3 | aggregate | INT | Whether the UDF is an aggregate function. It should be noted that `aggregate` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 4 | output_type | BINARY(31) | Output data type |
| 4 | output_type | VARCHAR(31) | Output data type |
| 5 | create_time | TIMESTAMP | Creation time |
| 6 | code_len | INT | Length of the source code |
| 7 | bufsize | INT | Buffer size |
| 8 | func_language | BINARY(31) | UDF programming language |
| 9 | func_body | BINARY(16384) | UDF function body |
| 8 | func_language | VARCHAR(31) | UDF programming language |
| 9 | func_body | VARCHAR(16384) | UDF function body |
| 10 | func_version | INT | UDF function version. starting from 0. Increasing by 1 each time it is updated |
## INS_INDEXES
@ -128,12 +138,12 @@ Provides information about user-created indices. Similar to SHOW INDEX.
| # | **Column** | **Data Type** | **Description** |
| --- | :--------------: | ------------- | --------------------------------------------------------------------- |
| 1 | db_name | BINARY(32) | Database containing the table with the specified index |
| 2 | table_name | BINARY(192) | Table containing the specified index |
| 3 | index_name | BINARY(192) | Index name |
| 4 | column_name | BINARY(64) | Indexed column |
| 5 | index_type | BINARY(10) | SMA or tag index |
| 6 | index_extensions | BINARY(256) | Other information. For SMA/tag indices, this shows a list of functions |
| 1 | db_name | VARCHAR(32) | Database containing the table with the specified index |
| 2 | table_name | VARCHAR(192) | Table containing the specified index |
| 3 | index_name | VARCHAR(192) | Index name |
| 4 | column_name | VARCHAR(64) | Indexed column |
| 5 | index_type | VARCHAR(10) | SMA or tag index |
| 6 | index_extensions | VARCHAR(256) | Other information. For SMA/tag indices, this shows a list of functions |
## INS_STABLES
@ -141,16 +151,16 @@ Provides information about supertables.
| # | **Column** | **Data Type** | **Description** |
| --- | :-----------: | ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| 1 | stable_name | BINARY(192) | Supertable name |
| 2 | db_name | BINARY(64) | Database of the supertable |
| 1 | stable_name | VARCHAR(192) | Supertable name |
| 2 | db_name | VARCHAR(64) | Database of the supertable |
| 3 | create_time | TIMESTAMP | Creation time |
| 4 | columns | INT | Number of columns |
| 5 | tags | INT | Number of tags. It should be noted that `tags` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 6 | last_update | TIMESTAMP | Last updated time |
| 7 | table_comment | BINARY(1024) | Table description |
| 8 | watermark | BINARY(64) | Window closing time. It should be noted that `watermark` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 9 | max_delay | BINARY(64) | Maximum delay for pushing stream processing results. It should be noted that `max_delay` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 10 | rollup | BINARY(128) | Rollup aggregate function. It should be noted that `rollup` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 7 | table_comment | VARCHAR(1024) | Table description |
| 8 | watermark | VARCHAR(64) | Window closing time. It should be noted that `watermark` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 9 | max_delay | VARCHAR(64) | Maximum delay for pushing stream processing results. It should be noted that `max_delay` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 10 | rollup | VARCHAR(128) | Rollup aggregate function. It should be noted that `rollup` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
## INS_TABLES
@ -158,37 +168,37 @@ Provides information about standard tables and subtables.
| # | **Column** | **Data Type** | **Description** |
| --- | :-----------: | ------------- | ---------------------------------------------------------------------------------------------------------------------------------- |
| 1 | table_name | BINARY(192) | Table name |
| 2 | db_name | BINARY(64) | Database name |
| 1 | table_name | VARCHAR(192) | Table name |
| 2 | db_name | VARCHAR(64) | Database name |
| 3 | create_time | TIMESTAMP | Creation time |
| 4 | columns | INT | Number of columns |
| 5 | stable_name | BINARY(192) | Supertable name |
| 5 | stable_name | VARCHAR(192) | Supertable name |
| 6 | uid | BIGINT | Table ID |
| 7 | vgroup_id | INT | Vgroup ID |
| 8 | ttl | INT | Table time-to-live. It should be noted that `ttl` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 9 | table_comment | BINARY(1024) | Table description |
| 10 | type | BINARY(20) | Table type |
| 9 | table_comment | VARCHAR(1024) | Table description |
| 10 | type | VARCHAR(20) | Table type |
## INS_TAGS
| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------- | --------------- |
| 1 | table_name | BINARY(192) | Table name |
| 2 | db_name | BINARY(64) | Database name |
| 3 | stable_name | BINARY(192) | Supertable name |
| 4 | tag_name | BINARY(64) | Tag name |
| 5 | tag_type | BINARY(64) | Tag type |
| 6 | tag_value | BINARY(16384) | Tag value |
| 1 | table_name | VARCHAR(192) | Table name |
| 2 | db_name | VARCHAR(64) | Database name |
| 3 | stable_name | VARCHAR(192) | Supertable name |
| 4 | tag_name | VARCHAR(64) | Tag name |
| 5 | tag_type | VARCHAR(64) | Tag type |
| 6 | tag_value | VARCHAR(16384) | Tag value |
## INS_COLUMNS
| # | **Column** | **Data Type** | **Description** |
| --- | :-----------: | ------------- | ---------------- |
| 1 | table_name | BINARY(192) | Table name |
| 2 | db_name | BINARY(64) | Database name |
| 3 | table_type | BINARY(21) | Table type |
| 4 | col_name | BINARY(64) | Column name |
| 5 | col_type | BINARY(32) | Column type |
| 1 | table_name | VARCHAR(192) | Table name |
| 2 | db_name | VARCHAR(64) | Database name |
| 3 | table_type | VARCHAR(21) | Table type |
| 4 | col_name | VARCHAR(64) | Column name |
| 5 | col_type | VARCHAR(32) | Column type |
| 6 | col_length | INT | Column length |
| 7 | col_precision | INT | Column precision |
| 8 | col_scale | INT | Column scale |
@ -196,51 +206,51 @@ Provides information about standard tables and subtables.
## INS_USERS
Provides information about TDengine users.
Provides information about TDengine users. Users whose SYSINFO attribute is 0 can't view this table.
| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------- | ---------------- |
| 1 | user_name | BINARY(23) | User name |
| 2 | privilege | BINARY(256) | User permissions |
| 1 | user_name | VARCHAR(23) | User name |
| 2 | privilege | VARCHAR(256) | User permissions |
| 3 | create_time | TIMESTAMP | Creation time |
## INS_GRANTS
Provides information about TDengine Enterprise Edition permissions.
Provides information about TDengine Enterprise Edition permissions. Users whose SYSINFO attribute is 0 can't view this table.
| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| 1 | version | BINARY(9) | Whether the deployment is a licensed or trial version |
| 2 | cpu_cores | BINARY(9) | CPU cores included in license |
| 3 | dnodes | BINARY(10) | Dnodes included in license. It should be noted that `dnodes` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 4 | streams | BINARY(10) | Streams included in license. It should be noted that `streams` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 5 | users | BINARY(10) | Users included in license. It should be noted that `users` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 6 | accounts | BINARY(10) | Accounts included in license. It should be noted that `accounts` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 7 | storage | BINARY(21) | Storage space included in license. It should be noted that `storage` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 8 | connections | BINARY(21) | Client connections included in license. It should be noted that `connections` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 9 | databases | BINARY(11) | Databases included in license. It should be noted that `databases` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 10 | speed | BINARY(9) | Write speed specified in license (data points per second) |
| 11 | querytime | BINARY(9) | Total query time specified in license |
| 12 | timeseries | BINARY(21) | Number of metrics included in license |
| 13 | expired | BINARY(5) | Whether the license has expired |
| 14 | expire_time | BINARY(19) | When the trial period expires |
| 1 | version | VARCHAR(9) | Whether the deployment is a licensed or trial version |
| 2 | cpu_cores | VARCHAR(9) | CPU cores included in license |
| 3 | dnodes | VARCHAR(10) | Dnodes included in license. It should be noted that `dnodes` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 4 | streams | VARCHAR(10) | Streams included in license. It should be noted that `streams` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 5 | users | VARCHAR(10) | Users included in license. It should be noted that `users` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 6 | accounts | VARCHAR(10) | Accounts included in license. It should be noted that `accounts` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 7 | storage | VARCHAR(21) | Storage space included in license. It should be noted that `storage` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 8 | connections | VARCHAR(21) | Client connections included in license. It should be noted that `connections` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 9 | databases | VARCHAR(11) | Databases included in license. It should be noted that `databases` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 10 | speed | VARCHAR(9) | Write speed specified in license (data points per second) |
| 11 | querytime | VARCHAR(9) | Total query time specified in license |
| 12 | timeseries | VARCHAR(21) | Number of metrics included in license |
| 13 | expired | VARCHAR(5) | Whether the license has expired |
| 14 | expire_time | VARCHAR(19) | When the trial period expires |
## INS_VGROUPS
Provides information about vgroups.
Provides information about vgroups. Users whose SYSINFO attribute is 0 can't view this table.
| # | **Column** | **Data Type** | **Description** |
| --- | :--------: | ------------- | ----------------------------------------------------------------------------------------------------------------------------------- |
| 1 | vgroup_id | INT | Vgroup ID |
| 2 | db_name | BINARY(32) | Database name |
| 2 | db_name | VARCHAR(32) | Database name |
| 3 | tables | INT | Tables in vgroup. It should be noted that `tables` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 4 | status | BINARY(10) | Vgroup status |
| 4 | status | VARCHAR(10) | Vgroup status |
| 5 | v1_dnode | INT | Dnode ID of first vgroup member |
| 6 | v1_status | BINARY(10) | Status of first vgroup member |
| 6 | v1_status | VARCHAR(10) | Status of first vgroup member |
| 7 | v2_dnode | INT | Dnode ID of second vgroup member |
| 8 | v2_status | BINARY(10) | Status of second vgroup member |
| 8 | v2_status | VARCHAR(10) | Status of second vgroup member |
| 9 | v3_dnode | INT | Dnode ID of third vgroup member |
| 10 | v3_status | BINARY(10) | Status of third vgroup member |
| 10 | v3_status | VARCHAR(10) | Status of third vgroup member |
| 11 | nfiles | INT | Number of data and metadata files in the vgroup |
| 12 | file_size | INT | Size of the data and metadata files in the vgroup |
| 13 | tsma | TINYINT | Whether time-range-wise SMA is enabled. 1 means enabled; 0 means disabled. |
@ -251,55 +261,57 @@ Provides system configuration information.
| # | **Column** | **Data Type** | **Description** |
| --- | :--------: | ------------- | ----------------------------------------------------------------------------------------------------------------------- |
| 1 | name | BINARY(32) | Parameter |
| 2 | value | BINARY(64) | Value. It should be noted that `value` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 1 | name | VARCHAR(32) | Parameter |
| 2 | value | VARCHAR(64) | Value. It should be noted that `value` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
## INS_DNODE_VARIABLES
Provides dnode configuration information.
Provides dnode configuration information. Users whose SYSINFO attribute is 0 can't view this table.
| # | **Column** | **Data Type** | **Description** |
| --- | :--------: | ------------- | ----------------------------------------------------------------------------------------------------------------------- |
| 1 | dnode_id | INT | Dnode ID |
| 2 | name | BINARY(32) | Parameter |
| 3 | value | BINARY(64) | Value. It should be noted that `value` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 2 | name | VARCHAR(32) | Parameter |
| 3 | value | VARCHAR(64) | Value. It should be noted that `value` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
## INS_TOPICS
| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------- | -------------------------------------- |
| 1 | topic_name | BINARY(192) | Topic name |
| 2 | db_name | BINARY(64) | Database for the topic |
| 1 | topic_name | VARCHAR(192) | Topic name |
| 2 | db_name | VARCHAR(64) | Database for the topic |
| 3 | create_time | TIMESTAMP | Creation time |
| 4 | sql | BINARY(1024) | SQL statement used to create the topic |
| 4 | sql | VARCHAR(1024) | SQL statement used to create the topic |
## INS_SUBSCRIPTIONS
| # | **Column** | **Data Type** | **Description** |
| --- | :------------: | ------------- | --------------------------- |
| 1 | topic_name | BINARY(204) | Subscribed topic |
| 2 | consumer_group | BINARY(193) | Subscribed consumer group |
| 1 | topic_name | VARCHAR(204) | Subscribed topic |
| 2 | consumer_group | VARCHAR(193) | Subscribed consumer group |
| 3 | vgroup_id | INT | Vgroup ID for the consumer |
| 4 | consumer_id | BIGINT | Consumer ID |
| 5 | offset | BINARY(64) | Consumption progress |
| 5 | offset | VARCHAR(64) | Consumption progress |
| 6 | rows | BIGINT | Number of consumption items |
## INS_STREAMS
| # | **Column** | **Data Type** | **Description** |
| --- | :----------: | ------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| 1 | stream_name | BINARY(64) | Stream name |
| 1 | stream_name | VARCHAR(64) | Stream name |
| 2 | create_time | TIMESTAMP | Creation time |
| 3 | sql | BINARY(1024) | SQL statement used to create the stream |
| 4 | status | BINARY(20) | Current status |
| 5 | source_db | BINARY(64) | Source database |
| 6 | target_db | BINARY(64) | Target database |
| 7 | target_table | BINARY(192) | Target table |
| 3 | sql | VARCHAR(1024) | SQL statement used to create the stream |
| 4 | status | VARCHAR(20) | Current status |
| 5 | source_db | VARCHAR(64) | Source database |
| 6 | target_db | VARCHAR(64) | Target database |
| 7 | target_table | VARCHAR(192) | Target table |
| 8 | watermark | BIGINT | Watermark (see stream processing documentation). It should be noted that `watermark` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 9 | trigger | INT | Method of triggering the result push (see stream processing documentation). It should be noted that `trigger` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
## INS_USER_PRIVILEGES
Users whose SYSINFO attribute is 0 can't view this table.
| # | **Column** | **Data Type** | **Description** |
| --- | :----------: | ------------ | -------------------------------------------|
| 1 | user_name | VARCHAR(24) | Username |

View File

@ -73,10 +73,10 @@ Shows the SQL statement used to create the specified table. This statement can b
## SHOW DATABASES
```sql
SHOW DATABASES;
SHOW [USER | SYSTEM] DATABASES;
```
Shows all user-created databases.
Shows all databases. The `USER` qualifier limits the output to user-created databases; the `SYSTEM` qualifier limits it to system databases.
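For example, the qualifiers can be exercised as in the following sketch (Python connector; the connection parameters are assumptions):

```python
import taos

conn = taos.connect(host="localhost", user="root", password="taosdata")
cursor = conn.cursor()

cursor.execute("SHOW USER DATABASES")    # user-created databases only
print(cursor.fetchall())

cursor.execute("SHOW SYSTEM DATABASES")  # system databases such as information_schema
print(cursor.fetchall())
```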
## SHOW DNODES
@ -183,10 +183,10 @@ Shows all subscriptions in the system.
## SHOW TABLES
```sql
SHOW [db_name.]TABLES [LIKE 'pattern'];
SHOW [NORMAL | CHILD] [db_name.]TABLES [LIKE 'pattern'];
```
Shows all standard tables and subtables in the current database. You can use LIKE for fuzzy matching.
Shows all standard tables and subtables in the current database. You can use LIKE for fuzzy matching. The `NORMAL` qualifier limits the output to standard tables; the `CHILD` qualifier limits it to subtables.
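For example (Python connector; the `test` database and the table name pattern are assumptions):

```python
import taos

conn = taos.connect(host="localhost", user="root", password="taosdata", database="test")
cursor = conn.cursor()

cursor.execute("SHOW NORMAL TABLES")              # standard tables only
print(cursor.fetchall())

cursor.execute("SHOW CHILD TABLES LIKE 'd100%'")  # subtables only, with fuzzy matching
print(cursor.fetchall())
```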
## SHOW TABLE DISTRIBUTED

View File

@ -1,178 +0,0 @@
---
title: Install and Uninstall
description: This document describes how to install, upgrade, and uninstall TDengine.
---
import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";
This document gives more information about installing, uninstalling, and upgrading TDengine.
## Install
For details about installing TDengine, please refer to the [Installation Guide](../../get-started/package/).
## Uninstall
<Tabs>
<TabItem label="Uninstall by apt-get" value="aptremove">
A TDengine installation made via apt-get can be removed as below:
```bash
$ sudo apt-get remove tdengine
Reading package lists... Done
Building dependency tree
Reading state information... Done
The following packages will be REMOVED:
tdengine
0 upgraded, 0 newly installed, 1 to remove and 18 not upgraded.
After this operation, 68.3 MB disk space will be freed.
Do you want to continue? [Y/n] y
(Reading database ... 135625 files and directories currently installed.)
Removing tdengine (3.0.0.0) ...
TDengine is removed successfully!
```
If you have installed taos-tools, please uninstall it before uninstalling TDengine. The uninstall command is as follows:
```
$ sudo apt remove taostools
Reading package lists... Done
Building dependency tree
Reading state information... Done
The following packages will be REMOVED:
taostools
0 upgraded, 0 newly installed, 1 to remove and 0 not upgraded.
After this operation, 68.3 MB disk space will be freed.
Do you want to continue? [Y/n]
(Reading database ... 147973 files and directories currently installed.)
Removing taostools (2.1.2) ...
```
</TabItem>
<TabItem label="Uninstall Deb" value="debuninst">
Deb package of TDengine can be uninstalled as below:
```
$ sudo dpkg -r tdengine
(Reading database ... 137504 files and directories currently installed.)
Removing tdengine (3.0.0.0) ...
TDengine is removed successfully!
```
Deb package of taosTools can be uninstalled as below:
```
$ sudo dpkg -r taostools
(Reading database ... 147973 files and directories currently installed.)
Removing taostools (2.1.2) ...
```
</TabItem>
<TabItem label="Uninstall RPM" value="rpmuninst">
RPM package of TDengine can be uninstalled as below:
```
$ sudo rpm -e tdengine
TDengine is removed successfully!
```
RPM package of taosTools can be uninstalled as below:
```
sudo rpm -e taostools
taosTools is removed successfully!
```
</TabItem>
<TabItem label="Uninstall tar.gz" value="taruninst">
tar.gz package of TDengine can be uninstalled as below:
```
$ rmtaos
TDengine is removed successfully!
```
tar.gz package of taosTools can be uninstalled as below:
```
$ rmtaostools
Start to uninstall taos tools ...
taos tools is uninstalled successfully!
```
</TabItem>
<TabItem label="Windows uninstall" value="windows">
Run C:\TDengine\unins000.exe to uninstall TDengine on a Windows system.
</TabItem>
<TabItem label="Mac uninstall" value="mac">
TDengine can be uninstalled as below:
```
$ rmtaos
TDengine is removed successfully!
```
</TabItem>
</Tabs>
:::info
- We strongly recommend against using multiple kinds of TDengine installation packages on a single host. The packages may affect each other and cause errors.
- After the deb package is installed, if the installation directory is removed manually, uninstall or reinstall will not work. This issue can be resolved by using the command below, which cleans up TDengine package information.
```
$ sudo rm -f /var/lib/dpkg/info/tdengine*
```
You can then reinstall if needed.
- After the rpm package is installed, if the installation directory is removed manually, uninstall or reinstall will not work. This issue can be resolved by using the command below, which cleans up TDengine package information.
```
$ sudo rpm -e --noscripts tdengine
```
You can then reinstall if needed.
:::
Uninstalling and Modifying Files
- When TDengine is uninstalled, the configuration /etc/taos/taos.cfg, the data directory /var/lib/taos, and the log directory /var/log/taos are kept. They can be deleted manually, but with caution, because deleted data cannot be recovered. Please follow data integrity, security, backup, or other relevant SOPs before deleting any data.
- When reinstalling TDengine, if the default configuration file /etc/taos/taos.cfg exists, it is kept and the configuration file in the installation package is renamed to taos.cfg.orig and stored at /usr/local/taos/cfg as a configuration sample. Otherwise, the configuration file in the installation package is installed to /etc/taos/taos.cfg and used.
## Upgrade
There are two aspects to upgrading: upgrading the installation package and upgrading a running server.
To upgrade a package, follow the steps mentioned previously to first uninstall the old version and then install the new version.
Upgrading a running server is much more complex. First check the version numbers of the old and new versions. The version number of TDengine consists of 4 sections; only if the first 2 sections match can the old version be upgraded to the new version. The steps for upgrading a running server are as below:
- Stop inserting data
- Make sure all data is persisted to disk; you can use the command `flush database` (see the sketch after this list)
- Stop the cluster of TDengine
- Uninstall old version and install new version
- Start the cluster of TDengine
- Execute simple queries, such as the ones executed prior to installing the new package, to make sure there is no data loss
- Run some simple data insertion statements to make sure the cluster works well
- Restore business services
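For the flush step, a minimal sketch with the Python connector (the database name `power` and the connection parameters are assumptions):

```python
import taos

conn = taos.connect(host="localhost", user="root", password="taosdata")
cursor = conn.cursor()
# Persist all in-memory data to disk before stopping the cluster.
cursor.execute("FLUSH DATABASE power")
```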
:::warning
TDengine doesn't guarantee that a lower version is compatible with data generated by a higher version, so downgrading is never recommended.
:::

View File

@ -41,8 +41,6 @@ An existing Grafana Notification Channel can be specified with parameter `-E`, t
Launch `TDinsight.sh` with the command above and restart Grafana, then open Dashboard `http://localhost:3000/d/tdinsight`.
For more use cases and restrictions please refer to [TDinsight](/reference/tdinsight/).
## log database
The data for the TDinsight dashboard is stored in the `log` database (the default; you can change it in taosKeeper's config file. For more information, please refer to the [taosKeeper document](/reference/taosKeeper)). taosKeeper creates the `log` database on startup.
@ -106,22 +104,22 @@ The data of tdinsight dashboard is stored in `log` database (default. You can ch
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|uptime|FLOAT||dnode uptime|
|uptime|FLOAT||dnode uptime in `days`|
|cpu\_engine|FLOAT||cpu usage of tdengine. read from `/proc/<taosd_pid>/stat`|
|cpu\_system|FLOAT||cpu usage of server. read from `/proc/stat`|
|cpu\_cores|FLOAT||cpu cores of server|
|mem\_engine|INT||memory usage of tdengine. read from `/proc/<taosd_pid>/status`|
|mem\_system|INT||available memory on the server|
|mem\_system|INT||available memory on the server in `KB`|
|mem\_total|INT||total memory of server in `KB`|
|disk\_engine|INT|||
|disk\_used|BIGINT||usage of data dir in `bytes`|
|disk\_total|BIGINT||the capacity of data dir in `bytes`|
|net\_in|FLOAT||network throughput rate in kb/s. read from `/proc/net/dev`|
|net\_out|FLOAT||network throughput rate in kb/s. read from `/proc/net/dev`|
|io\_read|FLOAT||io throughput rate in kb/s. read from `/proc/<taosd_pid>/io`|
|io\_write|FLOAT||io throughput rate in kb/s. read from `/proc/<taosd_pid>/io`|
|io\_read\_disk|FLOAT||io throughput rate of disk in kb/s. read from `/proc/<taosd_pid>/io`|
|io\_write\_disk|FLOAT||io throughput rate of disk in kb/s. read from `/proc/<taosd_pid>/io`|
|net\_in|FLOAT||network throughput rate in byte/s. read from `/proc/net/dev`|
|net\_out|FLOAT||network throughput rate in byte/s. read from `/proc/net/dev`|
|io\_read|FLOAT||io throughput rate in byte/s. read from `/proc/<taosd_pid>/io`|
|io\_write|FLOAT||io throughput rate in byte/s. read from `/proc/<taosd_pid>/io`|
|io\_read\_disk|FLOAT||io throughput rate of disk in byte/s. read from `/proc/<taosd_pid>/io`|
|io\_write\_disk|FLOAT||io throughput rate of disk in byte/s. read from `/proc/<taosd_pid>/io`|
|req\_select|INT||number of select queries received per dnode|
|req\_select\_rate|FLOAT||number of select queries received per dnode divided by monitor interval.|
|req\_insert|INT||number of insert queries received per dnode|
@ -150,9 +148,9 @@ The data of tdinsight dashboard is stored in `log` database (default. You can ch
|ts|TIMESTAMP||timestamp|
|name|NCHAR||data directory. default is `/var/lib/taos`|
|level|INT||level for multi-level storage|
|avail|BIGINT||available space for data directory|
|used|BIGINT||used space for data directory|
|total|BIGINT||total space for data directory|
|avail|BIGINT||available space for data directory in `bytes`|
|used|BIGINT||used space for data directory in `bytes`|
|total|BIGINT||total space for data directory in `bytes`|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
@ -165,9 +163,9 @@ The data of tdinsight dashboard is stored in `log` database (default. You can ch
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|name|NCHAR||log directory. default is `/var/log/taos/`|
|avail|BIGINT||available space for log directory|
|used|BIGINT||used space for data directory|
|total|BIGINT||total space for data directory|
|avail|BIGINT||available space for log directory in `bytes`|
|used|BIGINT||used space for data directory in `bytes`|
|total|BIGINT||total space for data directory in `bytes`|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
@ -180,9 +178,9 @@ The data of tdinsight dashboard is stored in `log` database (default. You can ch
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|name|NCHAR||temp directory. default is `/tmp/`|
|avail|BIGINT||available space for temp directory|
|used|BIGINT||used space for temp directory|
|total|BIGINT||total space for temp directory|
|avail|BIGINT||available space for temp directory in `bytes`|
|used|BIGINT||used space for temp directory in `bytes`|
|total|BIGINT||total space for temp directory in `bytes`|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|

View File

@ -1093,7 +1093,7 @@ TaosConsumer consumer = new TaosConsumer<>(config);
- httpConnectTimeout: WebSocket connection timeout in milliseconds, the default value is 5000 ms. It only takes effect when using WebSocket type.
- messageWaitTimeout: socket timeout in milliseconds, the default value is 10000 ms. It only takes effect when using WebSocket type.
- httpPoolSize: Maximum number of concurrent requests on a connection. It only takes effect when using WebSocket type.
- For more information, see [Consumer Parameters](../../../develop/tmq).
- For more information, see [Consumer Parameters](../../../develop/tmq). Note that the default value of auto.offset.reset in data subscription on the TDengine server has changed since version 3.2.0.0.
#### Subscribe to consume data
@ -1193,7 +1193,7 @@ public abstract class ConsumerLoop {
config.setProperty("bootstrap.servers", "localhost:6030");
config.setProperty("td.connect.user", "root");
config.setProperty("td.connect.pass", "taosdata");
config.setProperty("auto.offset.reset", "earliest");
config.setProperty("auto.offset.reset", "latest");
config.setProperty("msg.with.table.name", "true");
config.setProperty("enable.auto.commit", "true");
config.setProperty("auto.commit.interval.ms", "1000");
@ -1276,7 +1276,7 @@ public abstract class ConsumerLoop {
config.setProperty("bootstrap.servers", "localhost:6041");
config.setProperty("td.connect.user", "root");
config.setProperty("td.connect.pass", "taosdata");
config.setProperty("auto.offset.reset", "earliest");
config.setProperty("auto.offset.reset", "latest");
config.setProperty("msg.with.table.name", "true");
config.setProperty("enable.auto.commit", "true");
config.setProperty("auto.commit.interval.ms", "1000");

View File

@ -794,7 +794,7 @@ The TDengine Go Connector supports subscription functionality with the following
```go
consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{
"group.id": "test",
"auto.offset.reset": "earliest",
"auto.offset.reset": "latest",
"td.connect.ip": "127.0.0.1",
"td.connect.user": "root",
"td.connect.pass": "taosdata",
@ -870,6 +870,7 @@ package main
import (
"fmt"
"os"
"time"
"github.com/taosdata/driver-go/v3/af"
"github.com/taosdata/driver-go/v3/af/tmq"
@ -890,19 +891,16 @@ func main() {
if err != nil {
panic(err)
}
if err != nil {
panic(err)
}
consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{
"group.id": "test",
"auto.offset.reset": "earliest",
"td.connect.ip": "127.0.0.1",
"td.connect.user": "root",
"td.connect.pass": "taosdata",
"td.connect.port": "6030",
"client.id": "test_tmq_client",
"enable.auto.commit": "false",
"msg.with.table.name": "true",
"group.id": "test",
"auto.offset.reset": "latest",
"td.connect.ip": "127.0.0.1",
"td.connect.user": "root",
"td.connect.pass": "taosdata",
"td.connect.port": "6030",
"client.id": "test_tmq_client",
"enable.auto.commit": "false",
"msg.with.table.name": "true",
})
if err != nil {
panic(err)
@ -915,10 +913,16 @@ func main() {
if err != nil {
panic(err)
}
_, err = db.Exec("insert into example_tmq.t1 values(now,1)")
if err != nil {
panic(err)
}
go func() {
for {
_, err = db.Exec("insert into example_tmq.t1 values(now,1)")
if err != nil {
panic(err)
}
time.Sleep(time.Millisecond * 100)
}
}()
for i := 0; i < 5; i++ {
ev := consumer.Poll(500)
if ev != nil {
@ -972,6 +976,7 @@ package main
import (
"database/sql"
"fmt"
"time"
"github.com/taosdata/driver-go/v3/common"
tmqcommon "github.com/taosdata/driver-go/v3/common/tmq"
@ -995,7 +1000,7 @@ func main() {
"td.connect.pass": "taosdata",
"group.id": "example",
"client.id": "example_consumer",
"auto.offset.reset": "earliest",
"auto.offset.reset": "latest",
})
if err != nil {
panic(err)
@ -1004,29 +1009,34 @@ func main() {
if err != nil {
panic(err)
}
_, err = db.Exec("create table example_ws_tmq.t_all(ts timestamp," +
"c1 bool," +
"c2 tinyint," +
"c3 smallint," +
"c4 int," +
"c5 bigint," +
"c6 tinyint unsigned," +
"c7 smallint unsigned," +
"c8 int unsigned," +
"c9 bigint unsigned," +
"c10 float," +
"c11 double," +
"c12 binary(20)," +
"c13 nchar(20)" +
")")
if err != nil {
panic(err)
}
go func() {
_, err := db.Exec("create table example_ws_tmq.t_all(ts timestamp," +
"c1 bool," +
"c2 tinyint," +
"c3 smallint," +
"c4 int," +
"c5 bigint," +
"c6 tinyint unsigned," +
"c7 smallint unsigned," +
"c8 int unsigned," +
"c9 bigint unsigned," +
"c10 float," +
"c11 double," +
"c12 binary(20)," +
"c13 nchar(20)" +
")")
if err != nil {
panic(err)
}
_, err = db.Exec("insert into example_ws_tmq.t_all values(now,true,2,3,4,5,6,7,8,9,10.123,11.123,'binary','nchar')")
if err != nil {
panic(err)
for {
_, err = db.Exec("insert into example_ws_tmq.t_all values(now,true,2,3,4,5,6,7,8,9,10.123,11.123,'binary','nchar')")
if err != nil {
panic(err)
}
time.Sleep(time.Millisecond * 100)
}
}()
for i := 0; i < 5; i++ {
ev := consumer.Poll(500)

View File

@ -442,7 +442,7 @@ The following parameters can be configured for the TMQ DSN. Only `group.id` is m
- `group.id`: Consumers in the same consumer group share consumption progress; messages are load-balanced across them with at-least-once semantics.
- `client.id`: Subscriber client ID.
- `auto.offset.reset`: Initial point of subscription. *earliest* subscribes from the beginning, and *latest* subscribes from the newest message. The default is earliest. Note: This parameter is set per consumer group.
- `auto.offset.reset`: Initial point of subscription. *earliest* subscribes from the beginning, and *latest* subscribes from the newest message. The default value varies depending on the TDengine version. For details, see [Data Subscription](https://docs.tdengine.com/develop/tmq/). Note: This parameter is set per consumer group.
- `enable.auto.commit`: Automatically commits. This can be enabled when data consistency is not essential.
- `auto.commit.interval.ms`: Interval for automatic commits.

View File

@ -31,11 +31,13 @@ We recommend using the latest version of `taospy`, regardless of the version of
|Python Connector Version|major changes|
|:-------------------:|:----:|
|2.7.12|1. added support for the `varbinary` type (not yet supported in STMT)<br/> 2. improved query performance (thanks to contributor [hadrianl](https://github.com/taosdata/taos-connector-python/pull/209))|
|2.7.9|support for getting assignment and seek function on subscription|
|2.7.8|add `execute_many` method|
|Python Websocket Connector Version|major changes|
|:----------------------------:|:-----:|
|0.2.9|bug fixes|
|0.2.5|1. support for getting assignment and seek function on subscription <br/> 2. support schemaless <br/> 3. support STMT|
|0.2.4|support `unsubscribe` on subscription|
@ -1023,10 +1025,6 @@ Due to the current imperfection of Python's nanosecond support (see link below),
1. https://stackoverflow.com/questions/10611328/parsing-datetime-strings-containing-nanoseconds
2. https://www.python.org/dev/peps/pep-0564/
## Important Update
[**Release Notes**](https://github.com/taosdata/taos-connector-python/releases)
## API Reference
- [taos](https://docs.taosdata.com/api/taospy/taos/)

View File

@ -52,8 +52,6 @@ curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive
&& tar -xzf php-tdengine.tar.gz -C php-tdengine --strip-components=1
```
> Version number `v1.0.2` is only an example; it can be replaced with any newer version. Available versions are listed in [TDengine PHP Connector Releases](https://github.com/Yurunsoft/php-tdengine/releases).
**Non-Swoole environment:**
```shell

View File

@ -4,7 +4,6 @@ import PkgListV3 from "/components/PkgListV3";
<PkgListV3 type={1} sys="Linux" />
[All Downloads](../../releases/tdengine)
2. Unzip

View File

@ -4,8 +4,6 @@ import PkgListV3 from "/components/PkgListV3";
<PkgListV3 type={8} sys="macOS" />
[All Downloads](../../releases/tdengine)
2. Execute the installer, select the default value as prompted, and complete the installation. If the installation is blocked, you can right-click or ctrl-click on the installation package and select `Open`.
3. configure taos.cfg

View File

@ -3,8 +3,6 @@ import PkgListV3 from "/components/PkgListV3";
1. Download the client installation package
<PkgListV3 type={4} sys="Windows" />
[All Downloads](../../releases/tdengine)
2. Execute the installer, select the default value as prompted, and complete the installation
3. Installation path

View File

@ -31,7 +31,7 @@ taosAdapter provides the following features.
### Install taosAdapter
If you use the TDengine server, you don't need additional steps to install taosAdapter. You can download taosAdapter from [TDengine 3.0 released versions](../../releases/tdengine) to download the TDengine server installation package. If you need to deploy taosAdapter separately on another server other than the TDengine server, you should install the full TDengine server package on that server to install taosAdapter. If you need to build taosAdapter from source code, you can refer to the [Building taosAdapter]( https://github.com/taosdata/taosadapter/blob/3.0/BUILD.md) documentation.
If you use the TDengine server, you don't need additional steps to install taosAdapter. If you need to deploy taosAdapter separately on another server other than the TDengine server, you should install the full TDengine server package on that server to install taosAdapter. If you need to build taosAdapter from source code, you can refer to the [Building taosAdapter](https://github.com/taosdata/taosadapter/blob/3.0/BUILD.md) documentation.
### Start/Stop taosAdapter
@ -180,7 +180,7 @@ See [example/config/taosadapter.toml](https://github.com/taosdata/taosadapter/bl
node_exporter is an exporter for machine metrics. Please visit [https://github.com/prometheus/node_exporter](https://github.com/prometheus/node_exporter) for more information.
- Support for Prometheus remote_read and remote_write
remote_read and remote_write are interfaces for Prometheus data read and write from/to other data storage solution. Please visit [https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/#remote-apis](https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/#remote-apis) for more information.
- Get table's VGroup ID. For more information about VGroup, please refer to [primary-logic-unit](/tdinternal/arch/#primary-logic-unit).
- Get table's VGroup ID.
## Interfaces
@ -246,7 +246,7 @@ node_exporter is an exporter of hardware and OS metrics exposed by the \*NIX kerne
### Get table's VGroup ID
You can call `http://<fqdn>:6041/rest/vgid?db=<db>&table=<table>` to get table's VGroup ID. For more information about VGroup, please refer to [primary-logic-unit](/tdinternal/arch/#primary-logic-unit).
You can call `http://<fqdn>:6041/rest/vgid?db=<db>&table=<table>` to get table's VGroup ID.
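For illustration, the call can be made with any HTTP client; a minimal Go sketch follows. The `test` database and `d0` table are placeholders, and the response is decoded generically since only the endpoint itself is documented here; REST requests use HTTP Basic authentication:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Placeholders: replace db=test and table=d0 with your own names.
	req, err := http.NewRequest("GET", "http://localhost:6041/rest/vgid?db=test&table=d0", nil)
	if err != nil {
		panic(err)
	}
	req.SetBasicAuth("root", "taosdata") // default credentials
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	// Decode into a generic map rather than assuming exact field names.
	var body map[string]interface{}
	if err := json.NewDecoder(resp.Body).Decode(&body); err != nil {
		panic(err)
	}
	fmt.Println(body) // expected to contain the table's VGroup ID
}
```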
## Memory usage optimization methods

View File

@ -13,7 +13,7 @@ taosBenchmark (formerly taosdemo ) is a tool for testing the performance of TDen
There are two ways to install taosBenchmark:
- Installing the official TDengine installer will automatically install taosBenchmark. Please refer to [TDengine installation](../../operation/pkg-install) for details.
- Installing the official TDengine installer will automatically install taosBenchmark.
- Compile taos-tools separately and install them. Please refer to the [taos-tools](https://github.com/taosdata/taos-tools) repository for details.
@ -397,6 +397,7 @@ The configuration parameters for specifying super table tag columns and data col
### Query scenario configuration parameters
`filetype` must be set to `query` in the query scenario.
`query_times` specifies the number of times each query is run.
Slow queries can be killed by setting the `kill_slow_query_threshold` and `kill_slow_query_interval` parameters: the threshold specifies the execution time, in seconds, after which a running query is killed by taosBenchmark, and the interval specifies the sleep time, in seconds, between checks, so that polling for slow queries does not itself consume CPU continuously.

View File

@ -103,7 +103,7 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
use letter and number only. Default is NOT.
-n, --no-escape No escape char '`'. Default is using it.
-Q, --dot-replace Replace dot character with underline character in
the table name.
the table name. (Version 2.5.3)
-T, --thread-num=THREAD_NUM Number of thread for dump in file. Default is
8.
-C, --cloud=CLOUD_DSN specify a DSN to access TDengine cloud service
@ -113,6 +113,10 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
-?, --help Give this help list
--usage Give a short usage message
-V, --version Print program version
-W, --rename=RENAME-LIST Rename database name with new name during
importing data. RENAME-LIST:
"db1=newDB1|db2=newDB2" means rename db1 to newDB1
and rename db2 to newDB2 (Version 2.5.4)
Mandatory or optional arguments to long options are also mandatory or optional
for any corresponding short options.

View File

@ -652,6 +652,15 @@ The charset that takes effect is UTF-8.
| Type | String |
| Default Value | None |
### smlAutoChildTableNameDelimiter
| Attribute | Description |
| ------------- | ------------------------------------------ |
| Applicable | Client only |
| Meaning | Delimiter used to join tag values to form the sub table name |
| Type | String |
| Default Value | None |
### smlTagName
| Attribute | Description |

View File

Note that tag_key1, tag_key2 are not in the original order in which the user entered the tags; the tag names are sorted in ascending string order.
The string's MD5 hash value "md5_val" is calculated after the sorting is completed. The calculation result is then combined with the string to generate the table name: "t_md5_val". "t\_" is a fixed prefix that every table generated by this mapping relationship has.
:::
If you do not want to use an automatically generated table name, there are two ways to specify sub table names; the first takes precedence (a Go sketch of the second option follows this list).
- You can configure smlAutoChildTableNameDelimiter in taos.cfg to join the tag values with a delimiter, for example, `smlAutoChildTableNameDelimiter=-`. If you insert `st,t0=cpu1,t1=4 c1=3 1626006833639000000`, the created table name will be cpu1-4.
- You can configure smlChildTableName in taos.cfg to use a tag value as the table name, for example, `smlChildTableName=tname`. If you insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000`, the cpu1 table will be automatically created. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
2. If the super table obtained by parsing the line protocol does not exist, this super table is created.
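As a sketch of the second naming option above, the following Go fragment writes one line and lets the client map the `tname` tag to the sub table name. It assumes `smlChildTableName=tname` is already set in the client's taos.cfg, a local server, and an existing `test` database; `InfluxDBInsertLines` is the schemaless entry point of the Go connector's `af` package:

```go
package main

import (
	"github.com/taosdata/driver-go/v3/af"
)

func main() {
	// Assumes a local taosd, an existing `test` database, and
	// smlChildTableName=tname already configured in the client taos.cfg.
	conn, err := af.Open("127.0.0.1", "root", "taosdata", "test", 6030)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	// With smlChildTableName=tname the sub table is created as `cpu1`;
	// without it, a generated "t_<md5>" name would be used instead.
	lines := []string{"st,tname=cpu1,t1=4 c1=3 1626006833639000000"}
	if err := conn.InfluxDBInsertLines(lines, "ns"); err != nil {
		panic(err)
	}
}
```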

View File

@ -16,7 +16,7 @@ taosKeeper is a tool for TDengine that exports monitoring metrics. With taosKeep
There are two ways to install taosKeeper:
Methods of installing taosKeeper:
- Installing the official TDengine installer will automatically install taosKeeper. Please refer to [TDengine installation](../../operation/pkg-install) for details.
- Installing the official TDengine installer will automatically install taosKeeper.
- You can compile taosKeeper separately and install it. Please refer to the [taosKeeper](https://github.com/taosdata/taoskeeper) repository for details.
## Configuration and Launch

View File

@ -21,7 +21,7 @@ TDengine Source Connector is used to read data from TDengine in real-time and se
1. Linux operating system
2. Java 8 and Maven installed
3. Git, curl, and vi are installed
4. TDengine is installed and started. If not, please refer to [Installation and Uninstallation](../../operation/pkg-install)
4. TDengine is installed and started.
## Install Kafka

View File

@ -10,76 +10,60 @@ description: How to use Seeq and TDengine to perform time series data analysis
Seeq is an advanced analytics software for the manufacturing industry and the Industrial Internet of Things (IIoT). Seeq supports the use of machine learning innovations within process manufacturing organizations. These capabilities enable organizations to deploy their own or third-party machine learning algorithms into advanced analytics applications used by frontline process engineers and subject matter experts, thus extending the efforts of a single data scientist to many frontline workers.
With the TDengine Java connector, Seeq effortlessly supports querying time series data provided by TDengine and offers functionalities such as data visualization, analysis, and forecasting.
TDengine can be added as a data source in Seeq via the JDBC connector. Once the data source is configured, Seeq can read data from TDengine and offer functionalities such as data visualization, analysis, and forecasting.
### Install Seeq
## Prerequisite
Please download Seeq Server and Seeq Data Lab software installation package from the [Seeq official website](https://www.seeq.com/customer-download).
1. Install Seeq Server and Seeq Data Lab software
2. Install TDengine or register TDengine Cloud service
### Install and start Seeq Server
```
tar xvzf seeq-server-xxx.tar.gz
cd seeq-server-installer
sudo ./install
sudo seeq service enable
sudo seeq start
```
### Install and start Seeq Data Lab Server
Seeq Data Lab needs to be installed on a separate server from Seeq Server and connected to Seeq Server through configuration. For detailed installation and configuration instructions, please refer to [the official documentation](https://support.seeq.com/space/KB/1034059842).
```
tar xvf seeq-data-lab-<version>-64bit-linux.tar.gz
sudo seeq-data-lab-installer/install -f /opt/seeq/seeq-data-lab -g /var/opt/seeq -u seeq
sudo seeq config set Network/DataLab/Hostname localhost
sudo seeq config set Network/DataLab/Port 34231 # the port of the Data Lab server (usually 34231)
sudo seeq config set Network/Hostname <value> # the host IP or URL of the main Seeq Server
# If the main Seeq server is configured to listen over HTTPS
sudo seeq config set Network/Webserver/SecurePort 443 # the secure port of the main Seeq Server (usually 443)
# If the main Seeq server is NOT configured to listen over HTTPS
sudo seeq config set Network/Webserver/Port <value>
#On the main Seeq server, open a Seeq Command Prompt and set the hostname of the Data Lab server:
sudo seeq config set Network/DataLab/Hostname <value> # the host IP (not URL) of the Data Lab server
sudo seeq config set Network/DataLab/Port 34231 # the port of the Data Lab server (usually 34231)
```
### Install TDengine on-premise instance
See [Quick Install from Package](../../get-started).
### Or use TDengine Cloud
Register for a [TDengine Cloud](https://cloud.tdengine.com) account and log in to your account.
## Make Seeq be able to access TDengine
1. Get data location configuration
## Install TDengine JDBC connector
1. Get Seeq data location configuration
```
sudo seeq config get Folders/Data
```
2. Download TDengine Java connector from maven.org. Please use the latest version (Current is 3.2.5, https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/3.2.5/taos-jdbcdriver-3.2.5-dist.jar).
2. Download the latest TDengine Java connector from maven.org (the current version is [3.2.5](https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/3.2.5/taos-jdbcdriver-3.2.5-dist.jar)), and copy the JAR file into `<data location from step 1>/plugins/lib/`
3. Restart Seeq server
```
sudo seeq restart
```
4. Input License
## Add TDengine into Seeq's data source
1. Open Seeq, log in as admin, go to Administration, and click "Add Data Source"
2. For the connector, choose SQL connector v2
3. In the "Additional Configuration" input box, copy and paste the following
Use a browser to access ip:34216 and input the license according to the guide.
```
{
"QueryDefinitions": []
"Type": "GENERIC",
"Hostname": null,
"Port": 0,
"DatabaseName": null,
"Username": null,
"Password": null,
"InitialSql": null,
"TimeZone": null,
"PrintRows": false,
"UseWindowsAuth": false,
"SqlFetchBatchSize": 100000,
"UseSSL": false,
"JdbcProperties": null,
"GenericDatabaseConfig": {
"DatabaseJdbcUrl": "jdbc:TAOS-RS://localhost:6030/?user=root&password=taosdata",
"SqlDriverClassName": "com.taosdata.jdbc.rs.RestfulDriver",
"ResolutionInNanoseconds": 1000,
"ZonedColumnTypes": []
}
}
```
## How to use Seeq to analyze time-series data that TDengine serves
Note: You need to replace DatabaseJdbcUrl with your own setting. Please log in to TDengine Cloud, or open taosExplorer for the enterprise edition, and click Programming -> Java to find yours. For the "QueryDefinitions", please follow the examples below to write your own.
This chapter demonstrates how to use Seeq software in conjunction with TDengine for time series data analysis.
## Use Seeq to analyze time-series data stored inside TDengine
This chapter demonstrates how to use Seeq with TDengine for time series data analysis.
### Scenario Overview
@ -150,8 +134,8 @@ Please login with Seeq administrator and create a few data sources as following.
"Hostname": null,
"Port": 0,
"DatabaseName": null,
"Username": "root",
"Password": "taosdata",
"Username": null,
"Password": null,
"InitialSql": null,
"TimeZone": null,
"PrintRows": false,
@ -210,8 +194,8 @@ Please login with Seeq administrator and create a few data sources as following.
"Hostname": null,
"Port": 0,
"DatabaseName": null,
"Username": "root",
"Password": "taosdata",
"Username": null,
"Password": null,
"InitialSql": null,
"TimeZone": null,
"PrintRows": false,
@ -269,8 +253,8 @@ Please login with Seeq administrator and create a few data sources as following.
"Hostname": null,
"Port": 0,
"DatabaseName": null,
"Username": "root",
"Password": "taosdata",
"Username": null,
"Password": null,
"InitialSql": null,
"TimeZone": null,
"PrintRows": false,
@ -289,13 +273,13 @@ Please login with Seeq administrator and create a few data sources as following.
#### Launch Seeq Workbench
Please login to Seeq server with IP:port and create a new Seeq Workbench, then select data sources and choose the correct tools to do data visualization and analysis. Please refer to [the official documentation](https://support.seeq.com/space/KB/146440193/Seeq+Workbench) for the details.
Please login to Seeq server and create a new Seeq Workbench, then select data sources and choose the correct tools to do data visualization and analysis. Please refer to [the official documentation](https://support.seeq.com/space/KB/146440193/Seeq+Workbench) for the details.
![Seeq Workbench](./seeq/seeq-demo-workbench.webp)
#### Use Seeq Data Lab Server for advanced data analysis
Please login to the Seeq service with IP:port and create a new Seeq Data Lab. Then you can use advanced tools including Python environment and machine learning add-ons for more complex analysis.
Please login to the Seeq service and create a new Seeq Data Lab. Then you can use advanced tools including Python environment and machine learning add-ons for more complex analysis.
```Python
from seeq import spy
@ -370,13 +354,15 @@ Please note that when using TDengine Cloud, you need to specify the database nam
#### The data source of TDengine Cloud example
This data source contains data from a smart meter in the public database `smartmeters`.
```
{
"QueryDefinitions": [
{
"Name": "CloudVoltage",
"Type": "SIGNAL",
"Sql": "SELECT ts, voltage FROM test.meters",
"Sql": "SELECT ts, voltage FROM smartmeters.d1000",
"Enabled": true,
"TestMode": false,
"TestQueriesDuringSync": true,
@ -409,8 +395,8 @@ Please note that when using TDengine Cloud, you need to specify the database nam
"Hostname": null,
"Port": 0,
"DatabaseName": null,
"Username": "root",
"Password": "taosdata",
"Username": null,
"Password": null,
"InitialSql": null,
"TimeZone": null,
"PrintRows": false,
@ -419,7 +405,7 @@ Please note that when using TDengine Cloud, you need to specify the database nam
"UseSSL": false,
"JdbcProperties": null,
"GenericDatabaseConfig": {
"DatabaseJdbcUrl": "jdbc:TAOS-RS://gw.cloud.taosdata.com?useSSL=true&token=41ac9d61d641b6b334e8b76f45f5a8XXXXXXXXXX",
"DatabaseJdbcUrl": "jdbc:TAOS-RS://gw.us-west-2.aws.cloud.tdengine.com?useSSL=true&token=42b874395452d36f38dd6bf4317757611b213683",
"SqlDriverClassName": "com.taosdata.jdbc.rs.RestfulDriver",
"ResolutionInNanoseconds": 1000,
"ZonedColumnTypes": []
@ -433,8 +419,8 @@ Please note that when using TDengine Cloud, you need to specify the database nam
## Conclusion
By integrating Seeq and TDengine, it is possible to leverage the efficient storage and querying performance of TDengine while also benefiting from Seeq's powerful data visualization and analysis capabilities provided to users.
By integrating Seeq and TDengine, you can leverage the efficient storage and querying performance of TDengine while also benefiting from Seeq's powerful data visualization and analysis capabilities provided to users.
This integration allows users to take advantage of TDengine's high-performance time-series data storage and retrieval, ensuring efficient handling of large volumes of data. At the same time, Seeq provides advanced analytics features such as data visualization, anomaly detection, correlation analysis, and predictive modeling, enabling users to gain valuable insights and make data-driven decisions.
This integration allows users to take advantage of TDengine's high-performance time-series data storage and query, ensuring efficient handling of large volumes of data. At the same time, Seeq provides advanced analytics features such as data visualization, anomaly detection, correlation analysis, and predictive modeling, enabling users to gain valuable insights and make data-driven decisions.
Together, Seeq and TDengine provide a comprehensive solution for time series data analysis in diverse industries such as manufacturing, IIoT, and power systems. The combination of efficient data storage and advanced analytics empowers users to unlock the full potential of their time series data, driving operational improvements, and enabling predictive and prescriptive analytics applications.

View File

@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://t
import Release from "/components/ReleaseV3";
## 3.2.0.0
<Release type="tdengine" version="3.2.0.0" />
## 3.1.1.0
<Release type="tdengine" version="3.1.1.0" />

View File

@ -3,6 +3,7 @@ package main
import (
"fmt"
"os"
"time"
"github.com/taosdata/driver-go/v3/af"
"github.com/taosdata/driver-go/v3/af/tmq"
@ -27,15 +28,15 @@ func main() {
panic(err)
}
consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{
"group.id": "test",
"auto.offset.reset": "earliest",
"td.connect.ip": "127.0.0.1",
"td.connect.user": "root",
"td.connect.pass": "taosdata",
"td.connect.port": "6030",
"client.id": "test_tmq_client",
"enable.auto.commit": "false",
"msg.with.table.name": "true",
"group.id": "test",
"auto.offset.reset": "latest",
"td.connect.ip": "127.0.0.1",
"td.connect.user": "root",
"td.connect.pass": "taosdata",
"td.connect.port": "6030",
"client.id": "test_tmq_client",
"enable.auto.commit": "false",
"msg.with.table.name": "true",
})
if err != nil {
panic(err)
@ -48,12 +49,17 @@ func main() {
if err != nil {
panic(err)
}
_, err = db.Exec("insert into example_tmq.t1 values(now,1)")
if err != nil {
panic(err)
}
go func() {
for {
_, err = db.Exec("insert into example_tmq.t1 values(now,1)")
if err != nil {
panic(err)
}
time.Sleep(time.Microsecond * 100)
}
}()
for i := 0; i < 5; i++ {
ev := consumer.Poll(0)
ev := consumer.Poll(500)
if ev != nil {
switch e := ev.(type) {
case *tmqcommon.DataMessage:

View File

@ -66,7 +66,6 @@ public class SubscribeDemo {
properties.setProperty(TMQConstants.VALUE_DESERIALIZER,
"com.taos.example.MetersDeserializer");
properties.setProperty(TMQConstants.VALUE_DESERIALIZER_ENCODING, "UTF-8");
properties.setProperty(TMQConstants.EXPERIMENTAL_SNAPSHOT_ENABLE, "true");
// poll data
try (TaosConsumer<Meters> consumer = new TaosConsumer<>(properties)) {

View File

@ -66,7 +66,6 @@ public class WebsocketSubscribeDemo {
properties.setProperty(TMQConstants.VALUE_DESERIALIZER,
"com.taos.example.MetersDeserializer");
properties.setProperty(TMQConstants.VALUE_DESERIALIZER_ENCODING, "UTF-8");
properties.setProperty(TMQConstants.EXPERIMENTAL_SNAPSHOT_ENABLE, "true");
// poll data
try (TaosConsumer<Meters> consumer = new TaosConsumer<>(properties)) {

View File

@ -23,9 +23,6 @@ def taos_get_assignment_and_seek_demo():
consumer = Consumer(
{
"group.id": "0",
# should disable snapshot,
# otherwise it will cause invalid params error
"experimental.snapshot.enable": "false",
}
)
consumer.subscribe(["tmq_assignment_demo_topic"])

View File

@ -21,9 +21,6 @@ def taosws_get_assignment_and_seek_demo():
prepare()
consumer = taosws.Consumer(conf={
"td.connect.websocket.scheme": "ws",
# should disable snapshot,
# otherwise it will cause invalid params error
"experimental.snapshot.enable": "false",
"group.id": "0",
})
consumer.subscribe(["tmq_assignment_demo_topic"])

View File

@ -4,20 +4,14 @@ description: A brief introduction to the major features of TDengine
toc_max_heading_level: 2
---
TDengine is an open-source, high-performance, cloud-native [time-series database](https://tdengine.com/tsdb/) optimized for IoT, connected vehicles, Industrial IoT, finance, IT operations, and similar scenarios. All of TDengine's code, including the cluster feature, is open source under GNU AGPL v3.0. Besides the core time-series database, TDengine also provides [caching](../develop/cache/), [data subscription](../develop/tmq), [stream processing](../develop/stream), and other features to reduce system complexity and the cost of development and operations.
TDengine is a big data platform designed and optimized for IoT, Industrial Internet, and similar scenarios. It can safely and efficiently aggregate, store, analyze, and distribute the TB- or even PB-scale data produced every day by large numbers of devices and data collectors, monitor and alert on business operation status in real time, and provide real-time business insight. Its core module is TDengine OSS, a high-performance, clustered, open-source, cloud-native, and simplified time-series database.
This chapter introduces the main products and features of TDengine, its competitive advantages, typical use cases, benchmark comparisons with other databases, and more, to give you an overall picture of TDengine.
## Main Products
TDengine has three main products: TDengine Enterprise, TDengine Cloud, and TDengine OSS. For their definitions, see:
- [TDengine Enterprise](https://www.taosdata.com/tdengine-pro)
- [TDengine Cloud](https://cloud.taosdata.com/?utm_source=menu&utm_medium=webcn)
- [TDengine OSS](https://www.taosdata.com/tdengine-oss)
This section introduces the main features of TDengine OSS, its competitive advantages, typical use cases, benchmark comparisons with other databases, and more, to give you an overall picture of TDengine OSS.
## Main Features
The main features of TDengine are as follows:
The main features of TDengine OSS are as follows:
1. Data writing, supporting
- [SQL writing](../develop/insert-data/sql-writing)
@ -150,3 +144,10 @@ The main features of TDengine are as follows:
- [TDengine vs. InfluxDB: write performance comparison](https://www.taosdata.com/2021/11/05/3248.html)
- [TDengine vs. InfluxDB: query performance comparison report](https://www.taosdata.com/2022/02/22/5969.html)
- [Comparison test report of TDengine vs. InfluxDB, OpenTSDB, Cassandra, MySQL, ClickHouse, and other databases](https://www.taosdata.com/downloads/TDengine_Testing_Report_cn.pdf)
## Main Products
TDengine has two main products: TDengine Enterprise and TDengine Cloud. For their definitions, see:
- [TDengine Enterprise](https://www.taosdata.com/tdengine-pro)
- [TDengine Cloud](https://cloud.taosdata.com/?utm_source=menu&utm_medium=webcn)

View File

@ -4,7 +4,7 @@ description: 'Quickly set up a TDengine environment and experience its efficient writing and querying'
---
import xiaot from './xiaot.webp'
import xiaot_new from './xiaot-03.webp'
import xiaot_new from './xiaot-20231007.png'
import channel from './channel.webp'
import official_account from './official-account.webp'

Binary file not shown.


View File

@ -38,7 +38,10 @@ meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0
- Each data item in field_set must declare its own data type; for example, 1.2f32 denotes the FLOAT value 1.2, while a value without a type suffix is treated as DOUBLE
- timestamp supports multiple time precisions; the precision must be specified by a parameter when writing data, and 6 precisions from hours down to nanoseconds are supported
- To improve write efficiency, the field_set order is assumed by default to be identical across rows of the same super table (the first row contains all fields, and subsequent rows follow that order). If the order differs, the smlDataFormat parameter must be set to false; otherwise the data is written assuming identical order and the stored data will be wrong. Since version 3.0.1.3 smlDataFormat defaults to false, and since 3.0.3.0 this option is deprecated. [TDengine Schemaless Writing Reference](/reference/schemaless/#无模式写入行协议)
- By default the generated sub table name is a unique ID produced by the naming rule. Users can instead configure the smlChildTableName parameter in the client-side taos.cfg to use a tag value as the sub table name; that tag value should be globally unique. For example, with a tag named tname and smlChildTableName=tname configured, inserting st,tname=cpu1,t1=4 c1=3 1626006833639000000 creates the sub table cpu1. Note that if multiple rows share the same tname but different tag_set values, the tag_set of the first row is used when the table is auto-created and the other rows' tag_sets are ignored. [TDengine Schemaless Writing Reference](/reference/schemaless/#无模式写入行协议)
- Sub table name generation rules
  - By default the generated sub table name is a unique ID produced by the naming rule.
  - Users can configure the smlAutoChildTableNameDelimiter parameter in the client-side taos.cfg to specify a delimiter that joins the tag values into the sub table name. For example, with smlAutoChildTableNameDelimiter=- configured, inserting st,t0=cpu1,t1=4 c1=3 1626006833639000000 creates the sub table cpu1-4.
  - Users can configure the smlChildTableName parameter in the client-side taos.cfg to use a tag value as the sub table name; that tag value should be globally unique. For example, with a tag named tname and smlChildTableName=tname configured, inserting st,tname=cpu1,t1=4 c1=3 1626006833639000000 creates the sub table cpu1. Note that if multiple rows share the same tname but different tag_set values, the tag_set of the first row is used when the table is auto-created and the other rows' tag_sets are ignored. [TDengine Schemaless Writing Reference](/reference/schemaless/#无模式写入行协议)
:::

View File

@ -31,8 +31,11 @@ The OpenTSDB line protocol likewise uses a single line of text to represent one row of data. OpenTSDB
```txt
meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3
```
- Sub table name generation rules
  - By default the generated sub table name is a unique ID produced by the naming rule.
  - Users can configure the smlAutoChildTableNameDelimiter parameter in the client-side taos.cfg to specify a delimiter that joins the tag values into the sub table name. For example, with smlAutoChildTableNameDelimiter=- configured, inserting st,t0=cpu1,t1=4 c1=3 1626006833639000000 creates the sub table cpu1-4.
  - Users can configure the smlChildTableName parameter in the client-side taos.cfg to use a tag value as the sub table name; that tag value should be globally unique. For example, with a tag named tname and smlChildTableName=tname configured, inserting st,tname=cpu1,t1=4 c1=3 1626006833639000000 creates the sub table cpu1. Note that if multiple rows share the same tname but different tag_set values, the tag_set of the first auto-created row is used and the others are ignored. [TDengine Schemaless Writing Reference](/reference/schemaless/#无模式写入行协议)
- By default the generated sub table name is a unique ID produced by the naming rule. Users can also configure the smlChildTableName parameter in the client-side taos.cfg to use a tag value as the sub table name; that tag value should be globally unique. For example, with a tag named tname and smlChildTableName=tname configured, inserting meters.current 1648432611250 11.3 tname=cpu1 location=California.LosAngeles groupid=3 creates the table cpu1. Note that if multiple rows share the same tname but different tag_sets, the tag_set of the first auto-created row is used and the others are ignored
See the [OpenTSDB Telnet API documentation](http://opentsdb.net/docs/build/html/api_telnet/put.html).
## Sample Code

View File

@ -47,7 +47,10 @@ The OpenTSDB JSON protocol uses a JSON string to represent one or more rows of data
:::note
- For the JSON protocol, TDengine does not automatically convert all tags to the NCHAR type; strings are converted to NCHAR, while numeric values are converted to DOUBLE.
- By default the generated sub table name is a unique ID produced by the naming rule. Users can also configure the smlChildTableName parameter in the client-side taos.cfg to use a tag value as the sub table name; that tag value should be globally unique. For example, with a tag named tname and smlChildTableName=tname configured, inserting `"tags": { "host": "web02","dc": "lga","tname":"cpu1"}` creates the sub table cpu1. Note that if multiple rows share the same tname but different tag_sets, the tag_set of the first auto-created row is used and the others are ignored
- Sub table name generation rules
  - By default the generated sub table name is a unique ID produced by the naming rule.
  - Users can configure the smlAutoChildTableNameDelimiter parameter in the client-side taos.cfg to specify a delimiter that joins the tag values into the sub table name. For example, with smlAutoChildTableNameDelimiter=- configured, inserting st,t0=cpu1,t1=4 c1=3 1626006833639000000 creates the sub table cpu1-4.
  - Users can configure the smlChildTableName parameter in the client-side taos.cfg to use a tag value as the sub table name; that tag value should be globally unique. For example, with a tag named tname and smlChildTableName=tname configured, inserting st,tname=cpu1,t1=4 c1=3 1626006833639000000 creates the sub table cpu1. Note that if multiple rows share the same tname but different tag_set values, the tag_set of the first auto-created row is used and the others are ignored. [TDengine Schemaless Writing Reference](/reference/schemaless/#无模式写入行协议)
:::

View File

@ -63,17 +63,17 @@ import CDemo from "./_sub_c.mdx";
typedef void(tmq_commit_cb(tmq_t *tmq, int32_t code, void *param));
typedef enum tmq_conf_res_t {
TMQ_CONF_UNKNOWN = -2,
TMQ_CONF_INVALID = -1,
TMQ_CONF_OK = 0,
} tmq_conf_res_t;
TMQ_CONF_UNKNOWN = -2,
TMQ_CONF_INVALID = -1,
TMQ_CONF_OK = 0,
} tmq_conf_res_t;
typedef struct tmq_topic_assignment {
int32_t vgId;
int64_t currentOffset;
int64_t begin;
int64_t end;
} tmq_topic_assignment;
int32_t vgId;
int64_t currentOffset;
int64_t begin;
int64_t end;
} tmq_topic_assignment;
DLL_EXPORT tmq_conf_t *tmq_conf_new();
DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value);
@ -106,7 +106,7 @@ import CDemo from "./_sub_c.mdx";
DLL_EXPORT const char *tmq_get_db_name(TAOS_RES *res);
DLL_EXPORT int32_t tmq_get_vgroup_id(TAOS_RES *res);
DLL_EXPORT int64_t tmq_get_vgroup_offset(TAOS_RES* res);
DLL_EXPORT const char *tmq_err2str(int32_t code);DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param);
DLL_EXPORT const char *tmq_err2str(int32_t code);
```
The following describes how to use them (for the super table and sub table structures, see the "Data Modeling" section); complete sample code is given in the C example below.
@ -351,10 +351,11 @@ CREATE TOPIC topic_name [with meta] AS DATABASE db_name;
| `td.connect.port` | integer | Server port | |
| `group.id` | string | Consumer group ID; consumers in the same group share consumption progress | <br />**Required**. Maximum length: 192.<br />Each topic can have at most 100 consumer groups |
| `client.id` | string | Client ID | Maximum length: 192. |
| `auto.offset.reset` | enum | Initial position of the consumer group subscription | <br />`earliest`: default; subscribe from the earliest data; <br/>`latest`: subscribe only from the latest data; <br/>`none`: cannot subscribe without a committed offset |
| `auto.offset.reset` | enum | Initial position of the consumer group subscription | <br />`earliest`: default (version < 3.2.0.0); subscribe from the earliest data; <br/>`latest`: default (version >= 3.2.0.0); subscribe only from the latest data; <br/>`none`: cannot subscribe without a committed offset |
| `enable.auto.commit` | boolean | Whether to enable automatic commit of consumption offsets; true: commit automatically, no explicit commit needed in the client application; false: the client application must commit explicitly | Default: true |
| `auto.commit.interval.ms` | integer | Interval in milliseconds for automatically committing consumption offsets | Default: 5000 |
| `msg.with.table.name` | boolean | Whether to allow parsing the table name from the message; not applicable to column subscriptions (for column subscriptions, tbname can be written as a column in the subquery statement) | Disabled by default |
| `msg.with.table.name` | boolean | Whether to allow parsing the table name from the message; not applicable to column subscriptions (for column subscriptions, tbname can be written as a column in the subquery statement); deprecated since version 3.2.0.0 and always true | Disabled by default |
| `enable.replay` | boolean | Whether to enable the data replay feature | Disabled by default |
The configuration for each programming language is as follows:
@ -370,7 +371,7 @@ tmq_conf_set(conf, "auto.commit.interval.ms", "1000");
tmq_conf_set(conf, "group.id", "cgrpName");
tmq_conf_set(conf, "td.connect.user", "root");
tmq_conf_set(conf, "td.connect.pass", "taosdata");
tmq_conf_set(conf, "auto.offset.reset", "earliest");
tmq_conf_set(conf, "auto.offset.reset", "latest");
tmq_conf_set(conf, "msg.with.table.name", "true");
tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
@ -400,7 +401,7 @@ properties.setProperty("group.id", "cgrpName");
properties.setProperty("bootstrap.servers", "127.0.0.1:6030");
properties.setProperty("td.connect.user", "root");
properties.setProperty("td.connect.pass", "taosdata");
properties.setProperty("auto.offset.reset", "earliest");
properties.setProperty("auto.offset.reset", "latest");
properties.setProperty("msg.with.table.name", "true");
properties.setProperty("value.deserializer", "com.taos.example.MetersDeserializer");
@ -420,7 +421,7 @@ public class MetersDeserializer extends ReferenceDeserializer<Meters> {
```go
conf := &tmq.ConfigMap{
"group.id": "test",
"auto.offset.reset": "earliest",
"auto.offset.reset": "latest",
"td.connect.ip": "127.0.0.1",
"td.connect.user": "root",
"td.connect.pass": "taosdata",
@ -440,7 +441,7 @@ consumer, err := NewConsumer(conf)
let mut dsn: Dsn = "taos://".parse()?;
dsn.set("group.id", "group1");
dsn.set("client.id", "test");
dsn.set("auto.offset.reset", "earliest");
dsn.set("auto.offset.reset", "latest");
let tmq = TmqBuilder::from_dsn(dsn)?;
@ -459,7 +460,19 @@ from taos.tmq import Consumer
# Syntax: `consumer = Consumer(configs)`
#
# Example:
consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
consumer = Consumer(
{
"group.id": "local",
"client.id": "1",
"enable.auto.commit": "true",
"auto.commit.interval.ms": "1000",
"td.connect.ip": "127.0.0.1",
"td.connect.user": "root",
"td.connect.pass": "taosdata",
"auto.offset.reset": "latest",
"msg.with.table.name": "true",
}
)
```
</TabItem>
@ -476,7 +489,7 @@ let consumer = taos.consumer({
'group.id': 'tg2',
'td.connect.user': 'root',
'td.connect.pass': 'taosdata',
'auto.offset.reset': 'earliest',
'auto.offset.reset': 'latest',
'msg.with.table.name': 'true',
'td.connect.ip': '127.0.0.1',
'td.connect.port': '6030'
@ -499,7 +512,7 @@ var cfg = new ConsumerConfig
GourpId = "TDengine-TMQ-C#",
TDConnectUser = "root",
TDConnectPasswd = "taosdata",
AutoOffsetReset = "earliest"
AutoOffsetReset = "latest"
MsgWithTableName = "true",
TDConnectIp = "127.0.0.1",
TDConnectPort = "6030"
@ -515,6 +528,24 @@ var consumer = new ConsumerBuilder(cfg).Build();
The configuration above includes the consumer group ID. If multiple consumers specify the same consumer group ID, they automatically form one consumer group and share consumption progress.
Notes on the data replay feature:
- Subscription adds a replay feature that replays data according to the time it was written.
For example, if three rows are written at the following times
```sql
2023/09/22 00:00:00.000
2023/09/22 00:00:05.000
2023/09/22 00:00:08.000
```
then the second row is returned 5 s after the first row is consumed, and the third row is returned 3 s after the second.
- Only column subscriptions support data replay
- Replay requires an independent timeline
  - For a sub table or normal table subscription, the data resides on a single vnode, which guarantees a single timeline
  - For a super table subscription, the database must have only one vnode; otherwise an error is reported, because data subscribed from multiple vnodes is not on a single timeline
- Super table and database subscriptions do not support replay
- The enable.replay parameter is added: true enables subscription replay, false disables it; replay is disabled by default.
- Replay does not support saving progress, so when enable.replay = true, auto commit is automatically turned off
- Because replay itself requires processing time, replay timing carries an error of a few dozen milliseconds
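As a sketch, enabling replay is one extra key in the consumer configuration. The Go fragment below reuses the `tmq`/`tmqcommon` imports from the examples above and assumes the connector forwards `enable.replay` to the underlying library (a client and server version that support replay are required); auto commit is left disabled, since replay cannot save progress anyway:

```go
consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{
	"group.id":        "replay_demo",
	"td.connect.ip":   "127.0.0.1",
	"td.connect.user": "root",
	"td.connect.pass": "taosdata",
	"td.connect.port": "6030",
	// Rows are returned spaced by their original write intervals.
	"enable.replay": "true",
	// Replay cannot persist progress, so auto commit stays disabled.
	"enable.auto.commit": "false",
})
if err != nil {
	panic(err)
}
```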
## Subscribe to *topics*
A single consumer can subscribe to multiple topics at the same time.

View File

@ -1095,7 +1095,8 @@ TaosConsumer consumer = new TaosConsumer<>(config);
- httpConnectTimeout: connection creation timeout in milliseconds, default 5000 ms; effective only for WebSocket connections.
- messageWaitTimeout: data transfer timeout in milliseconds, default 10000 ms; effective only for WebSocket connections.
- httpPoolSize: maximum number of parallel requests on a single connection; effective only for WebSocket connections.
For other parameters, see: [Consumer parameter list](../../develop/tmq#创建-consumer-以及consumer-group)
For other parameters, see: [Consumer parameter list](../../develop/tmq#创建-consumer-以及consumer-group). Note: starting with version 3.2.0.0, the default value of auto.offset.reset in data subscription on the TDengine server has changed.
#### Subscribe to consume data
@ -1193,7 +1194,7 @@ public abstract class ConsumerLoop {
config.setProperty("bootstrap.servers", "localhost:6030");
config.setProperty("td.connect.user", "root");
config.setProperty("td.connect.pass", "taosdata");
config.setProperty("auto.offset.reset", "earliest");
config.setProperty("auto.offset.reset", "latest");
config.setProperty("msg.with.table.name", "true");
config.setProperty("enable.auto.commit", "true");
config.setProperty("auto.commit.interval.ms", "1000");
@ -1201,7 +1202,6 @@ public abstract class ConsumerLoop {
config.setProperty("client.id", "1");
config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer");
config.setProperty("value.deserializer.encoding", "UTF-8");
config.setProperty("experimental.snapshot.enable", "true");
this.consumer = new TaosConsumer<>(config);
this.topics = Collections.singletonList("topic_speed");
@ -1279,7 +1279,7 @@ public abstract class ConsumerLoop {
config.setProperty("bootstrap.servers", "localhost:6041");
config.setProperty("td.connect.user", "root");
config.setProperty("td.connect.pass", "taosdata");
config.setProperty("auto.offset.reset", "earliest");
config.setProperty("auto.offset.reset", "latest");
config.setProperty("msg.with.table.name", "true");
config.setProperty("enable.auto.commit", "true");
config.setProperty("auto.commit.interval.ms", "1000");
@ -1287,7 +1287,6 @@ public abstract class ConsumerLoop {
config.setProperty("client.id", "1");
config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer");
config.setProperty("value.deserializer.encoding", "UTF-8");
config.setProperty("experimental.snapshot.enable", "true");
this.consumer = new TaosConsumer<>(config);
this.topics = Collections.singletonList("topic_speed");

View File

@ -797,7 +797,7 @@ The TDengine Go connector supports the subscription feature; the APIs are as follows:
```go
consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{
"group.id": "test",
"auto.offset.reset": "earliest",
"auto.offset.reset": "latest",
"td.connect.ip": "127.0.0.1",
"td.connect.user": "root",
"td.connect.pass": "taosdata",
@ -873,6 +873,7 @@ package main
import (
"fmt"
"os"
"time"
"github.com/taosdata/driver-go/v3/af"
"github.com/taosdata/driver-go/v3/af/tmq"
@ -893,19 +894,16 @@ func main() {
if err != nil {
panic(err)
}
if err != nil {
panic(err)
}
consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{
"group.id": "test",
"auto.offset.reset": "earliest",
"td.connect.ip": "127.0.0.1",
"td.connect.user": "root",
"td.connect.pass": "taosdata",
"td.connect.port": "6030",
"client.id": "test_tmq_client",
"enable.auto.commit": "false",
"msg.with.table.name": "true",
"group.id": "test",
"auto.offset.reset": "latest",
"td.connect.ip": "127.0.0.1",
"td.connect.user": "root",
"td.connect.pass": "taosdata",
"td.connect.port": "6030",
"client.id": "test_tmq_client",
"enable.auto.commit": "false",
"msg.with.table.name": "true",
})
if err != nil {
panic(err)
@ -918,10 +916,16 @@ func main() {
if err != nil {
panic(err)
}
_, err = db.Exec("insert into example_tmq.t1 values(now,1)")
if err != nil {
panic(err)
}
go func() {
for {
_, err = db.Exec("insert into example_tmq.t1 values(now,1)")
if err != nil {
panic(err)
}
time.Sleep(time.Millisecond * 100)
}
}()
for i := 0; i < 5; i++ {
ev := consumer.Poll(500)
if ev != nil {
@ -975,6 +979,7 @@ package main
import (
"database/sql"
"fmt"
"time"
"github.com/taosdata/driver-go/v3/common"
tmqcommon "github.com/taosdata/driver-go/v3/common/tmq"
@ -998,7 +1003,7 @@ func main() {
"td.connect.pass": "taosdata",
"group.id": "example",
"client.id": "example_consumer",
"auto.offset.reset": "earliest",
"auto.offset.reset": "latest",
})
if err != nil {
panic(err)
@ -1007,29 +1012,34 @@ func main() {
if err != nil {
panic(err)
}
_, err = db.Exec("create table example_ws_tmq.t_all(ts timestamp," +
"c1 bool," +
"c2 tinyint," +
"c3 smallint," +
"c4 int," +
"c5 bigint," +
"c6 tinyint unsigned," +
"c7 smallint unsigned," +
"c8 int unsigned," +
"c9 bigint unsigned," +
"c10 float," +
"c11 double," +
"c12 binary(20)," +
"c13 nchar(20)" +
")")
if err != nil {
panic(err)
}
go func() {
_, err := db.Exec("create table example_ws_tmq.t_all(ts timestamp," +
"c1 bool," +
"c2 tinyint," +
"c3 smallint," +
"c4 int," +
"c5 bigint," +
"c6 tinyint unsigned," +
"c7 smallint unsigned," +
"c8 int unsigned," +
"c9 bigint unsigned," +
"c10 float," +
"c11 double," +
"c12 binary(20)," +
"c13 nchar(20)" +
")")
if err != nil {
panic(err)
}
_, err = db.Exec("insert into example_ws_tmq.t_all values(now,true,2,3,4,5,6,7,8,9,10.123,11.123,'binary','nchar')")
if err != nil {
panic(err)
for {
_, err = db.Exec("insert into example_ws_tmq.t_all values(now,true,2,3,4,5,6,7,8,9,10.123,11.123,'binary','nchar')")
if err != nil {
panic(err)
}
time.Sleep(time.Millisecond * 100)
}
}()
for i := 0; i < 5; i++ {
ev := consumer.Poll(500)

View File

@ -447,7 +447,7 @@ consumer.unsubscribe().await;
- `group.id`: Within a consumer group, messages are load-balanced with at-least-once consumption semantics.
- `client.id`: Optional subscriber client identifier.
- `auto.offset.reset`: Optional initial subscription position; *earliest* subscribes from the beginning, *latest* subscribes only from the latest data; the default is to subscribe from the beginning. Note that this option takes effect only once for the same `group.id`.
- `auto.offset.reset`: Optional initial subscription position; *earliest* subscribes from the beginning, *latest* subscribes only from the latest data; the default value depends on the TDengine version, see [Data Subscription](https://docs.taosdata.com/develop/tmq/) for details. Note that this option takes effect only once for the same `group.id`.
- `enable.auto.commit`: When set to `true`, enables automatic commit mode; it can be used when data consistency is not critical.
- `auto.commit.interval.ms`: Interval of automatic commits.

View File

@ -33,11 +33,13 @@ The source code of the Python connector is hosted on [GitHub](https://github.com/taosdata/taos-con
|Python Connector version|Major changes|
|:-------------------:|:----:|
|2.7.12|1. added support for the varbinary type (not yet supported in STMT) <br/> 2. improved query performance (thanks to contributor [hadrianl](https://github.com/taosdata/taos-connector-python/pull/209))|
|2.7.9|data subscription supports getting consumption progress and seeking|
|2.7.8|added `execute_many`|
|Python WebSocket Connector version|Major changes|
|:----------------------------:|:-----:|
|0.2.9|bug fixes|
|0.2.5|1. data subscription supports getting consumption progress and seeking <br/> 2. schemaless support <br/> 3. STMT support|
|0.2.4|added an unsubscribe method for data subscription|

View File

@ -24,7 +24,7 @@ SELECT [hints] [DISTINCT] select_list
hints: /*+ [hint([hint_param_list])] [hint([hint_param_list])] */
hint:
BATCH_SCAN | NO_BATCH_SCAN
BATCH_SCAN | NO_BATCH_SCAN | SORT_FOR_GROUP
select_list:
select_expr [, select_expr] ...
@ -87,15 +87,17 @@ Hints are a way for users to control query optimization for a single statement; when a hint does not apply
The currently supported hints are listed below:
| **Hint** | **Params** | **Comment** | **Scope** |
| :-----------: | -------------- | -------------------------- | -------------------------- |
| BATCH_SCAN | None | Use batched table reads | Super table JOIN statements |
| NO_BATCH_SCAN | None | Use sequential table reads | Super table JOIN statements |
| **Hint** | **Params** | **Comment** | **Scope** |
| :-----------: | -------------- | -------------------------- | -----------------------------|
| BATCH_SCAN | None | Use batched table reads | Super table JOIN statements |
| NO_BATCH_SCAN | None | Use sequential table reads | Super table JOIN statements |
| SORT_FOR_GROUP| None | Use sort-based grouping | When the partition by list contains normal columns |
Example:
```sql
SELECT /*+ BATCH_SCAN() */ a.ts FROM stable1 a, stable2 b where a.tag0 = b.tag0 and a.ts = b.ts;
SELECT /*+ SORT_FOR_GROUP() */ count(*), c1 FROM stable1 PARTITION BY c1;
```
## Lists

View File

@ -54,6 +54,7 @@ The LIKE condition performs matching with a wildcard string; the rules are as follows:
MATCH and NMATCH conditions use regular expressions for matching; the rules are as follows:
- Regular expressions conforming to the POSIX standard are supported; see Regular Expressions for the exact specification.
- MATCH returns TRUE when the value matches the regular expression; NMATCH returns TRUE when the value does not match.
- Regular-expression filtering applies only to sub table names (i.e. tbname) and string-typed tag values; filtering on normal columns is not supported.
- The length of a regular-expression match string cannot exceed 128 bytes. The maximum allowed length can be adjusted via the maxRegexStringLen parameter, a client-side configuration parameter that requires a client restart to take effect
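For illustration, a minimal Go sketch running a MATCH filter (the `test` database, `meters` super table, and `^d[0-9]+$` pattern are placeholders):

```go
package main

import (
	"database/sql"
	"fmt"

	_ "github.com/taosdata/driver-go/v3/taosSql"
)

func main() {
	db, err := sql.Open("taosSql", "root:taosdata@tcp(127.0.0.1:6030)/test")
	if err != nil {
		panic(err)
	}
	defer db.Close()
	// MATCH applies only to tbname and string-typed tag values,
	// and the pattern must stay under 128 bytes (maxRegexStringLen).
	rows, err := db.Query("SELECT DISTINCT tbname FROM meters WHERE tbname MATCH '^d[0-9]+$'")
	if err != nil {
		panic(err)
	}
	defer rows.Close()
	for rows.Next() {
		var name string
		if err := rows.Scan(&name); err != nil {
			panic(err)
		}
		fmt.Println(name)
	}
}
```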

View File

@ -180,6 +180,7 @@ description: A detailed list of TDengine reserved keywords
- MAX_DELAY
- BWLIMIT
- MAXROWS
- MAX_SPEED
- MERGE
- META
- MINROWS

View File

@ -26,7 +26,7 @@ TDengine has a built-in database named `INFORMATION_SCHEMA` that provides access to database metadata
## INS_DNODES
Provides information about dnodes. SHOW DNODES can also be used to query this information.
Provides information about dnodes. SHOW DNODES can also be used to query this information. Users whose SYSINFO attribute is 0 cannot view this table.
| # | **Column** | **Data Type** | **Description** |
| --- | :------------: | ------------ | ----------------------------------------------------------------------------------------------------- |
@ -40,7 +40,7 @@ TDengine has a built-in database named `INFORMATION_SCHEMA` that provides access to database metadata
## INS_MNODES
Provides information about mnodes. SHOW MNODES can also be used to query this information.
Provides information about mnodes. SHOW MNODES can also be used to query this information. Users whose SYSINFO attribute is 0 cannot view this table.
| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------ | ------------------ |
@ -52,22 +52,33 @@ TDengine has a built-in database named `INFORMATION_SCHEMA` that provides access to database metadata
## INS_QNODES
Information about QNODEs in the current system. SHOW QNODES can also be used to query this information.
Information about QNODEs in the current system. SHOW QNODES can also be used to query this information. Users whose SYSINFO attribute is 0 cannot view this table.
| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------ | ------------ |
| 1 | id | SMALLINT | qnode id |
| 2 | endpoint | BINARY(134) | qnode endpoint |
| 2 | endpoint | VARCHAR(134) | qnode endpoint |
| 3 | create_time | TIMESTAMP | creation time |
## INS_SNODES
Information about SNODEs in the current system. SHOW SNODES can also be used to query this information. Users whose SYSINFO attribute is 0 cannot view this table.
| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------ | ------------ |
| 1 | id | SMALLINT | snode id |
| 2 | endpoint | VARCHAR(134) | snode endpoint |
| 3 | create_time | TIMESTAMP | creation time |
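The SYSINFO restriction mentioned above is set when a user is created. A hedged Go sketch (the user name and password are examples, and `db` is assumed to be an open `*sql.DB` connected as root):

```go
// Users created with SYSINFO 0 cannot read the system tables above.
if _, err := db.Exec("CREATE USER readonly PASS 'taosdata123' SYSINFO 0"); err != nil {
	panic(err)
}
// A session opened as `readonly` will then get an error on queries such as
// SELECT * FROM information_schema.ins_dnodes.
```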
## INS_CLUSTER
Stores cluster-related information.
Stores cluster-related information. Users whose SYSINFO attribute is 0 cannot view this table.
| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------ | ---------- |
| 1 | id | BIGINT | cluster id |
| 2 | name | BINARY(134) | cluster name |
| 2 | name | VARCHAR(134) | cluster name |
| 3 | create_time | TIMESTAMP | creation time |
## INS_DATABASES
@ -76,25 +87,25 @@ TDengine has a built-in database named `INFORMATION_SCHEMA` that provides access to database metadata
| # | **Column** | **Data Type** | **Description** |
| --- | :------------------: | ---------------- | ------------------------------------------------ |
| 1 | name | BINARY(32) | database name |
| 1 | name | VARCHAR(64) | database name |
| 2 | create_time | TIMESTAMP | creation time |
| 3 | ntables | INT | number of tables in the database, including sub tables and normal tables but excluding super tables |
| 4 | vgroups | INT | number of vgroups in the database. Note that `vgroups` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 6 | replica | INT | number of replicas. Note that `replica` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 7 | strict | BINARY(4) | deprecated parameter |
| 8 | duration | INT | time span of data stored in a single file. Note that `duration` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 9 | keep | INT | data retention period. Note that `keep` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 7 | strict | VARCHAR(4) | deprecated parameter |
| 8 | duration | VARCHAR(10) | time span of data stored in a single file. Note that `duration` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 9 | keep | VARCHAR(32) | data retention period. Note that `keep` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 10 | buffer | INT | size of the write cache memory block per vnode, in MB. Note that `buffer` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 11 | pagesize | INT | page size of the metadata storage engine in each VNODE, in KB. Note that `pagesize` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 12 | pages | INT | number of cached pages in each vnode's metadata storage engine. Note that `pages` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 13 | minrows | INT | minimum number of records in a file block. Note that `minrows` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 14 | maxrows | INT | maximum number of records in a file block. Note that `maxrows` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 15 | comp | INT | data compression method. Note that `comp` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 16 | precision | BINARY(2) | time resolution. Note that `precision` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 17 | status | BINARY(10) | database status |
| 18 | retentions | BINARY (60) | aggregation periods and retention durations of the data. Note that `retentions` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 16 | precision | VARCHAR(2) | time resolution. Note that `precision` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 17 | status | VARCHAR(10) | database status |
| 18 | retentions | VARCHAR(60) | aggregation periods and retention durations of the data. Note that `retentions` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 19 | single_stable | BOOL | whether only one super table can be created in this database. Note that `single_stable` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 20 | cachemodel | BINARY(60) | whether the most recent data of sub tables is cached in memory. Note that `cachemodel` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 20 | cachemodel | VARCHAR(60) | whether the most recent data of sub tables is cached in memory. Note that `cachemodel` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 21 | cachesize | INT | memory size per vnode used to cache the most recent data of sub tables. Note that `cachesize` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 22 | wal_level | INT | WAL level. Note that `wal_level` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 23 | wal_fsync_period | INT | data flush-to-disk period. Note that `wal_fsync_period` is a TDengine keyword and must be escaped with ` when used as a column name. |
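For example, the table can be queried like any other. A minimal Go sketch follows; note the backtick escaping of keyword columns such as `vgroups` and `replica`, as the descriptions above require:

```go
package main

import (
	"database/sql"
	"fmt"

	_ "github.com/taosdata/driver-go/v3/taosSql"
)

func main() {
	db, err := sql.Open("taosSql", "root:taosdata@tcp(127.0.0.1:6030)/")
	if err != nil {
		panic(err)
	}
	defer db.Close()
	// `vgroups` and `replica` are TDengine keywords, hence the backticks.
	rows, err := db.Query("SELECT name, ntables, `vgroups`, `replica` FROM information_schema.ins_databases")
	if err != nil {
		panic(err)
	}
	defer rows.Close()
	for rows.Next() {
		var name string
		var ntables, vgroups, replica int64
		if err := rows.Scan(&name, &ntables, &vgroups, &replica); err != nil {
			panic(err)
		}
		fmt.Println(name, ntables, vgroups, replica)
	}
}
```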
@ -111,15 +122,15 @@ TDengine has a built-in database named `INFORMATION_SCHEMA` that provides access to database metadata
| # | **Column** | **Data Type** | **Description** |
| --- | :-----------: | ------------- | --------------------------------------------------------------------------------------------- |
| 1 | name | BINARY(64) | function name |
| 2 | comment | BINARY(255) | additional description. Note that `comment` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 1 | name | VARCHAR(64) | function name |
| 2 | comment | VARCHAR(255) | additional description. Note that `comment` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 3 | aggregate | INT | whether it is an aggregate function. Note that `aggregate` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 4 | output_type | BINARY(31) | output type |
| 4 | output_type | VARCHAR(31) | output type |
| 5 | create_time | TIMESTAMP | creation time |
| 6 | code_len | INT | code length |
| 7 | bufsize | INT | buffer size |
| 8 | func_language | BINARY(31) | programming language of the user-defined function |
| 9 | func_body | BINARY(16384) | function body definition |
| 8 | func_language | VARCHAR(31) | programming language of the user-defined function |
| 9 | func_body | VARCHAR(16384) | function body definition |
| 10 | func_version | INT | function version number; the initial version is 0, and the version number increases by 1 on each replacement or update. |
@ -129,12 +140,12 @@ TDengine has a built-in database named `INFORMATION_SCHEMA` that provides access to database metadata
| # | **Column** | **Data Type** | **Description** |
| --- | :--------------: | ------------ | ------------------------------------------------------- |
| 1 | db_name | BINARY(32) | name of the database containing the table with this index |
| 2 | table_name | BINARY(192) | name of the table containing this index |
| 3 | index_name | BINARY(192) | index name |
| 4 | column_name | BINARY(64) | name of the indexed column |
| 5 | index_type | BINARY(10) | currently SMA and tag |
| 6 | index_extensions | BINARY(256) | additional index information; for SMA/tag indexes, a list of function names. |
| 1 | db_name | VARCHAR(32) | name of the database containing the table with this index |
| 2 | table_name | VARCHAR(192) | name of the table containing this index |
| 3 | index_name | VARCHAR(192) | index name |
| 4 | column_name | VARCHAR(64) | name of the indexed column |
| 5 | index_type | VARCHAR(10) | currently SMA and tag |
| 6 | index_extensions | VARCHAR(256) | additional index information; for SMA/tag indexes, a list of function names. |
## INS_STABLES
@ -142,16 +153,16 @@ TDengine has a built-in database named `INFORMATION_SCHEMA` that provides access to database metadata
| # | **Column** | **Data Type** | **Description** |
| --- | :-----------: | ------------ | ----------------------------------------------------------------------------------------------------- |
| 1 | stable_name | BINARY(192) | super table name |
| 2 | db_name | BINARY(64) | name of the database the super table belongs to |
| 1 | stable_name | VARCHAR(192) | super table name |
| 2 | db_name | VARCHAR(64) | name of the database the super table belongs to |
| 3 | create_time | TIMESTAMP | creation time |
| 4 | columns | INT | number of columns |
| 5 | tags | INT | number of tags. Note that `tags` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 6 | last_update | TIMESTAMP | last update time |
| 7 | table_comment | BINARY(1024) | table comment |
| 8 | watermark | BINARY(64) | window close time. Note that `watermark` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 9 | max_delay | BINARY(64) | maximum delay for pushing computed results. Note that `max_delay` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 10 | rollup | BINARY(128) | rollup aggregate functions. Note that `rollup` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 7 | table_comment | VARCHAR(1024) | table comment |
| 8 | watermark | VARCHAR(64) | window close time. Note that `watermark` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 9 | max_delay | VARCHAR(64) | maximum delay for pushing computed results. Note that `max_delay` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 10 | rollup | VARCHAR(128) | rollup aggregate functions. Note that `rollup` is a TDengine keyword and must be escaped with ` when used as a column name. |
## INS_TABLES
@ -159,37 +170,37 @@ TDengine has a built-in database named `INFORMATION_SCHEMA` that provides access to database metadata
| # | **Column** | **Data Type** | **Description** |
| --- | :-----------: | ------------ | ------------------------------------------------------------------------------------- |
| 1 | table_name | BINARY(192) | table name |
| 2 | db_name | BINARY(64) | database name |
| 1 | table_name | VARCHAR(192) | table name |
| 2 | db_name | VARCHAR(64) | database name |
| 3 | create_time | TIMESTAMP | creation time |
| 4 | columns | INT | number of columns |
| 5 | stable_name | BINARY(192) | name of the super table it belongs to |
| 5 | stable_name | VARCHAR(192) | name of the super table it belongs to |
| 6 | uid | BIGINT | table id |
| 7 | vgroup_id | INT | vgroup id |
| 8 | ttl | INT | table time-to-live. Note that `ttl` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 9 | table_comment | BINARY(1024) | table comment |
| 10 | type | BINARY(21) | table type |
| 9 | table_comment | VARCHAR(1024) | table comment |
| 10 | type | VARCHAR(21) | table type |
## INS_TAGS
| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------- | ---------------------- |
| 1 | table_name | BINARY(192) | table name |
| 2 | db_name | BINARY(64) | name of the database the table belongs to |
| 3 | stable_name | BINARY(192) | name of the super table it belongs to |
| 4 | tag_name | BINARY(64) | tag name |
| 5 | tag_type | BINARY(64) | tag type |
| 6 | tag_value | BINARY(16384) | tag value |
| 1 | table_name | VARCHAR(192) | table name |
| 2 | db_name | VARCHAR(64) | name of the database the table belongs to |
| 3 | stable_name | VARCHAR(192) | name of the super table it belongs to |
| 4 | tag_name | VARCHAR(64) | tag name |
| 5 | tag_type | VARCHAR(64) | tag type |
| 6 | tag_value | VARCHAR(16384) | tag value |
## INS_COLUMNS
| # | **Column** | **Data Type** | **Description** |
| --- | :-----------: | ------------ | ---------------------- |
| 1 | table_name | BINARY(192) | table name |
| 2 | db_name | BINARY(64) | name of the database the table belongs to |
| 3 | table_type | BINARY(21) | table type |
| 4 | col_name | BINARY(64) | column name |
| 5 | col_type | BINARY(32) | column type |
| 1 | table_name | VARCHAR(192) | table name |
| 2 | db_name | VARCHAR(64) | name of the database the table belongs to |
| 3 | table_type | VARCHAR(21) | table type |
| 4 | col_name | VARCHAR(64) | column name |
| 5 | col_type | VARCHAR(32) | column type |
| 6 | col_length | INT | column length |
| 7 | col_precision | INT | column precision |
| 8 | col_scale | INT | column scale |
@ -197,51 +208,51 @@ TDengine has a built-in database named `INFORMATION_SCHEMA` that provides access to database metadata
## INS_USERS
Provides information about users created in the system.
Provides information about users created in the system. Users whose SYSINFO attribute is 0 cannot view this table.
| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------ | -------- |
| 1 | user_name | BINARY(23) | user name |
| 2 | privilege | BINARY(256) | privileges |
| 1 | user_name | VARCHAR(23) | user name |
| 2 | privilege | VARCHAR(256) | privileges |
| 3 | create_time | TIMESTAMP | creation time |
## INS_GRANTS
Provides information about Enterprise Edition licenses.
Provides information about Enterprise Edition licenses. Users whose SYSINFO attribute is 0 cannot view this table.
| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------ | --------------------------------------------------------------------------------------------------------- |
| 1 | version | BINARY(9) | License type: official (officially licensed) / trial (trial license) |
| 2 | cpu_cores | BINARY(9) | Number of licensed CPU cores |
| 3 | dnodes | BINARY(10) | Number of licensed dnodes. Note that `dnodes` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 4 | streams | BINARY(10) | Number of streams allowed. Note that `streams` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 5 | users | BINARY(10) | Number of users allowed. Note that `users` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 6 | accounts | BINARY(10) | Number of accounts allowed. Note that `accounts` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 7 | storage | BINARY(21) | Licensed storage size. Note that `storage` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 8 | connections | BINARY(21) | Number of licensed client connections. Note that `connections` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 9 | databases | BINARY(11) | Number of licensed databases. Note that `databases` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 10 | speed | BINARY(9) | Licensed write rate, in data points per second |
| 11 | querytime | BINARY(9) | Total licensed query duration |
| 12 | timeseries | BINARY(21) | Number of licensed time series |
| 13 | expired | BINARY(5) | Whether the license has expired: true (expired) / false (not expired) |
| 14 | expire_time | BINARY(19) | Trial expiration time |
| 1 | version | VARCHAR(9) | License type: official (officially licensed) / trial (trial license) |
| 2 | cpu_cores | VARCHAR(9) | Number of licensed CPU cores |
| 3 | dnodes | VARCHAR(10) | Number of licensed dnodes. Note that `dnodes` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 4 | streams | VARCHAR(10) | Number of streams allowed. Note that `streams` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 5 | users | VARCHAR(10) | Number of users allowed. Note that `users` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 6 | accounts | VARCHAR(10) | Number of accounts allowed. Note that `accounts` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 7 | storage | VARCHAR(21) | Licensed storage size. Note that `storage` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 8 | connections | VARCHAR(21) | Number of licensed client connections. Note that `connections` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 9 | databases | VARCHAR(11) | Number of licensed databases. Note that `databases` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 10 | speed | VARCHAR(9) | Licensed write rate, in data points per second |
| 11 | querytime | VARCHAR(9) | Total licensed query duration |
| 12 | timeseries | VARCHAR(21) | Number of licensed time series |
| 13 | expired | VARCHAR(5) | Whether the license has expired: true (expired) / false (not expired) |
| 14 | expire_time | VARCHAR(19) | Trial expiration time |
## INS_VGROUPS
Information about all vgroups in the system.
Information about all vgroups in the system. Users whose SYSINFO attribute is 0 cannot view this table.
| # | **Column** | **Data Type** | **Description** |
| --- | :-------: | ------------ | ------------------------------------------------------------------------------------------------ |
| 1 | vgroup_id | INT | vgroup ID |
| 2 | db_name | BINARY(32) | Database name |
| 2 | db_name | VARCHAR(32) | Database name |
| 3 | tables | INT | Number of tables in this vgroup. Note that `tables` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 4 | status | BINARY(10) | Status of this vgroup |
| 4 | status | VARCHAR(10) | Status of this vgroup |
| 5 | v1_dnode | INT | ID of the dnode hosting the first member |
| 6 | v1_status | BINARY(10) | Status of the first member |
| 6 | v1_status | VARCHAR(10) | Status of the first member |
| 7 | v2_dnode | INT | ID of the dnode hosting the second member |
| 8 | v2_status | BINARY(10) | Status of the second member |
| 8 | v2_status | VARCHAR(10) | Status of the second member |
| 9 | v3_dnode | INT | ID of the dnode hosting the third member |
| 10 | v3_status | BINARY(10) | Status of the third member |
| 10 | v3_status | VARCHAR(10) | Status of the third member |
| 11 | nfiles | INT | Number of data/metadata files in this vgroup |
| 12 | file_size | INT | Size of the data/metadata files in this vgroup |
| 13 | tsma | TINYINT | Whether this vgroup is dedicated to time-range-wise SMA: 1 yes, 0 no |
@ -252,55 +263,57 @@ TDengine ships with a built-in database named `INFORMATION_SCHEMA`, which provides
| # | **Column** | **Data Type** | **Description** |
| --- | :------: | ------------ | --------------------------------------------------------------------------------------- |
| 1 | name | BINARY(32) | Name of the configuration item |
| 2 | value | BINARY(64) | Value of the configuration item. Note that `value` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 1 | name | VARCHAR(32) | Name of the configuration item |
| 2 | value | VARCHAR(64) | Value of the configuration item. Note that `value` is a TDengine keyword and must be escaped with ` when used as a column name. |
## INS_DNODE_VARIABLES
Configuration parameters of each dnode in the system.
Configuration parameters of each dnode in the system. Users whose SYSINFO attribute is 0 cannot view this table.
| # | **Column** | **Data Type** | **Description** |
| --- | :------: | ------------ | --------------------------------------------------------------------------------------- |
| 1 | dnode_id | INT | dnode ID |
| 2 | name | BINARY(32) | Name of the configuration item |
| 3 | value | BINARY(64) | Value of the configuration item. Note that `value` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 2 | name | VARCHAR(32) | Name of the configuration item |
| 3 | value | VARCHAR(64) | Value of the configuration item. Note that `value` is a TDengine keyword and must be escaped with ` when used as a column name. |
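As the notes indicate, `value` must be escaped when selected explicitly. A minimal sketch (dnode ID 1 is a placeholder):

```sql
-- Inspect the effective configuration of one dnode.
SELECT name, `value`
FROM information_schema.ins_dnode_variables
WHERE dnode_id = 1;
```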
## INS_TOPICS
| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------ | ------------------------------ |
| 1 | topic_name | BINARY(192) | Topic name |
| 2 | db_name | BINARY(64) | Database associated with the topic |
| 1 | topic_name | VARCHAR(192) | Topic name |
| 2 | db_name | VARCHAR(64) | Database associated with the topic |
| 3 | create_time | TIMESTAMP | Creation time of the topic |
| 4 | sql | BINARY(1024) | SQL statement used to create the topic |
| 4 | sql | VARCHAR(1024) | SQL statement used to create the topic |
## INS_SUBSCRIPTIONS
| # | **Column** | **Data Type** | **Description** |
| --- | :------------: | ------------ | ------------------------ |
| 1 | topic_name | BINARY(204) | Subscribed topic |
| 2 | consumer_group | BINARY(193) | Consumer group of the subscriber |
| 1 | topic_name | VARCHAR(204) | Subscribed topic |
| 2 | consumer_group | VARCHAR(193) | Consumer group of the subscriber |
| 3 | vgroup_id | INT | vgroup assigned to the consumer |
| 4 | consumer_id | BIGINT | Unique ID of the consumer |
| 5 | offset | BINARY(64) | Consumption progress of the consumer |
| 5 | offset | VARCHAR(64) | Consumption progress of the consumer |
| 6 | rows | BIGINT | Number of rows consumed by the consumer |
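This table is a convenient way to check consumption progress per consumer. A minimal sketch (backticks guard against keyword collisions such as OFFSET; run it while a TMQ subscription is active):

```sql
-- Show each consumer's assigned vgroup and consumption progress per topic.
SELECT topic_name, consumer_group, vgroup_id, `offset`, `rows`
FROM information_schema.ins_subscriptions;
```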
## INS_STREAMS
| # | **Column** | **Data Type** | **Description** |
| --- | :----------: | ------------ | -------------------------------------------------------------------------------------------------------------------- |
| 1 | stream_name | BINARY(64) | Stream name |
| 1 | stream_name | VARCHAR(64) | Stream name |
| 2 | create_time | TIMESTAMP | Creation time |
| 3 | sql | BINARY(1024) | SQL statement provided when the stream was created |
| 4 | status | BINARY(20) | Current status of the stream |
| 5 | source_db | BINARY(64) | Source database |
| 6 | target_db | BINARY(64) | Target database |
| 7 | target_table | BINARY(192) | Target table the stream writes to |
| 3 | sql | VARCHAR(1024) | SQL statement provided when the stream was created |
| 4 | status | VARCHAR(20) | Current status of the stream |
| 5 | source_db | VARCHAR(64) | Source database |
| 6 | target_db | VARCHAR(64) | Target database |
| 7 | target_table | VARCHAR(192) | Target table the stream writes to |
| 8 | watermark | BIGINT | Watermark; see the stream processing section of the SQL manual. Note that `watermark` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 9 | trigger | INT | Mode for pushing computation results; see the stream processing section of the SQL manual. Note that `trigger` is a TDengine keyword and must be escaped with ` when used as a column name. |
## INS_USER_PRIVILEGES
Users whose SYSINFO attribute is 0 cannot view this table.
| # | **Column** | **Data Type** | **Description** |
| --- | :----------: | ------------ | -------------------------------------------------------------------------------------------------------------------- |
| 1 | user_name | VARCHAR(24) | User name |

View File

@ -73,10 +73,10 @@ SHOW CREATE TABLE [db_name.]tb_name
## SHOW DATABASES
```sql
SHOW DATABASES;
SHOW [USER | SYSTEM] DATABASES;
```
Shows all user-defined databases.
Shows all defined databases. SYSTEM shows only system databases; USER shows only user-created databases.
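For example (a sketch; the output depends on the databases present in your deployment):

```sql
SHOW DATABASES;         -- all databases
SHOW SYSTEM DATABASES;  -- only system databases such as information_schema
SHOW USER DATABASES;    -- only databases created by users
```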
## SHOW DNODES
@ -183,10 +183,10 @@ SHOW SUBSCRIPTIONS;
## SHOW TABLES
```sql
SHOW [db_name.]TABLES [LIKE 'pattern'];
SHOW [NORMAL | CHILD] [db_name.]TABLES [LIKE 'pattern'];
```
Shows information about all normal tables and child tables in the current database. LIKE can be used for fuzzy matching on table names.
Shows information about all normal tables and child tables in the current database. LIKE can be used for fuzzy matching on table names. NORMAL shows only normal tables; CHILD shows only child tables.
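For example (a sketch; the LIKE pattern is a placeholder):

```sql
SHOW NORMAL TABLES;             -- only normal tables in the current database
SHOW CHILD TABLES LIKE 'd10%';  -- only child tables whose names start with d10
```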
## SHOW TABLE DISTRIBUTED

View File

@ -395,6 +395,7 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
### Configuration parameters for query scenarios
In query scenarios, `filetype` must be set to `query`.
`query_times` specifies how many times to run the queries; numeric type.
Query scenarios can kill slow queries via the `kill_slow_query_threshold` and `kill_slow_query_interval` parameters: taosBenchmark kills any query whose exec_usec exceeds the threshold (in seconds), and the interval sets the sleep time between checks (in seconds) so that polling for slow queries does not continuously consume CPU.

View File

@ -106,7 +106,7 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
use letter and number only. Default is NOT.
-n, --no-escape No escape char '`'. Default is using it.
-Q, --dot-replace Replace dot character with underline character in
the table name.
the table name. (Version 2.5.3)
-T, --thread-num=THREAD_NUM Number of thread for dump in file. Default is
8.
-C, --cloud=CLOUD_DSN specify a DSN to access TDengine cloud service
@ -116,6 +116,10 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
-?, --help Give this help list
--usage Give a short usage message
-V, --version Print program version
-W, --rename=RENAME-LIST Rename database name with new name during
importing data. RENAME-LIST:
"db1=newDB1|db2=newDB2" means rename db1 to newDB1
and rename db2 to newDB2 (Version 2.5.4)
Mandatory or optional arguments to long options are also mandatory or optional
for any corresponding short options.

View File

@ -648,7 +648,16 @@ The valid value of charset is UTF-8.
| Applicable | Client only |
| Meaning | Key of the custom child table name for schemaless writes |
| Type | String |
| Default | None |
| Default | None
### smlAutoChildTableNameDelimiter
| Attribute | Description |
| -------- | ------------------------------- |
| Applicable | Client only |
| Meaning | Delimiter joining schemaless tag values to form the child table name |
| Type | String |
| Default | None |
### smlTagName

View File

@ -94,8 +94,11 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
:::tip
Note that tag_key1, tag_key2 here are not in the original order in which the user entered the tags, but the result of sorting the tag names in ascending string order; tag_key1 is therefore not necessarily the first tag entered in the line protocol.
After sorting, the MD5 hash "md5_val" of the string is computed, and the result is combined into the table name "t_md5_val". The "t_" prefix is fixed; every table generated automatically through this mapping carries it.
:::tip
To let users specify the generated table name, configure the smlChildTableName parameter in taos.cfg.
:::tip
If you do not want the automatically generated table name, there are two ways to specify the child table name; the first takes higher priority:
Configure the smlAutoChildTableNameDelimiter parameter in taos.cfg.
For example: with smlAutoChildTableNameDelimiter=- configured, inserting st,t0=cpu1,t1=4 c1=3 1626006833639000000 creates a table named cpu1-4.
Configure the smlChildTableName parameter in taos.cfg.
For example: with smlChildTableName=tname configured, inserting st,tname=cpu1,t1=4 c1=3 1626006833639000000 creates a table named cpu1. Note that if multiple rows have the same tname but different tag_sets, the tag_set from the first row that auto-created the table is used and the others are ignored.
2. If the super table obtained by parsing the line protocol does not exist, it is created (manually creating super tables is not recommended, as inserting data may otherwise behave unexpectedly).

View File

@ -9,8 +9,6 @@ TDengine uses [taosKeeper](/reference/taosKeeper/) to report the server's CPU, mem
## TDinsight - monitoring TDengine with the monitoring database + Grafana
The monitoring database provides more monitoring metrics; see the [TDinsight Grafana Dashboard](/reference/tdinsight/) to learn how to monitor TDengine with the TDinsight solution.
We provide an automated deployment script, `TDinsight.sh`.
Download `TDinsight.sh`:
@ -37,8 +35,6 @@ chmod +x TDinsight.sh
Run the script and restart the Grafana service, then open the dashboard at `http://localhost:3000/d/tdinsight`.
For more usage scenarios and limitations, see the [TDinsight](/reference/tdinsight/) documentation.
## The log database
The TDinsight dashboard reads its data from the log database (the default database for storing monitoring data; this can be changed in the taoskeeper configuration file, see the [taoskeeper documentation](/reference/taosKeeper)). After taoskeeper starts, it automatically creates the log database and writes monitoring data into it.
@ -102,22 +98,22 @@ The TDinsight dashboard reads its data from the log database (the default db for storing monitoring data
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|uptime|FLOAT||dnode uptime|
|uptime|FLOAT||dnode uptime, in days|
|cpu\_engine|FLOAT||taosd CPU usage, read from `/proc/<taosd_pid>/stat`|
|cpu\_system|FLOAT||server CPU usage, read from `/proc/stat`|
|cpu\_cores|FLOAT||number of server CPU cores|
|mem\_engine|INT||taosd memory usage, read from `/proc/<taosd_pid>/status`|
|mem\_system|INT||available server memory|
|mem\_system|INT||available server memory, in KB|
|mem\_total|INT||total server memory, in KB|
|disk\_engine|INT|||
|disk\_engine|INT||in bytes|
|disk\_used|BIGINT||usage of the disk mounted at the data dir, in bytes|
|disk\_total|BIGINT||total capacity of the disk mounted at the data dir, in bytes|
|net\_in|FLOAT||network throughput, received bytes read from `/proc/net/dev`, in kb/s|
|net\_out|FLOAT||network throughput, transmit bytes read from `/proc/net/dev`, in kb/s|
|io\_read|FLOAT||IO throughput, rate derived from the rchar value read from `/proc/<taosd_pid>/io` and the previous reading, in kb/s|
|io\_write|FLOAT||IO throughput, rate derived from the wchar value read from `/proc/<taosd_pid>/io` and the previous reading, in kb/s|
|io\_read\_disk|FLOAT||disk IO throughput, read_bytes read from `/proc/<taosd_pid>/io`, in kb/s|
|io\_write\_disk|FLOAT||disk IO throughput, write_bytes read from `/proc/<taosd_pid>/io`, in kb/s|
|net\_in|FLOAT||network throughput, received bytes read from `/proc/net/dev`, in byte/s|
|net\_out|FLOAT||network throughput, transmit bytes read from `/proc/net/dev`, in byte/s|
|io\_read|FLOAT||IO throughput, rate derived from the rchar value read from `/proc/<taosd_pid>/io` and the previous reading, in byte/s|
|io\_write|FLOAT||IO throughput, rate derived from the wchar value read from `/proc/<taosd_pid>/io` and the previous reading, in byte/s|
|io\_read\_disk|FLOAT||disk IO throughput, read_bytes read from `/proc/<taosd_pid>/io`, in byte/s|
|io\_write\_disk|FLOAT||disk IO throughput, write_bytes read from `/proc/<taosd_pid>/io`, in byte/s|
|req\_select|INT||number of query requests within the monitoring interval|
|req\_select\_rate|FLOAT||query request rate within the monitoring interval = `req_select / monitorInterval`|
|req\_insert|INT||number of insert requests within the monitoring interval, counting individual rows|
@ -146,9 +142,9 @@ The TDinsight dashboard reads its data from the log database (the default db for storing monitoring data
|ts|TIMESTAMP||timestamp|
|name|NCHAR||data directory, usually `/var/lib/taos`|
|level|INT||multi-tier storage level: 0, 1, or 2|
|avail|BIGINT||available space in the data directory|
|used|BIGINT||used space in the data directory|
|total|BIGINT||total space in the data directory|
|avail|BIGINT||available space in the data directory, in bytes|
|used|BIGINT||used space in the data directory, in bytes|
|total|BIGINT||total space in the data directory, in bytes|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
@ -161,9 +157,9 @@ The TDinsight dashboard reads its data from the log database (the default db for storing monitoring data
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|name|NCHAR||log directory name, usually `/var/log/taos/`|
|avail|BIGINT||available space in the log directory|
|used|BIGINT||used space in the log directory|
|total|BIGINT||total space in the log directory|
|avail|BIGINT||available space in the log directory, in bytes|
|used|BIGINT||used space in the log directory, in bytes|
|total|BIGINT||total space in the log directory, in bytes|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
@ -176,9 +172,9 @@ The TDinsight dashboard reads its data from the log database (the default db for storing monitoring data
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|name|NCHAR||temp directory name, usually `/tmp/`|
|avail|BIGINT||available space in the temp directory|
|used|BIGINT||used space in the temp directory|
|total|BIGINT||total space in the temp directory|
|avail|BIGINT||available space in the temp directory, in bytes|
|used|BIGINT||used space in the temp directory, in bytes|
|total|BIGINT||total space in the temp directory, in bytes|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
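Because these monitoring super tables live in an ordinary database, they can be queried with plain SQL. A minimal sketch, assuming taoskeeper's default log database and that the data-directory table above is named `disks_info` (adjust both names to your deployment):

```sql
-- Latest disk usage per dnode, in bytes.
SELECT dnode_ep, LAST(used) AS used, LAST(total) AS total
FROM log.disks_info
GROUP BY dnode_ep;
```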

View File

@ -14,40 +14,7 @@ Seeq is advanced analytics software for manufacturing and the Industrial Internet of Things (IIOT). Seeq supports
### Installing Seeq
Download the relevant software, such as Seeq Server and Seeq Data Lab, from the [Seeq website](https://www.seeq.com/customer-download).
### Installing and starting Seeq Server
```
tar xvzf seeq-server-xxx.tar.gz
cd seeq-server-installer
sudo ./install
sudo seeq service enable
sudo seeq start
```
### Installing and starting Seeq Data Lab Server
Seeq Data Lab must be installed on a separate server from Seeq Server and connected to it through configuration. For detailed installation and configuration instructions, see the [Seeq official documentation](https://support.seeq.com/space/KB/1034059842).
```
tar xvf seeq-data-lab-<version>-64bit-linux.tar.gz
sudo seeq-data-lab-installer/install -f /opt/seeq/seeq-data-lab -g /var/opt/seeq -u seeq
sudo seeq config set Network/DataLab/Hostname localhost
sudo seeq config set Network/DataLab/Port 34231 # the port of the Data Lab server (usually 34231)
sudo seeq config set Network/Hostname <value> # the host IP or URL of the main Seeq Server
# If the main Seeq server is configured to listen over HTTPS
sudo seeq config set Network/Webserver/SecurePort 443 # the secure port of the main Seeq Server (usually 443)
# If the main Seeq server is NOT configured to listen over HTTPS
sudo seeq config set Network/Webserver/Port <value>
#On the main Seeq server, open a Seeq Command Prompt and set the hostname of the Data Lab server:
sudo seeq config set Network/DataLab/Hostname <value> # the host IP (not URL) of the Data Lab server
sudo seeq config set Network/DataLab/Port 34231 # the port of the Data Lab server (usually 34231)
```
Download the relevant software, such as Seeq Server and Seeq Data Lab, from the [Seeq website](https://www.seeq.com/customer-download). Seeq Data Lab must be installed on a separate server from Seeq Server and connected to it through configuration. For detailed installation and configuration instructions, see the [Seeq Knowledge Base](https://support.seeq.com/kb/latest/cloud/).
## Installing a local TDengine instance

View File

@ -10,6 +10,10 @@ TDengine 2.x installation packages are available [here](https://www.taosdata.com/all-do
import Release from "/components/ReleaseV3";
## 3.2.0.0
<Release type="tdengine" version="3.2.0.0" />
## 3.1.1.0
<Release type="tdengine" version="3.1.1.0" />

View File

@ -44,17 +44,17 @@ OS name: "windows 10", version: "10.0", arch: "amd64", family: "windows"
<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0 http://maven.apache.org/xsd/settings-1.0.0.xsd">
<!-- Configure the path of the local Maven repository -->
<!-- Configure the path of the local Maven repository -->
<localRepository>D:\apache-maven-localRepository</localRepository>
<mirrors>
<!-- Configure the Alibaba Cloud Maven mirror repository -->
<mirror>
<id>alimaven</id>
<name>aliyun maven</name>
<url>http://maven.aliyun.com/nexus/content/groups/public/</url>
<mirrorOf>central</mirrorOf>
</mirror>
<mirror>
<id>alimaven</id>
<name>aliyun maven</name>
<url>http://maven.aliyun.com/nexus/content/groups/public/</url>
<mirrorOf>central</mirrorOf>
</mirror>
</mirrors>
<profiles>
@ -126,7 +126,7 @@ https://www.taosdata.com/cn/all-downloads/
Edit the client's hosts file (C:\Windows\System32\drivers\etc\hosts) and add the server's hostname and IP to it:
```
192.168.236.136 td01
192.168.236.136 td01
```
After the configuration is complete, use the TDengine CLI from the command line to connect to the server.

3
examples/go/BUILD.md Normal file
View File

@ -0,0 +1,3 @@
go mod init demo
go mod tidy
go build

View File

@ -55,8 +55,8 @@ typedef struct SSessionKey {
} SSessionKey;
typedef struct SVersionRange {
uint64_t minVer;
uint64_t maxVer;
int64_t minVer;
int64_t maxVer;
} SVersionRange;
static inline int winKeyCmprImpl(const void* pKey1, const void* pKey2) {

View File

@ -108,7 +108,7 @@ int32_t tBufferReserve(SBuffer *pBuffer, int64_t nData, void **ppData);
int32_t tRowBuild(SArray *aColVal, const STSchema *pTSchema, SRow **ppRow);
int32_t tRowGet(SRow *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal);
void tRowDestroy(SRow *pRow);
void tRowSort(SArray *aRowP);
int32_t tRowSort(SArray *aRowP);
int32_t tRowMerge(SArray *aRowP, STSchema *pTSchema, int8_t flag);
int32_t tRowUpsertColData(SRow *pRow, STSchema *pTSchema, SColData *aColData, int32_t nColData, int32_t flag);

View File

@ -179,6 +179,7 @@ extern char tsUdfdLdLibPath[];
// schemaless
extern char tsSmlChildTableName[];
extern char tsSmlAutoChildTableNameDelimiter[];
extern char tsSmlTagName[];
extern bool tsSmlDot2Underline;
extern char tsSmlTsDefaultName[];

View File

@ -52,9 +52,9 @@ typedef enum {
int32_t grantCheck(EGrantType grant);
#ifndef TD_GRANT_OPTIMIZE
int32_t grantAlterActiveCode(const char* old, const char* new, char* out, int8_t type);
int32_t grantAlterActiveCode(const char* old, const char* newer, char* out, int8_t type);
#else
int32_t grantAlterActiveCode(int32_t did, const char* old, const char* new, char* out, int8_t type);
int32_t grantAlterActiveCode(int32_t did, const char* old, const char* newer, char* out, int8_t type);
#endif
#ifndef GRANTS_CFG
@ -114,4 +114,4 @@ int32_t grantAlterActiveCode(int32_t did, const char* old, const char* new, char
}
#endif
#endif /*_TD_COMMON_GRANT_H_*/
#endif /*_TD_COMMON_GRANT_H_*/

View File

@ -768,6 +768,8 @@ typedef struct {
char* pAst2;
int64_t deleteMark1;
int64_t deleteMark2;
int32_t sqlLen;
char* sql;
} SMCreateStbReq;
int32_t tSerializeSMCreateStbReq(void* buf, int32_t bufLen, SMCreateStbReq* pReq);
@ -788,10 +790,13 @@ typedef struct {
int8_t source; // 1-taosX or 0-taosClient
int8_t reserved[6];
tb_uid_t suid;
int32_t sqlLen;
char* sql;
} SMDropStbReq;
int32_t tSerializeSMDropStbReq(void* buf, int32_t bufLen, SMDropStbReq* pReq);
int32_t tDeserializeSMDropStbReq(void* buf, int32_t bufLen, SMDropStbReq* pReq);
void tFreeSMDropStbReq(SMDropStbReq *pReq);
typedef struct {
char name[TSDB_TABLE_FNAME_LEN];
@ -801,6 +806,8 @@ typedef struct {
int32_t ttl;
int32_t commentLen;
char* comment;
int32_t sqlLen;
char* sql;
} SMAlterStbReq;
int32_t tSerializeSMAlterStbReq(void* buf, int32_t bufLen, SMAlterStbReq* pReq);
@ -871,10 +878,13 @@ int32_t tDeserializeSCreateAcctReq(void* buf, int32_t bufLen, SCreateAcctReq* pR
typedef struct {
char user[TSDB_USER_LEN];
int32_t sqlLen;
char* sql;
} SDropUserReq, SDropAcctReq;
int32_t tSerializeSDropUserReq(void* buf, int32_t bufLen, SDropUserReq* pReq);
int32_t tDeserializeSDropUserReq(void* buf, int32_t bufLen, SDropUserReq* pReq);
void tFreeSDropUserReq(SDropUserReq *pReq);
typedef struct SIpV4Range{
uint32_t ip;
@ -888,19 +898,21 @@ typedef struct {
SIpWhiteList* cloneIpWhiteList(SIpWhiteList* pIpWhiteList);
typedef struct {
int8_t createType;
int8_t superUser; // denote if it is a super user or not
int8_t sysInfo;
int8_t enable;
char user[TSDB_USER_LEN];
char pass[TSDB_USET_PASSWORD_LEN];
int8_t createType;
int8_t superUser; // denote if it is a super user or not
int8_t sysInfo;
int8_t enable;
char user[TSDB_USER_LEN];
char pass[TSDB_USET_PASSWORD_LEN];
int32_t numIpRanges;
SIpV4Range* pIpRanges;
int32_t sqlLen;
char* sql;
} SCreateUserReq;
int32_t tSerializeSCreateUserReq(void* buf, int32_t bufLen, SCreateUserReq* pReq);
int32_t tDeserializeSCreateUserReq(void* buf, int32_t bufLen, SCreateUserReq* pReq);
void tFreeSCreateUserReq(SCreateUserReq* pReq);
void tFreeSCreateUserReq(SCreateUserReq *pReq);
typedef struct {
int64_t ver;
@ -927,18 +939,20 @@ int32_t tSerializeRetrieveIpWhite(void* buf, int32_t bufLen, SRetrieveIpWhiteReq
int32_t tDeserializeRetrieveIpWhite(void* buf, int32_t bufLen, SRetrieveIpWhiteReq* pReq);
typedef struct {
int8_t alterType;
int8_t superUser;
int8_t sysInfo;
int8_t enable;
char user[TSDB_USER_LEN];
char pass[TSDB_USET_PASSWORD_LEN];
char objname[TSDB_DB_FNAME_LEN]; // db or topic
char tabName[TSDB_TABLE_NAME_LEN];
char* tagCond;
int32_t tagCondLen;
int8_t alterType;
int8_t superUser;
int8_t sysInfo;
int8_t enable;
char user[TSDB_USER_LEN];
char pass[TSDB_USET_PASSWORD_LEN];
char objname[TSDB_DB_FNAME_LEN]; // db or topic
char tabName[TSDB_TABLE_NAME_LEN];
char* tagCond;
int32_t tagCondLen;
int32_t numIpRanges;
SIpV4Range* pIpRanges;
int32_t sqlLen;
char* sql;
} SAlterUserReq;
int32_t tSerializeSAlterUserReq(void* buf, int32_t bufLen, SAlterUserReq* pReq);
@ -1118,6 +1132,8 @@ typedef struct {
int16_t hashPrefix;
int16_t hashSuffix;
int32_t tsdbPageSize;
int32_t sqlLen;
char* sql;
} SCreateDbReq;
int32_t tSerializeSCreateDbReq(void* buf, int32_t bufLen, SCreateDbReq* pReq);
@ -1144,18 +1160,24 @@ typedef struct {
int32_t minRows;
int32_t walRetentionPeriod;
int32_t walRetentionSize;
int32_t sqlLen;
char* sql;
} SAlterDbReq;
int32_t tSerializeSAlterDbReq(void* buf, int32_t bufLen, SAlterDbReq* pReq);
int32_t tDeserializeSAlterDbReq(void* buf, int32_t bufLen, SAlterDbReq* pReq);
void tFreeSAlterDbReq(SAlterDbReq* pReq);
typedef struct {
char db[TSDB_DB_FNAME_LEN];
int8_t ignoreNotExists;
int32_t sqlLen;
char* sql;
} SDropDbReq;
int32_t tSerializeSDropDbReq(void* buf, int32_t bufLen, SDropDbReq* pReq);
int32_t tDeserializeSDropDbReq(void* buf, int32_t bufLen, SDropDbReq* pReq);
void tFreeSDropDbReq(SDropDbReq* pReq);
typedef struct {
char db[TSDB_DB_FNAME_LEN];
@ -1350,10 +1372,13 @@ void tFreeSUserAuthBatchRsp(SUserAuthBatchRsp* pRsp);
typedef struct {
char db[TSDB_DB_FNAME_LEN];
STimeWindow timeRange;
int32_t sqlLen;
char* sql;
} SCompactDbReq;
int32_t tSerializeSCompactDbReq(void* buf, int32_t bufLen, SCompactDbReq* pReq);
int32_t tDeserializeSCompactDbReq(void* buf, int32_t bufLen, SCompactDbReq* pReq);
void tFreeSCompactDbReq(SCompactDbReq *pReq);
typedef struct {
char name[TSDB_FUNC_NAME_LEN];
@ -1933,10 +1958,13 @@ void tFreeSExplainRsp(SExplainRsp* pRsp);
typedef struct {
char fqdn[TSDB_FQDN_LEN]; // end point, hostname:port
int32_t port;
int32_t sqlLen;
char* sql;
} SCreateDnodeReq;
int32_t tSerializeSCreateDnodeReq(void* buf, int32_t bufLen, SCreateDnodeReq* pReq);
int32_t tDeserializeSCreateDnodeReq(void* buf, int32_t bufLen, SCreateDnodeReq* pReq);
void tFreeSCreateDnodeReq(SCreateDnodeReq* pReq);
typedef struct {
int32_t dnodeId;
@ -1944,10 +1972,13 @@ typedef struct {
int32_t port;
int8_t force;
int8_t unsafe;
int32_t sqlLen;
char* sql;
} SDropDnodeReq;
int32_t tSerializeSDropDnodeReq(void* buf, int32_t bufLen, SDropDnodeReq* pReq);
int32_t tDeserializeSDropDnodeReq(void* buf, int32_t bufLen, SDropDnodeReq* pReq);
void tFreeSDropDnodeReq(SDropDnodeReq* pReq);
enum {
RESTORE_TYPE__ALL = 1,
@ -1959,19 +1990,25 @@ enum {
typedef struct {
int32_t dnodeId;
int8_t restoreType;
int32_t sqlLen;
char* sql;
} SRestoreDnodeReq;
int32_t tSerializeSRestoreDnodeReq(void* buf, int32_t bufLen, SRestoreDnodeReq* pReq);
int32_t tDeserializeSRestoreDnodeReq(void* buf, int32_t bufLen, SRestoreDnodeReq* pReq);
void tFreeSRestoreDnodeReq(SRestoreDnodeReq *pReq);
typedef struct {
int32_t dnodeId;
char config[TSDB_DNODE_CONFIG_LEN];
char value[TSDB_DNODE_VALUE_LEN];
int32_t sqlLen;
char* sql;
} SMCfgDnodeReq;
int32_t tSerializeSMCfgDnodeReq(void* buf, int32_t bufLen, SMCfgDnodeReq* pReq);
int32_t tDeserializeSMCfgDnodeReq(void* buf, int32_t bufLen, SMCfgDnodeReq* pReq);
void tFreeSMCfgDnodeReq(SMCfgDnodeReq *pReq);
typedef struct {
char config[TSDB_DNODE_CONFIG_LEN];
@ -1983,12 +2020,15 @@ int32_t tDeserializeSDCfgDnodeReq(void* buf, int32_t bufLen, SDCfgDnodeReq* pReq
typedef struct {
int32_t dnodeId;
int32_t sqlLen;
char* sql;
} SMCreateMnodeReq, SMDropMnodeReq, SDDropMnodeReq, SMCreateQnodeReq, SMDropQnodeReq, SDCreateQnodeReq, SDDropQnodeReq,
SMCreateSnodeReq, SMDropSnodeReq, SDCreateSnodeReq, SDDropSnodeReq;
int32_t tSerializeSCreateDropMQSNodeReq(void* buf, int32_t bufLen, SMCreateQnodeReq* pReq);
int32_t tDeserializeSCreateDropMQSNodeReq(void* buf, int32_t bufLen, SMCreateQnodeReq* pReq);
void tFreeSMCreateQnodeReq(SMCreateQnodeReq *pReq);
void tFreeSDDropQnodeReq(SDDropQnodeReq* pReq);
typedef struct {
int8_t replica;
SReplica replicas[TSDB_MAX_REPLICA];
@ -2023,10 +2063,13 @@ int32_t tDeserializeSKillTransReq(void* buf, int32_t bufLen, SKillTransReq* pReq
typedef struct {
int32_t useless; // useless
int32_t sqlLen;
char* sql;
} SBalanceVgroupReq;
int32_t tSerializeSBalanceVgroupReq(void* buf, int32_t bufLen, SBalanceVgroupReq* pReq);
int32_t tDeserializeSBalanceVgroupReq(void* buf, int32_t bufLen, SBalanceVgroupReq* pReq);
void tFreeSBalanceVgroupReq(SBalanceVgroupReq *pReq);
typedef struct {
int32_t vgId1;
@ -2041,18 +2084,24 @@ typedef struct {
int32_t dnodeId1;
int32_t dnodeId2;
int32_t dnodeId3;
int32_t sqlLen;
char* sql;
} SRedistributeVgroupReq;
int32_t tSerializeSRedistributeVgroupReq(void* buf, int32_t bufLen, SRedistributeVgroupReq* pReq);
int32_t tDeserializeSRedistributeVgroupReq(void* buf, int32_t bufLen, SRedistributeVgroupReq* pReq);
void tFreeSRedistributeVgroupReq(SRedistributeVgroupReq *pReq);
typedef struct {
int32_t useless;
int32_t vgId;
int32_t sqlLen;
char* sql;
} SBalanceVgroupLeaderReq;
int32_t tSerializeSBalanceVgroupLeaderReq(void* buf, int32_t bufLen, SBalanceVgroupLeaderReq* pReq);
int32_t tDeserializeSBalanceVgroupLeaderReq(void* buf, int32_t bufLen, SBalanceVgroupLeaderReq* pReq);
void tFreeSBalanceVgroupLeaderReq(SBalanceVgroupLeaderReq *pReq);
typedef struct {
int32_t vgId;
@ -2316,17 +2365,6 @@ int32_t tSerializeSCMCreateStreamReq(void* buf, int32_t bufLen, const SCMCreateS
int32_t tDeserializeSCMCreateStreamReq(void* buf, int32_t bufLen, SCMCreateStreamReq* pReq);
void tFreeSCMCreateStreamReq(SCMCreateStreamReq* pReq);
typedef struct {
char name[TSDB_STREAM_FNAME_LEN];
int64_t streamId;
char* sql;
char* executorMsg;
} SMVCreateStreamReq, SMSCreateStreamReq;
typedef struct {
int64_t streamId;
} SMVCreateStreamRsp, SMSCreateStreamRsp;
enum {
TOPIC_SUB_TYPE__DB = 1,
TOPIC_SUB_TYPE__TABLE,
@ -2348,16 +2386,9 @@ int32_t tSerializeSCMCreateTopicReq(void* buf, int32_t bufLen, const SCMCreateTo
int32_t tDeserializeSCMCreateTopicReq(void* buf, int32_t bufLen, SCMCreateTopicReq* pReq);
void tFreeSCMCreateTopicReq(SCMCreateTopicReq* pReq);
typedef struct {
int64_t topicId;
} SCMCreateTopicRsp;
int32_t tSerializeSCMCreateTopicRsp(void* buf, int32_t bufLen, const SCMCreateTopicRsp* pRsp);
int32_t tDeserializeSCMCreateTopicRsp(void* buf, int32_t bufLen, SCMCreateTopicRsp* pRsp);
typedef struct {
int64_t consumerId;
} SMqConsumerLostMsg, SMqConsumerRecoverMsg, SMqConsumerClearMsg;
} SMqConsumerRecoverMsg, SMqConsumerClearMsg;
typedef struct {
int64_t consumerId;
@ -2369,6 +2400,7 @@ typedef struct {
int8_t autoCommit;
int32_t autoCommitInterval;
int8_t resetOffsetCfg;
int8_t enableReplay;
} SCMSubscribeReq;
static FORCE_INLINE int32_t tSerializeSCMSubscribeReq(void** buf, const SCMSubscribeReq* pReq) {
@ -2388,6 +2420,7 @@ static FORCE_INLINE int32_t tSerializeSCMSubscribeReq(void** buf, const SCMSubsc
tlen += taosEncodeFixedI8(buf, pReq->autoCommit);
tlen += taosEncodeFixedI32(buf, pReq->autoCommitInterval);
tlen += taosEncodeFixedI8(buf, pReq->resetOffsetCfg);
tlen += taosEncodeFixedI8(buf, pReq->enableReplay);
return tlen;
}
@ -2411,71 +2444,7 @@ static FORCE_INLINE void* tDeserializeSCMSubscribeReq(void* buf, SCMSubscribeReq
buf = taosDecodeFixedI8(buf, &pReq->autoCommit);
buf = taosDecodeFixedI32(buf, &pReq->autoCommitInterval);
buf = taosDecodeFixedI8(buf, &pReq->resetOffsetCfg);
return buf;
}
typedef struct SMqSubTopic {
int32_t vgId;
int64_t topicId;
SEpSet epSet;
} SMqSubTopic;
typedef struct {
int32_t topicNum;
SMqSubTopic topics[];
} SCMSubscribeRsp;
static FORCE_INLINE int32_t tSerializeSCMSubscribeRsp(void** buf, const SCMSubscribeRsp* pRsp) {
int32_t tlen = 0;
tlen += taosEncodeFixedI32(buf, pRsp->topicNum);
for (int32_t i = 0; i < pRsp->topicNum; i++) {
tlen += taosEncodeFixedI32(buf, pRsp->topics[i].vgId);
tlen += taosEncodeFixedI64(buf, pRsp->topics[i].topicId);
tlen += taosEncodeSEpSet(buf, &pRsp->topics[i].epSet);
}
return tlen;
}
static FORCE_INLINE void* tDeserializeSCMSubscribeRsp(void* buf, SCMSubscribeRsp* pRsp) {
buf = taosDecodeFixedI32(buf, &pRsp->topicNum);
for (int32_t i = 0; i < pRsp->topicNum; i++) {
buf = taosDecodeFixedI32(buf, &pRsp->topics[i].vgId);
buf = taosDecodeFixedI64(buf, &pRsp->topics[i].topicId);
buf = taosDecodeSEpSet(buf, &pRsp->topics[i].epSet);
}
return buf;
}
typedef struct {
int64_t topicId;
int64_t consumerId;
int64_t consumerGroupId;
int64_t offset;
char* sql;
char* logicalPlan;
char* physicalPlan;
} SMVSubscribeReq;
static FORCE_INLINE int32_t tSerializeSMVSubscribeReq(void** buf, SMVSubscribeReq* pReq) {
int32_t tlen = 0;
tlen += taosEncodeFixedI64(buf, pReq->topicId);
tlen += taosEncodeFixedI64(buf, pReq->consumerId);
tlen += taosEncodeFixedI64(buf, pReq->consumerGroupId);
tlen += taosEncodeFixedI64(buf, pReq->offset);
tlen += taosEncodeString(buf, pReq->sql);
tlen += taosEncodeString(buf, pReq->logicalPlan);
tlen += taosEncodeString(buf, pReq->physicalPlan);
return tlen;
}
static FORCE_INLINE void* tDeserializeSMVSubscribeReq(void* buf, SMVSubscribeReq* pReq) {
buf = taosDecodeFixedI64(buf, &pReq->topicId);
buf = taosDecodeFixedI64(buf, &pReq->consumerId);
buf = taosDecodeFixedI64(buf, &pReq->consumerGroupId);
buf = taosDecodeFixedI64(buf, &pReq->offset);
buf = taosDecodeString(buf, &pReq->sql);
buf = taosDecodeString(buf, &pReq->logicalPlan);
buf = taosDecodeString(buf, &pReq->physicalPlan);
buf = taosDecodeFixedI8(buf, &pReq->enableReplay);
return buf;
}
@ -2526,10 +2495,13 @@ typedef struct {
typedef struct {
char name[TSDB_TOPIC_FNAME_LEN];
int8_t igNotExists;
int32_t sqlLen;
char* sql;
} SMDropTopicReq;
int32_t tSerializeSMDropTopicReq(void* buf, int32_t bufLen, SMDropTopicReq* pReq);
int32_t tDeserializeSMDropTopicReq(void* buf, int32_t bufLen, SMDropTopicReq* pReq);
void tFreeSMDropTopicReq(SMDropTopicReq *pReq);
typedef struct {
char topic[TSDB_TOPIC_FNAME_LEN];
@ -2625,6 +2597,8 @@ typedef struct SVCreateTbReq {
SSchemaWrapper schemaRow;
} ntb;
};
int32_t sqlLen;
char* sql;
} SVCreateTbReq;
int tEncodeSVCreateTbReq(SEncoder* pCoder, const SVCreateTbReq* pReq);
@ -2636,6 +2610,7 @@ static FORCE_INLINE void tdDestroySVCreateTbReq(SVCreateTbReq* req) {
return;
}
taosMemoryFreeClear(req->sql);
taosMemoryFreeClear(req->name);
taosMemoryFreeClear(req->comment);
if (req->type == TSDB_CHILD_TABLE) {
@ -3099,6 +3074,8 @@ typedef struct {
typedef struct {
char name[TSDB_STREAM_FNAME_LEN];
int8_t igNotExists;
int32_t sqlLen;
char* sql;
} SMDropStreamReq;
typedef struct {
@ -3112,12 +3089,20 @@ typedef struct {
int32_t taskId;
} SVDropStreamTaskReq;
typedef struct {
SMsgHead head;
int64_t streamId;
int32_t taskId;
int64_t dataVer;
} SVStreamTaskVerUpdateReq;
typedef struct {
int8_t reserved;
} SVDropStreamTaskRsp;
int32_t tSerializeSMDropStreamReq(void* buf, int32_t bufLen, const SMDropStreamReq* pReq);
int32_t tDeserializeSMDropStreamReq(void* buf, int32_t bufLen, SMDropStreamReq* pReq);
void tFreeSMDropStreamReq(SMDropStreamReq* pReq);
typedef struct {
char name[TSDB_STREAM_FNAME_LEN];
@ -3276,7 +3261,7 @@ typedef struct {
SMsgHead head;
int64_t streamId;
int32_t taskId;
} SVPauseStreamTaskReq;
} SVPauseStreamTaskReq, SVResetStreamTaskReq;
typedef struct {
int8_t reserved;
@ -3555,6 +3540,7 @@ typedef struct {
int64_t consumerId;
int64_t timeout;
STqOffsetVal reqOffset;
int8_t enableReplay;
} SMqPollReq;
int32_t tSerializeSMqPollReq(void* buf, int32_t bufLen, SMqPollReq* pReq);
@ -3614,6 +3600,7 @@ typedef struct {
SArray* blockData;
SArray* blockTbName;
SArray* blockSchema;
int64_t sleepTime;
} SMqDataRsp;
int32_t tEncodeMqDataRsp(SEncoder* pEncoder, const SMqDataRsp* pRsp);

View File

@ -299,17 +299,17 @@ enum { // WARN: new msg should be appended to segment tail
TD_DEF_MSG_TYPE(TDMT_SYNC_HEARTBEAT, "sync-heartbeat", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SYNC_HEARTBEAT_REPLY, "sync-heartbeat-reply", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SYNC_LOCAL_CMD, "sync-local-cmd", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SYNC_PRE_SNAPSHOT, "sync-pre-snapshot", NULL, NULL) // no longer used
TD_DEF_MSG_TYPE(TDMT_SYNC_PRE_SNAPSHOT_REPLY, "sync-pre-snapshot-reply", NULL, NULL) // no longer used
TD_DEF_MSG_TYPE(TDMT_SYNC_PREP_SNAPSHOT, "sync-prep-snapshot", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SYNC_PREP_SNAPSHOT_REPLY, "sync-prep-snapshot-reply", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SYNC_MAX_MSG, "sync-max", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SYNC_FORCE_FOLLOWER, "sync-force-become-follower", NULL, NULL)
TD_NEW_MSG_SEG(TDMT_VND_STREAM_MSG)
// TD_DEF_MSG_TYPE(TDMT_VND_STREAM_TRIGGER, "vnode-stream-trigger", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_SCAN_HISTORY, "vnode-stream-scan-history", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_SCAN_HISTORY_FINISH, "vnode-stream-scan-history-finish", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_CHECK_POINT_SOURCE, "vnode-stream-checkpoint-source", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_TASK_UPDATE, "vnode-stream-update", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_TASK_RESET, "vnode-stream-reset", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_TASK_CHECK, "vnode-stream-task-check", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_MAX_MSG, "vnd-stream-max", NULL, NULL)

View File

@ -362,6 +362,8 @@
#define TK_WAL 343
#define TK_NK_SPACE 600
#define TK_NK_COMMENT 601
#define TK_NK_ILLEGAL 602

View File

@ -29,7 +29,7 @@
extern "C" {
#endif
#define AUDIT_DETAIL_MAX 16000
#define AUDIT_DETAIL_MAX 65472
typedef struct {
const char *server;
@ -39,7 +39,8 @@ typedef struct {
int32_t auditInit(const SAuditCfg *pCfg);
void auditSend(SJson *pJson);
void auditRecord(SRpcMsg *pReq, int64_t clusterId, char *operation, char *target1, char *target2, char *detail);
void auditRecord(SRpcMsg *pReq, int64_t clusterId, char *operation, char *target1, char *target2,
char *detail, int32_t len);
#ifdef __cplusplus
}

View File

@ -227,6 +227,7 @@ typedef struct SStoreTqReader {
bool (*tqReaderNextBlockInWal)();
bool (*tqNextBlockImpl)(); // todo remove it
SSDataBlock* (*tqGetResultBlock)();
int64_t (*tqGetResultBlockTime)();
void (*tqReaderSetColIdList)();
int32_t (*tqReaderSetQueryTableList)();

View File

@ -507,6 +507,7 @@ typedef struct SBalanceVgroupStmt {
typedef struct SBalanceVgroupLeaderStmt {
ENodeType type;
int32_t vgId;
} SBalanceVgroupLeaderStmt;
typedef struct SMergeVgroupStmt {

View File

@ -35,6 +35,7 @@ typedef struct SRawExprNode {
char* p;
uint32_t n;
SNode* pNode;
bool isPseudoColumn;
} SRawExprNode;
typedef struct SDataType {

View File

@ -29,7 +29,23 @@ extern "C" {
#ifndef _STREAM_H_
#define _STREAM_H_
typedef struct SStreamTask SStreamTask;
#define ONE_MiB_F (1048576.0)
#define ONE_KiB_F (1024.0)
#define SIZE_IN_MiB(_v) ((_v) / ONE_MiB_F)
#define SIZE_IN_KiB(_v) ((_v) / ONE_KiB_F)
#define TASK_DOWNSTREAM_READY 0x0
#define TASK_DOWNSTREAM_NOT_READY 0x1
#define TASK_DOWNSTREAM_NOT_LEADER 0x2
#define TASK_SELF_NEW_STAGE 0x3
#define NODE_ROLE_UNINIT 0x1
#define NODE_ROLE_LEADER 0x2
#define NODE_ROLE_FOLLOWER 0x3
typedef struct SStreamTask SStreamTask;
typedef struct SStreamQueue SStreamQueue;
#define SSTREAM_TASK_VER 2
enum {
@ -64,6 +80,7 @@ enum {
TASK_INPUT_STATUS__NORMAL = 1,
TASK_INPUT_STATUS__BLOCKED,
TASK_INPUT_STATUS__FAILED,
TASK_INPUT_STATUS__REFUSED,
};
enum {
@ -106,6 +123,7 @@ typedef struct {
} SStreamQueueItem;
typedef void FTbSink(SStreamTask* pTask, void* vnode, void* data);
typedef void FSmaSink(void* vnode, int64_t smaId, const SArray* data);
typedef int32_t FTaskExpand(void* ahandle, SStreamTask* pTask, int64_t ver);
typedef struct {
@ -154,8 +172,6 @@ typedef struct {
int64_t size;
} SStreamQueueRes;
void streamFreeQitem(SStreamQueueItem* data);
#if 0
bool streamQueueResEmpty(const SStreamQueueRes* pRes);
int64_t streamQueueResSize(const SStreamQueueRes* pRes);
@ -175,22 +191,9 @@ int32_t streamQueuePush(SStreamQueue1* pQueue, SStreamQueueItem* pItem);
SStreamQueueRes streamQueueGetRes(SStreamQueue1* pQueue);
#endif
typedef struct {
STaosQueue* pQueue;
STaosQall* qall;
void* qItem;
int8_t status;
} SStreamQueue;
int32_t streamInit();
void streamCleanUp();
SStreamQueue* streamQueueOpen(int64_t cap);
void streamQueueClose(SStreamQueue* pQueue, int32_t taskId);
void streamQueueProcessSuccess(SStreamQueue* queue);
void streamQueueProcessFail(SStreamQueue* queue);
void* streamQueueNextItem(SStreamQueue* pQueue);
SStreamDataSubmit* streamDataSubmitNew(SPackedData* pData, int32_t type);
void streamDataSubmitDestroy(SStreamDataSubmit* pDataSubmit);
@ -204,7 +207,7 @@ typedef struct {
int32_t taskId;
int32_t nodeId;
SEpSet epSet;
} STaskDispatcherFixedEp;
} STaskDispatcherFixed;
typedef struct {
char stbFullName[TSDB_TABLE_FNAME_LEN];
@ -222,8 +225,6 @@ typedef struct {
SSHashObj* pTblInfo;
} STaskSinkTb;
typedef void FSmaSink(void* vnode, int64_t smaId, const SArray* data);
typedef struct {
int64_t smaId;
// following are not applicable to encoder and decoder
@ -244,10 +245,10 @@ typedef struct SStreamChildEpInfo {
int64_t stage; // upstream task stage value, to denote if the upstream node has restart/replica changed/transfer
} SStreamChildEpInfo;
typedef struct SStreamTaskKey {
typedef struct STaskId {
int64_t streamId;
int32_t taskId;
} SStreamTaskKey;
int64_t taskId;
} STaskId;
typedef struct SStreamTaskId {
int64_t streamId;
@ -256,19 +257,22 @@ typedef struct SStreamTaskId {
} SStreamTaskId;
typedef struct SCheckpointInfo {
int64_t startTs;
int64_t checkpointId;
int64_t checkpointVer; // latest checkpointId version
int64_t checkpointVer; // latest checkpointId version
int64_t nextProcessVer; // current offset in WAL, not serialize it
int64_t failedId; // record the latest failed checkpoint id
} SCheckpointInfo;
typedef struct SStreamStatus {
int8_t taskStatus;
int8_t downstreamReady; // downstream tasks are all ready now, if this flag is set
int8_t schedStatus;
int8_t keepTaskStatus;
bool appendTranstateBlock; // has append the transfer state data block already, todo: remove it
int8_t timerActive; // timer is active
int8_t pauseAllowed; // allowed task status to be set to be paused
int8_t taskStatus;
int8_t downstreamReady; // downstream tasks are all ready now, if this flag is set
int8_t schedStatus;
int8_t keepTaskStatus;
bool appendTranstateBlock; // has append the transfer state data block already, todo: remove it
int8_t pauseAllowed; // allowed task status to be set to be paused
int32_t timerActive; // timer is active
int32_t inScanHistorySentinel;
} SStreamStatus;
typedef struct SDataRange {
@ -287,21 +291,27 @@ typedef struct SSTaskBasicInfo {
int64_t triggerParam; // in msec
} SSTaskBasicInfo;
typedef struct SStreamDispatchReq SStreamDispatchReq;
typedef struct STokenBucket STokenBucket;
typedef struct SMetaHbInfo SMetaHbInfo;
typedef struct SDispatchMsgInfo {
void* pData; // current dispatch data
SStreamDispatchReq* pData; // current dispatch data
int8_t dispatchMsgType;
int16_t msgType; // dispatch msg type
int32_t retryCount; // retry send data count
int64_t blockingTs; // output blocking timestamp
int64_t startTs; // dispatch start time, record total elapsed time for dispatch
SArray* pRetryList; // current dispatch successfully completed node of downstream
void* pTimer; // used to dispatch data after a given time duration
} SDispatchMsgInfo;
typedef struct STaskOutputInfo {
int8_t type;
typedef struct STaskOutputQueue {
int8_t status;
SStreamQueue* queue;
} STaskOutputInfo;
} STaskOutputQueue;
typedef struct STaskInputInfo {
int8_t status;
int8_t status;
SStreamQueue* queue;
} STaskInputInfo;
@ -310,62 +320,76 @@ typedef struct STaskSchedInfo {
void* pTimer;
} STaskSchedInfo;
typedef struct SSinkTaskRecorder {
typedef struct SSinkRecorder {
int64_t numOfSubmit;
int64_t numOfBlocks;
int64_t numOfRows;
} SSinkTaskRecorder;
int64_t dataSize;
} SSinkRecorder;
typedef struct {
int64_t created;
int64_t init;
int64_t step1Start;
int64_t step2Start;
int64_t sinkStart;
} STaskTimestamp;
typedef struct STaskExecStatisInfo {
int64_t created;
int64_t init;
int64_t start;
int64_t step1Start;
int64_t step2Start;
int32_t updateCount;
int64_t latestUpdateTs;
int32_t processDataBlocks;
int64_t processDataSize;
int32_t dispatch;
int64_t dispatchDataSize;
int32_t checkpoint;
SSinkRecorder sink;
} STaskExecStatisInfo;
typedef struct STokenBucket {
int32_t capacity; // total capacity
int64_t fillTimestamp;// fill timestamp
int32_t numOfToken; // total available tokens
int32_t rate; // number of token per second
} STokenBucket;
typedef struct SHistoryTaskInfo {
STaskId id;
void* pTimer;
int32_t tickCount;
int32_t retryTimes;
int32_t waitInterval;
} SHistoryTaskInfo;
struct SStreamTask {
int64_t ver;
SStreamTaskId id;
SSTaskBasicInfo info;
STaskOutputInfo outputInfo;
STaskInputInfo inputInfo;
STaskSchedInfo schedInfo;
SDispatchMsgInfo msgInfo;
SStreamStatus status;
SCheckpointInfo chkInfo;
STaskExec exec;
SDataRange dataRange;
SStreamTaskId historyTaskId;
SStreamTaskId streamTaskId;
STaskTimestamp tsInfo;
SArray* pReadyMsgList; // SArray<SStreamChkptReadyInfo*>
TdThreadMutex lock; // secures the operations of setting task status and putting data into inputQ
SArray* pUpstreamInfoList;
// output
typedef struct STaskOutputInfo {
union {
STaskDispatcherFixedEp fixedEpDispatcher;
STaskDispatcherFixed fixedDispatcher;
STaskDispatcherShuffle shuffleDispatcher;
STaskSinkTb tbSink;
STaskSinkSma smaSink;
STaskSinkFetch fetchSink;
};
SSinkTaskRecorder sinkRecorder;
STokenBucket tokenBucket;
int8_t type;
STokenBucket* pTokenBucket;
} STaskOutputInfo;
void* launchTaskTimer;
SMsgCb* pMsgCb; // msg handle
SStreamState* pState; // state backend
SArray* pRspMsgList;
typedef struct SUpstreamInfo {
SArray* pList;
int32_t numOfClosed;
} SUpstreamInfo;
struct SStreamTask {
int64_t ver;
SStreamTaskId id;
SSTaskBasicInfo info;
STaskOutputQueue outputq;
STaskInputInfo inputInfo;
STaskSchedInfo schedInfo;
STaskOutputInfo outputInfo;
SDispatchMsgInfo msgInfo;
SStreamStatus status;
SCheckpointInfo chkInfo;
STaskExec exec;
SDataRange dataRange;
SHistoryTaskInfo hTaskInfo;
STaskId streamTaskId;
STaskExecStatisInfo execInfo;
SArray* pReadyMsgList; // SArray<SStreamChkptReadyInfo*>
TdThreadMutex lock; // secures the operations of setting task status and putting data into inputQ
SMsgCb* pMsgCb; // msg handle
SStreamState* pState; // state backend
SArray* pRspMsgList;
SUpstreamInfo upstreamInfo;
// the followings attributes don't be serialized
int32_t notReadyTasks;
int32_t numOfWaitingUpstream;
@ -381,11 +405,13 @@ struct SStreamTask {
char reserve[256];
};
typedef struct SMetaHbInfo {
tmr_h hbTmr;
int32_t stopFlag;
int32_t tickCounter;
} SMetaHbInfo;
typedef struct STaskStartInfo {
int64_t startTs;
int64_t readyTs;
int32_t startedAfterNodeUpdate;
SHashObj* pReadyTaskSet; // tasks that are all ready for running stream processing
int32_t elapsedTime;
} STaskStartInfo;
// meta
typedef struct SStreamMeta {
@ -393,22 +419,25 @@ typedef struct SStreamMeta {
TDB* db;
TTB* pTaskDb;
TTB* pCheckpointDb;
SHashObj* pTasks;
SArray* pTaskList; // SArray<task_id*>
SHashObj* pTasksMap;
SArray* pTaskList; // SArray<STaskId*>
void* ahandle;
TXN* txn;
FTaskExpand* expandFunc;
int32_t vgId;
int64_t stage;
int32_t role;
STaskStartInfo startInfo;
SRWLatch lock;
int32_t walScanCounter;
void* streamBackend;
int64_t streamBackendRid;
SHashObj* pTaskBackendUnique;
TdThreadMutex backendMutex;
SMetaHbInfo hbInfo;
int32_t closedTask;
int32_t totalTasks; // this value should be increased when a new task is added into the meta
SMetaHbInfo* pHbInfo;
SHashObj* pUpdateTaskSet;
int32_t numOfStreamTasks; // this value should be increased when a new task is added into the meta
int32_t numOfPausedTasks;
int32_t chkptNotReadyTasks;
int64_t rid;
@ -417,26 +446,25 @@ typedef struct SStreamMeta {
SArray* chkpInUse;
int32_t chkpCap;
SRWLatch chkpDirLock;
int32_t pauseTaskNum;
} SStreamMeta;
int32_t tEncodeStreamEpInfo(SEncoder* pEncoder, const SStreamChildEpInfo* pInfo);
int32_t tDecodeStreamEpInfo(SDecoder* pDecoder, SStreamChildEpInfo* pInfo);
SStreamTask* tNewStreamTask(int64_t streamId, int8_t taskLevel, int8_t fillHistory, int64_t triggerParam,
SArray* pTaskList);
SStreamTask* tNewStreamTask(int64_t streamId, int8_t taskLevel, bool fillHistory, int64_t triggerParam,
SArray* pTaskList, bool hasFillhistory);
int32_t tEncodeStreamTask(SEncoder* pEncoder, const SStreamTask* pTask);
int32_t tDecodeStreamTask(SDecoder* pDecoder, SStreamTask* pTask);
void tFreeStreamTask(SStreamTask* pTask);
int32_t streamTaskInit(SStreamTask* pTask, SStreamMeta* pMeta, SMsgCb* pMsgCb, int64_t ver);
int32_t tDecodeStreamTaskChkInfo(SDecoder* pDecoder, SCheckpointInfo* pChkpInfo);
int32_t tDecodeStreamTaskId(SDecoder* pDecoder, SStreamTaskId* pTaskId);
int32_t tDecodeStreamTaskId(SDecoder* pDecoder, STaskId* pTaskId);
int32_t streamTaskPutDataIntoInputQ(SStreamTask* pTask, SStreamQueueItem* pItem);
int32_t streamTaskPutDataIntoOutputQ(SStreamTask* pTask, SStreamDataBlock* pBlock);
int32_t streamTaskPutTranstateIntoInputQ(SStreamTask* pTask);
bool streamQueueIsFull(const STaosQueue* pQueue, bool inputQ);
bool streamQueueIsFull(const SStreamQueue* pQueue);
typedef struct {
SMsgHead head;
@ -444,11 +472,12 @@ typedef struct {
int32_t taskId;
} SStreamTaskRunReq;
typedef struct {
struct SStreamDispatchReq {
int32_t type;
int64_t stage; // nodeId from upstream task
int64_t streamId;
int32_t taskId;
int32_t msgId; // msg id to identify if the incoming msg from the same sender
int32_t srcVgId;
int32_t upstreamTaskId;
int32_t upstreamChildId;
@ -457,7 +486,7 @@ typedef struct {
int64_t totalLen;
SArray* dataLen; // SArray<int32_t>
SArray* data; // SArray<SRetrieveTableRsp*>
} SStreamDispatchReq;
};
typedef struct {
int64_t streamId;
@ -465,7 +494,9 @@ typedef struct {
int32_t upstreamTaskId;
int32_t downstreamNodeId;
int32_t downstreamTaskId;
int32_t msgId;
int8_t inputStatus;
int64_t stage;
} SStreamDispatchRsp;
typedef struct {
@ -522,7 +553,7 @@ typedef struct {
int32_t downstreamTaskId;
int32_t upstreamNodeId;
int32_t childId;
} SStreamScanHistoryFinishReq, SStreamTransferReq;
} SStreamScanHistoryFinishReq;
int32_t tEncodeStreamScanHistoryFinishReq(SEncoder* pEncoder, const SStreamScanHistoryFinishReq* pReq);
int32_t tDecodeStreamScanHistoryFinishReq(SDecoder* pDecoder, SStreamScanHistoryFinishReq* pReq);
@ -568,9 +599,19 @@ int32_t tEncodeStreamCheckpointReadyMsg(SEncoder* pEncoder, const SStreamCheckpo
int32_t tDecodeStreamCheckpointReadyMsg(SDecoder* pDecoder, SStreamCheckpointReadyMsg* pRsp);
typedef struct STaskStatusEntry {
int64_t streamId;
int32_t taskId;
STaskId id;
int32_t status;
int32_t stage;
int32_t nodeId;
int64_t verStart; // start version in WAL, only valid for source task
int64_t verEnd; // end version in WAL, only valid for source task
int64_t processedVer; // only valid for source task
int64_t activeCheckpointId; // current active checkpoint id
bool checkpointFailed; // denote if the checkpoint is failed or not
double inputQUsed; // in MiB
double inputRate;
double sinkQuota; // existed quota size for sink task
double sinkDataSize; // sink to dest data size
} STaskStatusEntry;
typedef struct SStreamHbMsg {
@ -636,15 +677,14 @@ void tDeleteStreamDispatchReq(SStreamDispatchReq* pReq);
int32_t streamSetupScheduleTrigger(SStreamTask* pTask);
int32_t streamProcessRunReq(SStreamTask* pTask);
int32_t streamProcessDispatchMsg(SStreamTask* pTask, SStreamDispatchReq* pReq, SRpcMsg* pMsg, bool exec);
int32_t streamProcessDispatchMsg(SStreamTask* pTask, SStreamDispatchReq* pReq, SRpcMsg* pMsg);
int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, int32_t code);
int32_t streamProcessRetrieveReq(SStreamTask* pTask, SStreamRetrieveReq* pReq, SRpcMsg* pMsg);
SStreamChildEpInfo* streamTaskGetUpstreamTaskEpInfo(SStreamTask* pTask, int32_t taskId);
void streamTaskInputFail(SStreamTask* pTask);
int32_t streamTryExec(SStreamTask* pTask);
int32_t streamExecTask(SStreamTask* pTask);
int32_t streamSchedExec(SStreamTask* pTask);
bool streamTaskShouldStop(const SStreamStatus* pStatus);
bool streamTaskShouldPause(const SStreamStatus* pStatus);
@ -656,10 +696,14 @@ char* createStreamTaskIdStr(int64_t streamId, int32_t taskId);
// recover and fill history
void streamTaskCheckDownstream(SStreamTask* pTask);
int32_t streamTaskLaunchScanHistory(SStreamTask* pTask);
int32_t streamTaskStartScanHistory(SStreamTask* pTask);
int32_t streamTaskCheckStatus(SStreamTask* pTask, int32_t upstreamTaskId, int32_t vgId, int64_t stage);
int32_t streamTaskUpdateEpsetInfo(SStreamTask* pTask, SArray* pNodeList);
void streamTaskResetUpstreamStageInfo(SStreamTask* pTask);
bool streamTaskAllUpstreamClosed(SStreamTask* pTask);
bool streamTaskSetSchedStatusWait(SStreamTask* pTask);
int8_t streamTaskSetSchedStatusActive(SStreamTask* pTask);
int8_t streamTaskSetSchedStatusInActive(SStreamTask* pTask);
int32_t streamTaskStop(SStreamTask* pTask);
int32_t streamSendCheckRsp(const SStreamMeta* pMeta, const SStreamTaskCheckReq* pReq, SStreamTaskCheckRsp* pRsp,
@ -670,14 +714,15 @@ int32_t streamTaskScanHistoryDataComplete(SStreamTask* pTask);
int32_t streamStartScanHistoryAsync(SStreamTask* pTask, int8_t igUntreated);
bool streamHistoryTaskSetVerRangeStep2(SStreamTask* pTask, int64_t latestVer);
int32_t streamQueueGetNumOfItems(const SStreamQueue* pQueue);
int32_t streamQueueGetAvailableSpace(const SStreamQueue* pQueue, int32_t* availNum, double* availSize);
// common
int32_t streamRestoreParam(SStreamTask* pTask);
int32_t streamSetStatusNormal(SStreamTask* pTask);
int32_t streamSetStatusUnint(SStreamTask* pTask);
const char* streamGetTaskStatusStr(int32_t status);
void streamTaskPause(SStreamTask* pTask, SStreamMeta* pMeta);
void streamTaskResume(SStreamTask* pTask, SStreamMeta* pMeta);
void streamTaskHalt(SStreamTask* pTask);
void streamTaskResumeFromHalt(SStreamTask* pTask);
void streamTaskDisablePause(SStreamTask* pTask);
void streamTaskEnablePause(SStreamTask* pTask);
@ -690,6 +735,9 @@ int32_t streamTaskReloadState(SStreamTask* pTask);
void streamTaskCloseUpstreamInput(SStreamTask* pTask, int32_t taskId);
void streamTaskOpenAllUpstreamInput(SStreamTask* pTask);
void streamTaskStatusInit(STaskStatusEntry* pEntry, const SStreamTask* pTask);
void streamTaskStatusCopy(STaskStatusEntry* pDst, const STaskStatusEntry* pSrc);
// source level
int32_t streamSetParamForStreamScannerStep1(SStreamTask* pTask, SVersionRange* pVerRange, STimeWindow* pWindow);
int32_t streamSetParamForStreamScannerStep2(SStreamTask* pTask, SVersionRange* pVerRange, STimeWindow* pWindow);
@ -707,24 +755,27 @@ void streamMetaCleanup();
SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandFunc, int32_t vgId, int64_t stage);
void streamMetaClose(SStreamMeta* streamMeta);
int32_t streamMetaSaveTask(SStreamMeta* pMeta, SStreamTask* pTask); // save to stream meta store
int32_t streamMetaRemoveTask(SStreamMeta* pMeta, int64_t* pKey);
int32_t streamMetaRemoveTask(SStreamMeta* pMeta, STaskId* pKey);
int32_t streamMetaRegisterTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTask, bool* pAdded);
int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId);
int32_t streamMetaGetNumOfTasks(SStreamMeta* pMeta);
int32_t streamMetaGetNumOfStreamTasks(SStreamMeta* pMeta);
SStreamTask* streamMetaAcquireTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId);
void streamMetaReleaseTask(SStreamMeta* pMeta, SStreamTask* pTask);
int32_t streamMetaReopen(SStreamMeta* pMeta, int64_t chkpId);
int32_t streamMetaReopen(SStreamMeta* pMeta);
int32_t streamMetaCommit(SStreamMeta* pMeta);
int32_t streamMetaLoadAllTasks(SStreamMeta* pMeta);
void streamMetaNotifyClose(SStreamMeta* pMeta);
void streamMetaStartHb(SStreamMeta* pMeta);
void streamMetaInitForSnode(SStreamMeta* pMeta);
// checkpoint
int32_t streamProcessCheckpointSourceReq(SStreamTask* pTask, SStreamCheckpointSourceReq* pReq);
int32_t streamProcessCheckpointReadyMsg(SStreamTask* pTask);
void streamTaskClearCheckInfo(SStreamTask* pTask);
int32_t streamAlignTransferState(SStreamTask* pTask);
int32_t streamBuildAndSendDropTaskMsg(SMsgCb* pMsgCb, int32_t vgId, SStreamTaskId* pTaskId);
int32_t streamAddCheckpointSourceRspMsg(SStreamCheckpointSourceReq* pReq, SRpcHandleInfo* pRpcInfo, SStreamTask* pTask,
int8_t isSucceed);
int32_t buildCheckpointSourceRsp(SStreamCheckpointSourceReq* pReq, SRpcHandleInfo* pRpcInfo, SRpcMsg* pMsg,

View File

@ -79,6 +79,9 @@ int32_t getSessionFlushedBuff(SStreamFileState* pFileState, SSessionKey* pKey, v
int32_t deleteSessionWinStateBuffFn(void* pBuff, const void *key, size_t keyLen);
int32_t deleteSessionWinStateBuffByPosFn(SStreamFileState* pFileState, SRowBuffPos* pPos);
SRowBuffPos* createSessionWinBuff(SStreamFileState* pFileState, SSessionKey* pKey, void* p, int32_t* pVLen);
int32_t recoverSesssion(SStreamFileState* pFileState, int64_t ckId);
void sessionWinStateClear(SStreamFileState* pFileState);
void sessionWinStateCleanup(void* pBuff);

View File

@ -36,8 +36,7 @@ extern "C" {
#define SYNC_DEL_WAL_MS (1000 * 60)
#define SYNC_ADD_QUORUM_COUNT 3
#define SYNC_VNODE_LOG_RETENTION (TSDB_SYNC_LOG_BUFFER_RETENTION + 1)
#define SNAPSHOT_MAX_CLOCK_SKEW_MS 1000 * 10
#define SNAPSHOT_WAIT_MS 1000 * 30
#define SNAPSHOT_WAIT_MS 1000 * 5
#define SYNC_MAX_RETRY_BACKOFF 5
#define SYNC_LOG_REPL_RETRY_WAIT_MS 100
@ -87,6 +86,11 @@ typedef enum {
TAOS_SYNC_ROLE_ERROR = 2,
} ESyncRole;
typedef enum {
SYNC_FSM_STATE_COMPLETE = 0,
SYNC_FSM_STATE_INCOMPLETE,
} ESyncFsmState;
typedef struct SNodeInfo {
int64_t clusterId;
int32_t nodeId;
@ -95,6 +99,12 @@ typedef struct SNodeInfo {
ESyncRole nodeRole;
} SNodeInfo;
typedef struct SSyncTLV {
int32_t typ;
int32_t len;
char val[];
} SSyncTLV;
typedef struct SSyncCfg {
int32_t totalReplicaNum;
int32_t replicaNum;
@ -139,10 +149,13 @@ typedef struct SReConfigCbMeta {
typedef struct SSnapshotParam {
SyncIndex start;
SyncIndex end;
SSyncTLV* data;
} SSnapshotParam;
typedef struct SSnapshot {
void* data;
int32_t type;
SSyncTLV* data;
ESyncFsmState state;
SyncIndex lastApplyIndex;
SyncTerm lastApplyTerm;
SyncIndex lastConfigIndex;
@ -171,7 +184,7 @@ typedef struct SSyncFSM {
void (*FpBecomeLearnerCb)(const struct SSyncFSM* pFsm);
int32_t (*FpGetSnapshot)(const struct SSyncFSM* pFsm, SSnapshot* pSnapshot, void* pReaderParam, void** ppReader);
void (*FpGetSnapshotInfo)(const struct SSyncFSM* pFsm, SSnapshot* pSnapshot);
int32_t (*FpGetSnapshotInfo)(const struct SSyncFSM* pFsm, SSnapshot* pSnapshot);
int32_t (*FpSnapshotStartRead)(const struct SSyncFSM* pFsm, void* pReaderParam, void** ppReader);
void (*FpSnapshotStopRead)(const struct SSyncFSM* pFsm, void* pReader);

View File

@ -163,6 +163,7 @@ int rpcReleaseHandle(void *handle, int8_t type); // just release conn to rpc in
// These functions will not be called in the child process
int rpcSendRequestWithCtx(void *thandle, const SEpSet *pEpSet, SRpcMsg *pMsg, int64_t *rid, SRpcCtx *ctx);
int rpcSendRecv(void *shandle, SEpSet *pEpSet, SRpcMsg *pReq, SRpcMsg *pRsp);
int rpcSendRecvWithTimeout(void *shandle, SEpSet *pEpSet, SRpcMsg *pMsg, SRpcMsg *pRsp, int32_t timeoutMs);
int rpcSetDefaultAddr(void *thandle, const char *ip, const char *fqdn);
void *rpcAllocHandle();
void rpcSetIpWhite(void *thandl, void *arg);

View File

@ -225,7 +225,10 @@ void syslog(int unused, const char *format, ...);
#endif
#else
// Windows
#define setThreadName(name)
#define setThreadName(name) \
do { \
pthread_setname_np(taosThreadSelf(), (name)); \
} while (0)
#endif
#if defined(_WIN32)

View File

@ -54,6 +54,17 @@ typedef int32_t (*__ext_compar_fn_t)(const void *p1, const void *p2, const void
*/
void taosqsort(void *src, int64_t numOfElem, int64_t size, const void *param, __ext_compar_fn_t comparFn);
/**
* merge sort, using the standard compare function
*
* @param src
* @param numOfElem
* @param size
* @param comparFn
* @return int32_t 0 for success, other for failure.
*/
int32_t taosMergeSort(void *src, int64_t numOfElem, int64_t size, __compar_fn_t comparFn);
/**
* binary search, with range support
*

View File

@ -557,7 +557,7 @@ int32_t* taosGetErrno();
// #define TSDB_CODE_SYN_TOO_MANY_FWDINFO TAOS_DEF_ERROR_CODE(0, 0x0904) // 2.x
// #define TSDB_CODE_SYN_MISMATCHED_PROTOCOL TAOS_DEF_ERROR_CODE(0, 0x0905) // 2.x
// #define TSDB_CODE_SYN_MISMATCHED_CLUSTERID TAOS_DEF_ERROR_CODE(0, 0x0906) // 2.x
// #define TSDB_CODE_SYN_MISMATCHED_SIGNATURE TAOS_DEF_ERROR_CODE(0, 0x0907) // 2.x
#define TSDB_CODE_SYN_MISMATCHED_SIGNATURE TAOS_DEF_ERROR_CODE(0, 0x0907)
// #define TSDB_CODE_SYN_INVALID_CHECKSUM TAOS_DEF_ERROR_CODE(0, 0x0908) // 2.x
// #define TSDB_CODE_SYN_INVALID_MSGLEN TAOS_DEF_ERROR_CODE(0, 0x0909) // 2.x
// #define TSDB_CODE_SYN_INVALID_MSGTYPE TAOS_DEF_ERROR_CODE(0, 0x090A) // 2.x
@ -799,6 +799,8 @@ int32_t* taosGetErrno();
#define TSDB_CODE_TMQ_NEED_INITIALIZED TAOS_DEF_ERROR_CODE(0, 0x4010)
#define TSDB_CODE_TMQ_NO_COMMITTED TAOS_DEF_ERROR_CODE(0, 0x4011)
#define TSDB_CODE_TMQ_SAME_COMMITTED_VALUE TAOS_DEF_ERROR_CODE(0, 0x4012)
#define TSDB_CODE_TMQ_REPLAY_NEED_ONE_VGROUP TAOS_DEF_ERROR_CODE(0, 0x4013)
#define TSDB_CODE_TMQ_REPLAY_NOT_SUPPORT TAOS_DEF_ERROR_CODE(0, 0x4014)
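
Editor's note: the two new TMQ codes gate the replay feature on the consumer side. A sketch of mapping them to user-facing hints; the helper and message strings are illustrative, not part of the client API.

#include <stddef.h>
// TSDB_CODE_* macros come from this header.

// Sketch: translate the new replay error codes for the user.
static const char* replayErrHint(int32_t code) {
  if (code == TSDB_CODE_TMQ_REPLAY_NEED_ONE_VGROUP)
    return "replay requires the subscribed topic to reside in exactly one vgroup";
  if (code == TSDB_CODE_TMQ_REPLAY_NOT_SUPPORT)
    return "replay is not supported for this kind of subscription";
  return NULL;  // not a replay error
}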
// stream
#define TSDB_CODE_STREAM_TASK_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x4100)

View File

@ -214,12 +214,19 @@ void taosArrayDestroyEx(SArray* pArray, FDelete fp);
void taosArraySwap(SArray* a, SArray* b);
/**
* sort the array
* sort the array using qsort
* @param pArray
* @param comparFn
*/
void taosArraySort(SArray* pArray, __compar_fn_t comparFn);
/**
* sort the array using merge sort
* @param pArray
* @param comparFn
*/
int32_t taosArrayMSort(SArray* pArray, __compar_fn_t comparFn);
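
Editor's note: a short sketch pairing the two SArray sorts; taosArrayMSort is the stable variant and, unlike taosArraySort, reports failure. Reuse of the cmpInt32 comparator from the talgo sketch is assumed.

// Sketch: prefer taosArrayMSort when equal elements must keep their order.
static int32_t sortArrayStable(SArray* pArray) {
  int32_t code = taosArrayMSort(pArray, cmpInt32);  // 0 on success (assumed)
  if (code != 0) {
    // surface the error; merge sort may fail on allocation
  }
  return code;
}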
/**
* search the array
* @param pArray

View File

@ -165,6 +165,13 @@ static FORCE_INLINE int32_t tarray2SortInsert(void *arr, const void *elePtr, int
#define TARRAY2_FOREACH_PTR_REVERSE(a, ep) \
for (int32_t __i = (a)->size - 1; __i >= 0 && ((ep) = &(a)->data[__i], 1); __i--)
#define TARRAY2_SORT(a, cmp)                                                    \
  do {                                                                          \
    if ((a)->size > 1) {                                                        \
      taosSort((a)->data, (a)->size, sizeof((a)->data[0]), (__compar_fn_t)cmp); \
    }                                                                           \
  } while (0)
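
Editor's note: TARRAY2_SORT guards the trivial case and delegates to taosSort over the inline data array. A usage sketch; the TARRAY2(TYPE) container macro from this header and the comparator are assumptions.

typedef TARRAY2(int32_t) I32Array;  // assumed TARRAY2 container from this header

static int32_t cmpI32(const void* a, const void* b) {
  int32_t x = *(const int32_t*)a, y = *(const int32_t*)b;
  return (x > y) - (x < y);
}

static void sortI32(I32Array* arr) {
  TARRAY2_SORT(arr, cmpI32);  // no-op when size <= 1, taosSort otherwise
}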
#ifdef __cplusplus
}
#endif

View File

@ -24,6 +24,9 @@
extern "C" {
#endif
#define HASH_FUNCTION_1 taosFastHash
#define HASH_FUNCTION_2 taosDJB2Hash
typedef struct SBloomFilter {
uint32_t hashFunctions;
uint64_t expectedEntries;
@ -37,8 +40,9 @@ typedef struct SBloomFilter {
} SBloomFilter;
SBloomFilter *tBloomFilterInit(uint64_t expectedEntries, double errorRate);
int32_t tBloomFilterPutHash(SBloomFilter *pBF, uint64_t hash1, uint64_t hash2);
int32_t tBloomFilterPut(SBloomFilter *pBF, const void *keyBuf, uint32_t len);
int32_t tBloomFilterNoContain(const SBloomFilter *pBF, const void *keyBuf, uint32_t len);
int32_t tBloomFilterNoContain(const SBloomFilter *pBF, uint64_t h1, uint64_t h2);
void tBloomFilterDestroy(SBloomFilter *pBF);
void tBloomFilterDump(const SBloomFilter *pBF);
bool tBloomFilterIsFull(const SBloomFilter *pBF);
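
Editor's note: with the reworked API the caller computes the two hashes once (HASH_FUNCTION_1/HASH_FUNCTION_2 above) and reuses the pair for both insert and membership test, instead of the filter re-hashing on every call. A sketch; the return-value semantics of tBloomFilterNoContain are an assumption.

// Sketch: hash once, probe twice.
static void putAndProbe(SBloomFilter* pBF, const void* key, uint32_t len) {
  uint64_t h1 = HASH_FUNCTION_1(key, len);  // taosFastHash
  uint64_t h2 = HASH_FUNCTION_2(key, len);  // taosDJB2Hash
  tBloomFilterPutHash(pBF, h1, h2);
  // assumed: non-zero result means the key is definitely not present
  int32_t absent = tBloomFilterNoContain(pBF, h1, h2);
  (void)absent;
}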

View File

@ -249,7 +249,7 @@ typedef enum ELogicConditionType {
#define TSDB_PASSWORD_LEN 32
#define TSDB_USET_PASSWORD_LEN 129
#define TSDB_VERSION_LEN 32
#define TSDB_LABEL_LEN 8
#define TSDB_LABEL_LEN 12
#define TSDB_JOB_STATUS_LEN 32
#define TSDB_CLUSTER_ID_LEN 40
@ -382,6 +382,7 @@ typedef enum ELogicConditionType {
#define TSDB_MAX_STT_TRIGGER 1
#define TSDB_DEFAULT_SST_TRIGGER 1
#endif
#define TSDB_STT_TRIGGER_ARRAY_SIZE 16  // the maximum TSDB_MAX_STT_TRIGGER across TD_ENTERPRISE and TD_COMMUNITY builds
#define TSDB_MIN_HASH_PREFIX (2 - TSDB_TABLE_NAME_LEN)
#define TSDB_MAX_HASH_PREFIX (TSDB_TABLE_NAME_LEN - 2)
#define TSDB_DEFAULT_HASH_PREFIX 0

View File

@ -55,6 +55,7 @@ extern int32_t tmrDebugFlag;
extern int32_t uDebugFlag;
extern int32_t rpcDebugFlag;
extern int32_t qDebugFlag;
extern int32_t stDebugFlag;
extern int32_t wDebugFlag;
extern int32_t sDebugFlag;
extern int32_t tsdbDebugFlag;

View File

@ -26,9 +26,12 @@ typedef struct SScalableBf {
SArray *bfArray; // array of bloom filters
uint32_t growth;
uint64_t numBits;
_hash_fn_t hashFn1;
_hash_fn_t hashFn2;
} SScalableBf;
SScalableBf *tScalableBfInit(uint64_t expectedEntries, double errorRate);
int32_t tScalableBfPutNoCheck(SScalableBf *pSBf, const void *keyBuf, uint32_t len);
int32_t tScalableBfPut(SScalableBf *pSBf, const void *keyBuf, uint32_t len);
int32_t tScalableBfNoContain(const SScalableBf *pSBf, const void *keyBuf, uint32_t len);
void tScalableBfDestroy(SScalableBf *pSBf);
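
Editor's note: since the scalable filter now pins hashFn1/hashFn2 on the struct, every layer hashes keys consistently as the filter grows. A dedup-style sketch; the non-zero-means-definitely-absent semantics of tScalableBfNoContain are an assumption.

// Sketch: only insert keys the filter has definitely not seen.
static void dedupPut(SScalableBf* pSBf, const void* key, uint32_t len) {
  if (tScalableBfNoContain(pSBf, key, len) != 0) {  // assumed: definitely absent
    tScalableBfPut(pSBf, key, len);
  }
}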

View File

@ -98,6 +98,9 @@
# enable/disable system monitor
# monitor 1
# enable/disable audit log
# audit 1
# The following parameter is used to limit the maximum number of lines in log files.
# max number of lines per log file
# numOfLogLines 10000000

View File

@ -8,7 +8,7 @@ Type=simple
ExecStart=/usr/bin/taosd
ExecStartPre=/usr/local/taos/bin/startPre.sh
TimeoutStopSec=1000000s
LimitNOFILE=infinity
LimitNOFILE=1048576
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0

View File

@ -89,7 +89,7 @@ else
${build_dir}/bin/taosBenchmark \
${build_dir}/bin/TDinsight.sh \
${build_dir}/bin/tdengine-datasource.zip \
${build_dir}/bin/tdengine-datasource.zip.md5sum"
${build_dir}/bin/tdengine-datasource.zip.md5"
fi
[ -f ${build_dir}/bin/taosx ] && taosx_bin="${build_dir}/bin/taosx"

View File

@ -719,6 +719,21 @@ int taos_init() {
int taos_options_imp(TSDB_OPTION option, const char *str) {
if (option == TSDB_OPTION_CONFIGDIR) {
#ifndef WINDOWS
    char newstr[PATH_MAX];
    int len = strlen(str);
    if (len > 1 && str[0] != '"' && str[0] != '\'') {
      if (len + 2 >= PATH_MAX) {
        tscError("Too long path %s", str);
        return -1;
      }
      newstr[0] = '"';
      strncpy(newstr + 1, str, len);
      newstr[len + 1] = '"';
      newstr[len + 2] = '\0';
      str = newstr;
    }
#endif
tstrncpy(configDir, str, PATH_MAX);
tscInfo("set cfg:%s to %s", configDir, str);
return 0;
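
Editor's note: from the application side this change is transparent: an unquoted path handed to the public taos_options entry is wrapped in quotes before being stored, so paths containing spaces survive later config parsing on non-Windows platforms. A sketch; the sample path is illustrative.

#include "taos.h"

static void setCfgDir(void) {
  // The client now quotes this value internally (non-Windows builds).
  taos_options(TSDB_OPTION_CONFIGDIR, "/etc/my taos/cfg");
}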

View File

@ -157,6 +157,10 @@ STscObj* taos_connect_internal(const char* ip, const char* user, const char* pas
tscDebug("new app inst mgr %p, user:%s, ip:%s, port:%d", p, user, epSet.epSet.eps[0].fqdn, epSet.epSet.eps[0].port);
pInst = &p;
} else {
ASSERTS((*pInst) && (*pInst)->pAppHbMgr, "*pInst:%p, pAppHbMgr:%p", *pInst, (*pInst) ? (*pInst)->pAppHbMgr : NULL);
// reset to 0 in case a conn with a duplicated user key arrives after its user has been dropped.
atomic_store_8(&(*pInst)->pAppHbMgr->connHbFlag, 0);
}
taosThreadMutexUnlock(&appInfo.mutex);

View File

@ -377,6 +377,7 @@ _exit:
  for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
    pCreateReq = req.pReqs + iReq;
    taosMemoryFreeClear(pCreateReq->comment);
    taosMemoryFreeClear(pCreateReq->sql);
    if (pCreateReq->type == TSDB_CHILD_TABLE) {
      taosArrayDestroy(pCreateReq->ctb.tagName);
    }

Some files were not shown because too many files have changed in this diff.