Merge remote-tracking branch 'origin/3.0' into feat/TD-22746

This commit is contained in:
dapan1121 2023-04-25 10:07:20 +08:00
commit cde97b6bc1
679 changed files with 31584 additions and 14237 deletions

View File

@ -10,6 +10,8 @@ if (NOT DEFINED TD_SOURCE_DIR)
set( TD_SOURCE_DIR ${PROJECT_SOURCE_DIR} )
endif()
SET(TD_COMMUNITY_DIR ${PROJECT_SOURCE_DIR})
set(TD_SUPPORT_DIR "${TD_SOURCE_DIR}/cmake")
set(TD_CONTRIB_DIR "${TD_SOURCE_DIR}/contrib")

CODE_OF_CONDUCT-CN.md Normal file
View File

@ -0,0 +1,47 @@
# Contributor Covenant Code of Conduct
[![Contributor Covenant](https://img.shields.io/badge/Contributor%20Covenant-v1.4%20adopted-ff69b4.svg)](code_of_conduct.md)
## Our Pledge
In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to make participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment include:
- Using welcoming and inclusive language
- Being respectful of differing viewpoints and experiences
- Gracefully accepting constructive criticism
- Focusing on what is best for the community
- Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
- The use of sexualized language or imagery and unwelcome sexual attention or advances
- Trolling, insulting/derogatory comments, and personal or political attacks
- Public or private harassment
- Publishing others' private information, such as a physical or electronic address, without explicit permission
- Other conduct which could reasonably be considered inappropriate in a professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned with this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
## Scope
This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at support@taosdata.com. All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the Contributor Covenant, version 1.4, available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
For answers to common questions about this code of conduct, see https://www.contributor-covenant.org/faq

View File

@ -460,7 +460,7 @@ pipeline {
cd ${WKC}/tests/parallel_test
export DEFAULT_RETRY_TIME=2
date
''' + timeout_cmd + ''' time ./run.sh -e -m /home/m.json -t cases.task -b ${BRANCH_NAME}_${BUILD_ID} -l ${WKDIR}/log -o 480 ''' + extra_param + '''
''' + timeout_cmd + ''' time ./run.sh -e -m /home/m.json -t cases.task -b ${BRANCH_NAME}_${BUILD_ID} -l ${WKDIR}/log -o 600 ''' + extra_param + '''
'''
}
}

View File

@ -121,7 +121,7 @@ ELSE ()
MESSAGE(STATUS "Compile with Address Sanitizer!")
ELSE ()
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-reserved-user-defined-literal -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
ENDIF ()
# disable all assert

View File

@ -2,7 +2,7 @@
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "3.0.3.0")
SET(TD_VER_NUMBER "3.0.4.0")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)
@ -16,7 +16,7 @@ find_program(HAVE_GIT NAMES git)
IF (DEFINED GITINFO)
SET(TD_VER_GIT ${GITINFO})
ELSEIF (HAVE_GIT)
execute_process(COMMAND git log -1 --format=%H WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} OUTPUT_VARIABLE GIT_COMMITID)
execute_process(COMMAND git log -1 --format=%H WORKING_DIRECTORY ${TD_COMMUNITY_DIR} OUTPUT_VARIABLE GIT_COMMITID)
#message(STATUS "git log result:${GIT_COMMITID}")
IF (GIT_COMMITID)
string (REGEX REPLACE "[\n\t\r]" "" GIT_COMMITID ${GIT_COMMITID})
@ -30,6 +30,23 @@ ELSE ()
SET(TD_VER_GIT "no git commit id")
ENDIF ()
IF (DEFINED GITINFOI)
SET(TD_VER_GIT_INTERNAL ${GITINFOI})
ELSEIF (HAVE_GIT)
execute_process(COMMAND git log -1 --format=%H WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} OUTPUT_VARIABLE GIT_COMMITID)
message(STATUS "git log result:${GIT_COMMITID}")
IF (GIT_COMMITID)
string (REGEX REPLACE "[\n\t\r]" "" GIT_COMMITID ${GIT_COMMITID})
SET(TD_VER_GIT_INTERNAL ${GIT_COMMITID})
ELSE ()
message(STATUS "not a git repository")
SET(TD_VER_GIT "no git commit id")
ENDIF ()
ELSE ()
message(STATUS "no git cmd")
SET(TD_VER_GIT_INTERNAL "no git commit id")
ENDIF ()
IF (DEFINED VERDATE)
SET(TD_VER_DATE ${VERDATE})
ELSE ()

View File

@ -2,7 +2,7 @@
# taosadapter
ExternalProject_Add(taosadapter
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
GIT_TAG d8059ff
GIT_TAG e02ddb2
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE

View File

@ -2,7 +2,7 @@
# taos-tools
ExternalProject_Add(taos-tools
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
GIT_TAG d9ec91d
GIT_TAG ffc2e6f
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE

compile_flags.txt Normal file
View File

@ -0,0 +1,9 @@
-DLINUX
-DWEBSOCKET
-I/usr/include
-Iinclude
-Iinclude/os
-Iinclude/common
-Iinclude/util
-Iinclude/libs/transport
-Itools/shell/inc

View File

@ -188,7 +188,7 @@ You can use the TDengine CLI to monitor your TDengine deployment and execute ad
<TabItem label="Windows" value="windows">
After the installation is complete, run `C:\TDengine\taosd.exe` to start TDengine Server.
After the installation is complete, please run `sc start taosd` or run `C:\TDengine\taosd.exe` with administrator privilege to start TDengine Server.
## Command Line Interface (CLI)

View File

@ -23,7 +23,7 @@ By subscribing to a topic, a consumer can obtain the latest data in that topic i
To implement these features, TDengine indexes its write-ahead log (WAL) file for fast random access and provides configurable methods for replacing and retaining this file. You can define a retention period and size for this file. For information, see the CREATE DATABASE statement. In this way, the WAL file is transformed into a persistent storage engine that remembers the order in which events occur. However, note that configuring an overly long retention period for your WAL files makes database compression inefficient. TDengine then uses the WAL file instead of the time-series database as its storage engine for queries in the form of topics. TDengine reads the data from the WAL file; uses a unified query engine instance to perform filtering, transformations, and other operations; and finally pushes the data to consumers.
Tip: By default, data subscription consumes data from the WAL. If the WAL has been deleted, the consumed data will be incomplete. In that case, you can set the parameter `experimental.snapshot.enable` to true to obtain all data from TSDB, but the consumption order of the data can then no longer be guaranteed. Therefore, it is recommended to set a reasonable WAL retention policy based on your consumption needs to ensure that you can subscribe to all data from the WAL.
## Data Schema and API
@ -285,18 +285,17 @@ You configure the following parameters when creating a consumer:
| Parameter | Type | Description | Remarks |
| :----------------------------: | :-----: | -------------------------------------------------------- | ------------------------------------------- |
| `td.connect.ip` | string | Used in establishing a connection; same as `taos_connect` | |
| `td.connect.user` | string | Used in establishing a connection; same as `taos_connect` | |
| `td.connect.pass` | string | Used in establishing a connection; same as `taos_connect` | |
| `td.connect.port` | string | Used in establishing a connection; same as `taos_connect` | |
| `td.connect.ip` | string | Used in establishing a connection; same as `taos_connect` | Only valid for establishing native connection |
| `td.connect.user` | string | Used in establishing a connection; same as `taos_connect` | Only valid for establishing native connection |
| `td.connect.pass` | string | Used in establishing a connection; same as `taos_connect` | Only valid for establishing native connection |
| `td.connect.port` | string | Used in establishing a connection; same as `taos_connect` | Only valid for establishing native connection |
| `group.id` | string | Consumer group ID; consumers with the same ID are in the same group | **Required**. Maximum length: 192. |
| `client.id` | string | Client ID | Maximum length: 192. |
| `auto.offset.reset` | enum | Initial offset for the consumer group | Specify `earliest`, `latest`, or `none`(default) |
| `enable.auto.commit` | boolean | Commit automatically | Specify `true` or `false`. |
| `enable.auto.commit` | boolean | Commit automatically; true: the application does not need to commit explicitly; false: the application must handle commits itself | Default value: true |
| `auto.commit.interval.ms` | integer | Interval for automatic commits, in milliseconds | |
| `enable.heartbeat.background` | boolean | Backend heartbeat; if enabled, the consumer does not go offline even if it has not polled for a long time | |
| `experimental.snapshot.enable` | boolean | Specify whether to consume messages from TSDB | |
| `msg.with.table.name` | boolean | Specify whether to deserialize table names from messages |
| `experimental.snapshot.enable` | boolean | Specify whether to consume data in TSDB; true: both data in WAL and in TSDB can be consumed; false: only data in WAL can be consumed | Default value: false |
| `msg.with.table.name` | boolean | Specify whether to deserialize table names from messages | Default value: false |
The method of specifying these parameters depends on the language used:
@ -369,7 +368,6 @@ conf := &tmq.ConfigMap{
"td.connect.port": "6030",
"client.id": "test_tmq_c",
"enable.auto.commit": "false",
"enable.heartbeat.background": "true",
"experimental.snapshot.enable": "true",
"msg.with.table.name": "true",
}

View File

@ -35,8 +35,8 @@ database_option: {
| TABLE_SUFFIX value
| TSDB_PAGESIZE value
| WAL_RETENTION_PERIOD value
| WAL_ROLL_PERIOD value
| WAL_RETENTION_SIZE value
| WAL_ROLL_PERIOD value
| WAL_SEGMENT_SIZE value
}
```
@ -75,11 +75,10 @@ database_option: {
- TABLE_PREFIX: The prefix length in the table name that is ignored when distributing tables to vnodes based on table name.
- TABLE_SUFFIX: The suffix length in the table name that is ignored when distributing tables to vnodes based on table name.
- TSDB_PAGESIZE: The page size of the data storage engine in a vnode. The unit is KB. The default is 4 KB. The range is 1 to 16384, that is, 1 KB to 16 MB.
- WAL_RETENTION_PERIOD: specifies the maximum time for which WAL files are kept after consumption. This parameter is used for data subscription. Enter a time in seconds. The default value is 0. A value of 0 indicates that WAL files are not required to be kept after consumption. -1: the time for which WAL files are kept has no upper limit.
- WAL_RETENTION_SIZE: specifies the maximum total size of WAL files that are kept after consumption. This parameter is used for data subscription. Enter a size in KB. The default value is 0. A value of 0 indicates that WAL files are not required to be kept after consumption. -1: the total size of WAL files to keep has no upper limit.
- WAL_RETENTION_PERIOD: specifies the maximum time for which WAL files are kept for consumption. This parameter is used for data subscription. Enter a time in seconds. The default value is 0. A value of 0 indicates that WAL files are not required to be kept for consumption. Set it to a proper value before creating topics.
- WAL_RETENTION_SIZE: specifies the maximum total size of WAL files that are kept for consumption. This parameter is used for data subscription. Enter a size in KB. The default value is 0. A value of 0 indicates that the total size of WAL files kept for consumption has no upper limit.
- WAL_ROLL_PERIOD: specifies the time after which WAL files are rotated. After this period elapses, a new WAL file is created. The default value is 0. A value of 0 indicates that a new WAL file is created only after TSDB data in memory are flushed to disk.
- WAL_SEGMENT_SIZE: specifies the maximum size of a WAL file. After the current WAL file reaches this size, a new WAL file is created. The default value is 0. A value of 0 indicates that a new WAL file is created only after TSDB data in memory are flushed to disk.
### Example Statement
```sql
@ -123,6 +122,8 @@ alter_database_option: {
| WAL_LEVEL value
| WAL_FSYNC_PERIOD value
| KEEP value
| WAL_RETENTION_PERIOD value
| WAL_RETENTION_SIZE value
}
```
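For example, if a database was created with the default `WAL_RETENTION_PERIOD 0`, you can raise it before creating topics. A minimal sketch; `db_name` is a placeholder:

```sql
-- keep WAL files for 4 days (345600 seconds) so subscribers can consume them
ALTER DATABASE db_name WAL_RETENTION_PERIOD 345600;
```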
@ -179,6 +180,14 @@ TRIM DATABASE db_name;
The preceding SQL statement deletes data that has expired and orders the remaining data in accordance with the storage configuration.
## Flush Data
```sql
FLUSH DATABASE db_name;
```
Flushes data from memory to disk. Executing this command before shutting down a node avoids data recovery after restart and speeds up the startup process.
## Redistribute Vgroup
```sql

View File

@ -13,12 +13,11 @@ create_definition:
col_name column_definition
column_definition:
type_name [COMMENT 'string_value']
type_name
```
**More explanations**
- Each supertable can have a maximum of 4096 columns, including tags. The minimum number of columns is 3: a timestamp column used as the key, one tag column, and one data column.
- When you create a supertable, you can add comments to columns and tags.
- The TAGS keyword defines the tag columns for the supertable. The following restrictions apply to tag columns:
- A tag column can use the TIMESTAMP data type, but the values in the column must be fixed numbers. Timestamps including formulae, such as "now + 10s", cannot be stored in a tag column.
- The name of a tag column cannot be the same as the name of any other column.

View File

@ -248,13 +248,13 @@ You can also use the NULLS keyword to specify the position of null values. Ascen
The LIMIT keyword controls the number of results that are displayed. You can also use the OFFSET keyword to specify the first result to display. `LIMIT` and `OFFSET` are executed after `ORDER BY` in the query execution. You can include an offset in a LIMIT clause. For example, LIMIT 5 OFFSET 2 can also be written as LIMIT 2, 5. Both of these clauses display the third through the seventh results.
In a statement that includes a PARTITION BY clause, the LIMIT keyword is performed on each partition, not on the entire set of results.
In a statement that includes a PARTITION BY/GROUP BY clause, the LIMIT keyword is performed on each partition/group, not on the entire set of results.
## SLIMIT
The SLIMIT keyword is used with a PARTITION BY clause to control the number of partitions that are displayed. You can include an offset in a SLIMIT clause. For example, SLIMIT 5 OFFSET 2 can also be written LIMIT 2, 5. Both of these clauses display the third through the seventh partitions.
The SLIMIT keyword is used with a PARTITION BY/GROUP BY clause to control the number of partitions/groups that are displayed. You can include an offset in a SLIMIT clause. For example, SLIMIT 5 OFFSET 2 can also be written as SLIMIT 2, 5. Both of these clauses display the third through the seventh partitions/groups.
Note: If you include an ORDER BY clause, only one partition can be displayed.
Note: If you include an ORDER BY clause, only one partition/group can be displayed.
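A minimal sketch combining the two clauses, assuming a hypothetical supertable `meters` with a tag `location` and a column `current`:

```sql
-- SLIMIT: return at most 2 partitions (locations);
-- LIMIT: return at most 5 rows within each partition
SELECT location, ts, current
FROM meters
PARTITION BY location
SLIMIT 2 LIMIT 5;
```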
## Special Query

View File

@ -459,12 +459,17 @@ TO_JSON(str_literal)
#### TO_UNIXTIMESTAMP
```sql
TO_UNIXTIMESTAMP(expr)
TO_UNIXTIMESTAMP(expr [, return_timestamp])
return_timestamp: {
0
| 1
}
```
**Description**: UNIX timestamp converted from a string of date/time format
**Return value type**: BIGINT
**Return value type**: BIGINT, TIMESTAMP
**Applicable column types**: VARCHAR and NCHAR
@ -476,6 +481,7 @@ TO_UNIXTIMESTAMP(expr)
- The input string must be compatible with ISO8601/RFC3339 standard, NULL will be returned if the string can't be converted
- The precision of the returned timestamp is same as the precision set for the current data base in use
- `return_timestamp` indicates whether the returned value type is TIMESTAMP. If this parameter is set to 1, the function returns TIMESTAMP; otherwise it returns BIGINT. If the parameter is omitted, the default return type is BIGINT.
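A minimal sketch of both return types (the literal timestamp is arbitrary):

```sql
SELECT TO_UNIXTIMESTAMP('2023-04-25T10:07:20.000+08:00');    -- returns BIGINT
SELECT TO_UNIXTIMESTAMP('2023-04-25T10:07:20.000+08:00', 1); -- returns TIMESTAMP
```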
### Time and Date Functions

View File

@ -75,6 +75,16 @@ These pseudocolumns occur after the aggregation clause.
5. LINEAR: Fill with the closest non-NULL value, `FILL(LINEAR)`
6. NEXT: Fill with the next non-NULL value, `FILL(NEXT)`
In the above filling modes, except for `NONE`, the `FILL` clause is ignored if there is no data in the defined time range, i.e. no data is filled and the query result is empty. This behavior is reasonable for `PREV`, `NEXT`, and `LINEAR`, because filling cannot be performed without any data. For `NULL` and `VALUE`, however, filling can be performed even without any data, and whether to fill depends on the user's application. To support this forced filling behavior without breaking the existing filling modes, TDengine added two new filling modes in version 3.0.3.0.
1. NULL_F: Fill `NULL` by force
2. VALUE_F: Fill `VALUE` by force
The detailed behaviors of `NULL`, `NULL_F`, `VALUE`, and `VALUE_F` are described below:
- When used with `INTERVAL`: `NULL_F` and `VALUE_F` fill by force; `NULL` and `VALUE` do not. The behavior of each filling mode is exactly what its name suggests.
- When used with `INTERVAL` in stream processing: `NULL_F` and `NULL` are the same, i.e. no forced filling; `VALUE_F` and `VALUE` are the same, i.e. no forced filling. In other words, there is no forced filling in stream processing.
- When used with `INTERP`: `NULL` and `NULL_F` are the same, i.e. forced filling; `VALUE` and `VALUE_F` are the same, i.e. forced filling. In other words, filling is always forced when used with `INTERP`.
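A minimal sketch of forced filling, assuming a hypothetical table `t` with a timestamp column `ts` and a numeric column `c1`:

```sql
-- VALUE_F fills every 1-hour window in the range with 0,
-- even if the range contains no data at all
SELECT _wstart, AVG(c1) FROM t
WHERE ts >= '2023-04-01 00:00:00' AND ts < '2023-04-02 00:00:00'
INTERVAL(1h) FILL(VALUE_F, 0);
```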
:::info
1. A huge volume of interpolation output may be returned using `FILL`, so it's recommended to specify the time range when using `FILL`. The maximum number of interpolation values that can be returned in a single query is 10,000,000.
@ -161,7 +171,7 @@ If you want to perform event window based query on the result set of a sub-query
For example, the diagram below illustrates the event windows generated by the query below:
```sql
select _wstart, _wend, count(*) from t start with c1 > 0 end with c2 < 10
select _wstart, _wend, count(*) from t event_window start with c1 > 0 end with c2 < 10
```
![Event Window Illustration](./event_window.webp)

View File

@ -69,7 +69,7 @@ Provides information about SQL queries currently running. Similar to SHOW QUERIE
| 1 | consumer_id | BIGINT | Consumer ID |
| 2 | consumer_group | BINARY(192) | Consumer group |
| 3 | client_id | BINARY(192) | Client ID (user-defined) |
| 4 | status | BINARY(20) | Consumer status |
| 4 | status | BINARY(20) | Consumer status. Possible values: ready (consumer is in a normal state), lost (the connection between consumer and mnode is broken), rebalance (redistribution of the vgroups belonging to this consumer is in progress), unknown (consumer is in an invalid state) |
| 5 | topics | BINARY(204) | Subscribed topic. Returns one row for each topic. |
| 6 | up_time | TIMESTAMP | Time of first connection to TDengine Server |
| 7 | subscribe_time | TIMESTAMP | Time of first subscription |

View File

@ -308,9 +308,11 @@ Query OK, 24 row(s) in set (0.002444s)
</code></pre>
</details>
The above shows the block distribution percentage according to the number of rows in each block. From the above example, we can get the following information:
- `_block_dist: 3483 ||||||||||||||||| 1 (20.00%)` means there is one block whose row count is between 3,483 and 3,681.
- `_block_dist: 3881 ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| 4 (80.00%)` means there are 4 blocks whose row counts are between 3,881 and 4,096.
- The number of blocks whose row counts fall in other ranges is zero.
Note that only the information about the data blocks in the data file will be displayed here, and the information about the data in the stt file will not be displayed.
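The block distribution above is produced by a statement of the following form (`d0` is a placeholder table name):

```sql
SHOW TABLE DISTRIBUTED d0;
```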
## SHOW TAGS

View File

@ -27,7 +27,7 @@ The following data types can be used in the schema for standard tables.
| - | :------- | :-------- | :------- |
| 1 | ALTER ACCOUNT | Deprecated| This Enterprise Edition-only statement has been removed. It returns the error "This statement is no longer supported."
| 2 | ALTER ALL DNODES | Added | Modifies the configuration of all dnodes.
| 3 | ALTER DATABASE | Modified | Deprecated<ul><li>QUORUM: Specified the required number of confirmations. STRICT is now used to specify strong or weak consistency. The STRICT parameter cannot be modified. </li><li>BLOCKS: Specified the memory blocks used by each vnode. BUFFER is now used to specify the size of the write cache pool for each vnode. </li><li>UPDATE: Specified whether update operations were supported. All databases now support updating data in certain columns. </li><li>CACHELAST: Specified how to cache the newest row of data. CACHEMODEL now replaces CACHELAST. </li><li>COMP: Cannot be modified. <br/>Added</li><li>CACHEMODEL: Specifies whether to cache the latest subtable data. </li><li>CACHESIZE: Specifies the size of the cache for the newest subtable data. </li><li>WAL_FSYNC_PERIOD: Replaces the FSYNC parameter. </li><li>WAL_LEVEL: Replaces the WAL parameter. <br/>Modified</li><li>REPLICA: Cannot be modified. </li><li>KEEP: Now supports units. </li></ul>
| 3 | ALTER DATABASE | Modified | Deprecated<ul><li>QUORUM: Specified the required number of confirmations. TDengine 3.0 provides strict consistency by default and does not allow switching to weak consistency. </li><li>BLOCKS: Specified the memory blocks used by each vnode. BUFFER is now used to specify the size of the write cache pool for each vnode. </li><li>UPDATE: Specified whether update operations were supported. All databases now support updating data in certain columns. </li><li>CACHELAST: Specified how to cache the newest row of data. CACHEMODEL now replaces CACHELAST. </li><li>COMP: Cannot be modified. <br/>Added</li><li>CACHEMODEL: Specifies whether to cache the latest subtable data. </li><li>CACHESIZE: Specifies the size of the cache for the newest subtable data. </li><li>WAL_FSYNC_PERIOD: Replaces the FSYNC parameter. </li><li>WAL_LEVEL: Replaces the WAL parameter. </li><li>WAL_RETENTION_PERIOD: specifies the time after which WAL files are deleted. This parameter is used for data subscription. </li><li>WAL_RETENTION_SIZE: specifies the size at which WAL files are deleted. This parameter is used for data subscription. <br/>Modified</li><li>REPLICA: Cannot be modified. </li><li>KEEP: Now supports units. </li></ul>
| 4 | ALTER STABLE | Modified | Deprecated<ul><li>CHANGE TAG: Modified the name of a tag. Replaced by RENAME TAG. <br/>Added</li><li>RENAME TAG: Replaces CHANGE TAG. </li><li>COMMENT: Specifies comments for a supertable. </li></ul>
| 5 | ALTER TABLE | Modified | Deprecated<ul><li>CHANGE TAG: Modified the name of a tag. Replaced by RENAME TAG. <br/>Added</li><li>RENAME TAG: Replaces CHANGE TAG. </li><li>COMMENT: Specifies comments for a standard table. </li><li>TTL: Specifies the time-to-live for a standard table. </li></ul>
| 6 | ALTER USER | Modified | Deprecated<ul><li>PRIVILEGE: Specified user permissions. Replaced by GRANT and REVOKE. <br/>Added</li><li>ENABLE: Enables or disables a user. </li><li>SYSINFO: Specifies whether a user can query system information. </li></ul>

View File

@ -15,9 +15,9 @@ About details of installing TDengine, please refer to [Installation Guide](../../
## Uninstall
<Tabs>
<TabItem label="Uninstall apt-get" value="aptremove">
<TabItem label="Uninstall by apt-get" value="aptremove">
Apt-get package of TDengine can be uninstalled as below:
A TDengine package installed with apt-get can be uninstalled as below:
```bash
$ sudo apt-get remove tdengine
@ -35,7 +35,7 @@ TDengine is removed successfully!
```
Apt-get package of taosTools can be uninstalled as below:
If you have installed taos-tools, please uninstall it first before uninstalling TDengine. The uninstall command is as follows:
```
$ sudo apt remove taostools
@ -111,8 +111,20 @@ taos tools is uninstalled successfully!
```
</TabItem>
<TabItem label="Windows uninstall" value="windows">
Run C:\TDengine\unins000.exe to uninstall TDengine on a Windows system.
</TabItem>
<TabItem label="Mac uninstall" value="mac">
TDengine can be uninstalled as below:
```
$ rmtaos
TDengine is removed successfully!
```
</TabItem>
</Tabs>
@ -150,9 +162,9 @@ There are two aspects in upgrade operation: upgrade installation package and upg
To upgrade a package, follow the steps mentioned previously to first uninstall the old version then install the new version.
Upgrading a running server is much more complex. First please check the version number of the old version and the new version. The version number of TDengine consists of 4 sections, only if the first 3 sections match can the old version be upgraded to the new version. The steps of upgrading a running server are as below:
Upgrading a running server is much more complex. First please check the version number of the old version and the new version. The version number of TDengine consists of 4 sections, only if the first 2 sections match can the old version be upgraded to the new version. The steps of upgrading a running server are as below:
- Stop inserting data
- Make sure all data is persisted to disk
- Make sure all data is persisted to disk; you can use the `FLUSH DATABASE` command
- Stop the cluster of TDengine
- Uninstall old version and install new version
- Start the cluster of TDengine

View File

@ -18,14 +18,8 @@ To achieve absolutely no data loss, set wal_level to 2 and wal_fsync_period to 0
## Disaster Recovery
TDengine uses replication to provide high availability.
TDengine provides disaster recovery by using taosX to replicate data between two TDengine clusters deployed in two distant data centers. Assume there are two clusters, A and B, where A is the source and takes the workload of writing and querying, and B is the target. You can deploy `taosX` in the data center where cluster A resides; `taosX` consumes the data written into cluster A and writes it into cluster B. If the data center of cluster A is disrupted by a disaster, you can switch to cluster B for data writing and querying, and deploy a `taosX` in the data center of cluster B to replicate data from cluster B back to cluster A once cluster A has recovered, or to another cluster C if cluster A cannot be recovered.
A TDengine cluster is managed by mnodes. You can configure up to three mnodes to ensure high availability. The data replication between mnode replicas is performed in a synchronous way to guarantee metadata consistency.
You can use the data replication feature of `taosX` to build more complicated disaster recovery solutions.
The number of replicas for time series data in TDengine is associated with each database. There can be many databases in a cluster and each database can be configured with a different number of replicas. When creating a database, the parameter `replica` is used to specify the number of replicas. To achieve high availability, set `replica` to 3.
The number of dnodes in a TDengine cluster must NOT be lower than the number of replicas for any database, otherwise it would fail when trying to create a table.
As long as the dnodes of a TDengine cluster are deployed on different physical machines and the replica number is higher than 1, high availability can be achieved without any other assistance. For disaster recovery, dnodes of a TDengine cluster should be deployed in geographically different data centers.
Alternatively, you can use taosX to synchronize the data from one TDengine cluster to another cluster in a remote location. However, taosX is only available in TDengine enterprise version, for more information please contact tdengine.com.
taosX is only provided in the TDengine enterprise edition; for more details please contact business@tdengine.com.

View File

@ -42,3 +42,304 @@ An existing Grafana Notification Channel can be specified with parameter `-E`, t
Launch `TDinsight.sh` with the command above and restart Grafana, then open Dashboard `http://localhost:3000/d/tdinsight`.
For more use cases and restrictions please refer to [TDinsight](/reference/tdinsight/).
## log database
The data of the TDinsight dashboard is stored in the `log` database by default (you can change this in taoskeeper's config file; for more information, please refer to the [taoskeeper document](/reference/taosKeeper)). taoskeeper creates the `log` database on startup.
### cluster\_info table
`cluster_info` table contains cluster information records.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|first\_ep|VARCHAR||first ep of cluster|
|first\_ep\_dnode\_id|INT||dnode id of first\_ep|
|version|VARCHAR||tdengine version. such as: 3.0.4.0|
|master\_uptime|FLOAT||days of master's uptime|
|monitor\_interval|INT||monitor interval in seconds|
|dbs\_total|INT||total number of databases in cluster|
|tbs\_total|BIGINT||total number of tables in cluster|
|stbs\_total|INT||total number of stables in cluster|
|dnodes\_total|INT||total number of dnodes in cluster|
|dnodes\_alive|INT||total number of dnodes in ready state|
|mnodes\_total|INT||total number of mnodes in cluster|
|mnodes\_alive|INT||total number of mnodes in ready state|
|vgroups\_total|INT||total number of vgroups in cluster|
|vgroups\_alive|INT||total number of vgroups in ready state|
|vnodes\_total|INT||total number of vnode in cluster|
|vnodes\_alive|INT||total number of vnode in ready state|
|connections\_total|INT||total number of connections to cluster|
|topics\_total|INT||total number of topics in cluster|
|streams\_total|INT||total number of streams in cluster|
|protocol|INT||protocol version|
|cluster\_id|NCHAR|TAG|cluster id|
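As a sketch of how these tables can be queried (assuming taoskeeper writes to the default `log` database):

```sql
-- latest dnode liveness per cluster
SELECT LAST(ts), LAST(dnodes_alive), LAST(dnodes_total)
FROM log.cluster_info
GROUP BY cluster_id;
```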
### d\_info table
`d_info` table contains dnodes information records.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|status|VARCHAR||dnode status|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
### m\_info table
`m_info` table contains mnode information records.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|role|VARCHAR||the role of mnode. leader or follower|
|mnode\_id|INT|TAG|master node id|
|mnode\_ep|NCHAR|TAG|master node endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
### dnodes\_info table
`dnodes_info` table contains dnodes information records.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|uptime|FLOAT||dnode uptime|
|cpu\_engine|FLOAT||cpu usage of tdengine. read from `/proc/<taosd_pid>/stat`|
|cpu\_system|FLOAT||cpu usage of server. read from `/proc/stat`|
|cpu\_cores|FLOAT||cpu cores of server|
|mem\_engine|INT||memory usage of tdengine. read from `/proc/<taosd_pid>/status`|
|mem\_system|INT||available memory on the server|
|mem\_total|INT||total memory of server in `KB`|
|disk\_engine|INT|||
|disk\_used|BIGINT||usage of data dir in `bytes`|
|disk\_total|BIGINT||the capacity of data dir in `bytes`|
|net\_in|FLOAT||network throughput rate in kb/s. read from `/proc/net/dev`|
|net\_out|FLOAT||network throughput rate in kb/s. read from `/proc/net/dev`|
|io\_read|FLOAT||io throughput rate in kb/s. read from `/proc/<taosd_pid>/io`|
|io\_write|FLOAT||io throughput rate in kb/s. read from `/proc/<taosd_pid>/io`|
|io\_read\_disk|FLOAT||io throughput rate of disk in kb/s. read from `/proc/<taosd_pid>/io`|
|io\_write\_disk|FLOAT||io throughput rate of disk in kb/s. read from `/proc/<taosd_pid>/io`|
|req\_select|INT||number of select queries received per dnode|
|req\_select\_rate|FLOAT||number of select queries received per dnode divided by monitor interval.|
|req\_insert|INT||number of insert queries received per dnode|
|req\_insert\_success|INT||number of successfully insert queries received per dnode|
|req\_insert\_rate|FLOAT||number of insert queries received per dnode divided by monitor interval|
|req\_insert\_batch|INT||number of batch insertions|
|req\_insert\_batch\_success|INT||number of successful batch insertions|
|req\_insert\_batch\_rate|FLOAT||number of batch insertions divided by monitor interval|
|errors|INT||dnode errors|
|vnodes\_num|INT||number of vnodes per dnode|
|masters|INT||number of master vnodes|
|has\_mnode|INT||if the dnode has mnode|
|has\_qnode|INT||if the dnode has qnode|
|has\_snode|INT||if the dnode has snode|
|has\_bnode|INT||if the dnode has bnode|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
### data\_dir table
`data_dir` table contains data directory information records.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|name|NCHAR||data directory. default is `/var/lib/taos`|
|level|INT||level for multi-level storage|
|avail|BIGINT||available space for data directory|
|used|BIGINT||used space for data directory|
|total|BIGINT||total space for data directory|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
### log\_dir table
`log_dir` table contains log directory information records.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|name|NCHAR||log directory. default is `/var/log/taos/`|
|avail|BIGINT||available space for log directory|
|used|BIGINT||used space for log directory|
|total|BIGINT||total space for log directory|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
### temp\_dir table
`temp_dir` table contains temp dir information records.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|name|NCHAR||temp directory. default is `/tmp/`|
|avail|BIGINT||available space for temp directory|
|used|BIGINT||used space for temp directory|
|total|BIGINT||total space for temp directory|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
### vgroups\_info table
`vgroups_info` table contains vgroups information records.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|vgroup\_id|INT||vgroup id|
|database\_name|VARCHAR||database for the vgroup|
|tables\_num|BIGINT||number of tables per vgroup|
|status|VARCHAR||status|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
### vnodes\_role table
`vnodes_role` table contains vnode role information records.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|vnode\_role|VARCHAR||role. leader or follower|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
### logs table
`logs` table contains log information records.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|level|VARCHAR||log level|
|content|NCHAR||log content|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
### log\_summary table
`log_summary` table contains log summary information records.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|error|INT||error count|
|info|INT||info count|
|debug|INT||debug count|
|trace|INT||trace count|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
### grants\_info table
`grants_info` table contains grants information records.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|expire\_time|BIGINT||time until grants expire in seconds|
|timeseries\_used|BIGINT||timeseries used|
|timeseries\_total|BIGINT||total timeseries|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
### keeper\_monitor table
`keeper_monitor` table contains keeper monitor information records.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|cpu|FLOAT||cpu usage|
|mem|FLOAT||memory usage|
|identify|NCHAR|TAG||
### taosadapter\_restful\_http\_request\_total table
`taosadapter_restful_http_request_total` table contains taosadapter rest request information record. The timestamp column of this table is `_ts`.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|\_ts|TIMESTAMP||timestamp|
|gauge|DOUBLE||metric value|
|client\_ip|NCHAR|TAG|client ip|
|endpoint|NCHAR|TAG|taosadapter endpoint|
|request\_method|NCHAR|TAG|request method|
|request\_uri|NCHAR|TAG|request uri|
|status\_code|NCHAR|TAG|status code|
### taosadapter\_restful\_http\_request\_fail table
`taosadapter_restful_http_request_fail` table contains taosadapter failed rest request information record. The timestamp column of this table is `_ts`.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|\_ts|TIMESTAMP||timestamp|
|gauge|DOUBLE||metric value|
|client\_ip|NCHAR|TAG|client ip|
|endpoint|NCHAR|TAG|taosadapter endpoint|
|request\_method|NCHAR|TAG|request method|
|request\_uri|NCHAR|TAG|request uri|
|status\_code|NCHAR|TAG|status code|
### taosadapter\_restful\_http\_request\_in\_flight table
`taosadapter_restful_http_request_in_flight` table contains taosadapter rest request information record in real time. The timestamp column of this table is `_ts`.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|\_ts|TIMESTAMP||timestamp|
|gauge|DOUBLE||metric value|
|endpoint|NCHAR|TAG|taosadapter endpoint|
### taosadapter\_restful\_http\_request\_summary\_milliseconds table
`taosadapter_restful_http_request_summary_milliseconds` table contains the summary of rest request information records. The timestamp column of this table is `_ts`.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|\_ts|TIMESTAMP||timestamp|
|count|DOUBLE|||
|sum|DOUBLE|||
|0.5|DOUBLE|||
|0.9|DOUBLE|||
|0.99|DOUBLE|||
|0.1|DOUBLE|||
|0.2|DOUBLE|||
|endpoint|NCHAR|TAG|taosadapter endpoint|
|request\_method|NCHAR|TAG|request method|
|request\_uri|NCHAR|TAG|request uri|
### taosadapter\_system\_mem\_percent table
`taosadapter_system_mem_percent` table contains taosadapter memory usage information. The timestamp of this table is `_ts`.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|\_ts|TIMESTAMP||timestamp|
|gauge|DOUBLE||metric value|
|endpoint|NCHAR|TAG|taosadapter endpoint|
### taosadapter\_system\_cpu\_percent table
`taosadapter_system_cpu_percent` table contains taosadapter cpu usage information. The timestamp of this table is `_ts`.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|\_ts|TIMESTAMP||timestamp|
|gauge|DOUBLE||metric value|
|endpoint|NCHAR|TAG|taosadapter endpoint|

View File

@ -382,6 +382,130 @@ Response body:
}
```
## REST API between TDengine 2.x and 3.0
### URI
| URI | TDengine 2.x | TDengine 3.0 |
| :--------------------| :------------------: | :--------------------------------------------------: |
| /rest/sql | Supported | Supported (with different response code and body) |
| /rest/sqlt | Supported | No longer supported |
| /rest/sqlutc | Supported | No longer supported |
### HTTP code
| HTTP code | TDengine 2.x | TDengine 3.0 | note |
| :--------------------| :------------------: | :----------: | :-----------------------------------: |
| 200 | Supported | Supported | Success or taosc returns an error |
| 400 | Not supported | Supported | Parameter error |
| 401 | Not supported | Supported | Authentication failure |
| 404 | Supported | Supported | URI does not exist |
| 500 | Not supported | Supported | Internal error |
| 503 | Supported | Supported | Insufficient system resources |
### Response body
#### REST response body returned from TDengine 2.x
```JSON
{
"status": "succ",
"head": [
"name",
"created_time",
"ntables",
"vgroups",
"replica",
"quorum",
"days",
"keep1,keep2,keep(D)",
"cache(MB)",
"blocks",
"minrows",
"maxrows",
"wallevel",
"fsync",
"comp",
"precision",
"status"
],
"data": [
[
"log",
"2020-09-02 17:23:00.039",
4,
1,
1,
1,
10,
"30,30,30",
1,
3,
100,
4096,
1,
3000,
2,
"us",
"ready"
]
],
"rows": 1
}
```
#### REST response body returned from TDengine 3.0
```JSON
{
"code": 0,
"column_meta": [
[
"name",
"VARCHAR",
64
],
[
"ntables",
"BIGINT",
8
],
[
"status",
"VARCHAR",
10
]
],
"data": [
[
"information_schema",
16,
"ready"
],
[
"performance_schema",
9,
"ready"
]
],
"rows": 2
}
```
## Reference
[taosAdapter](/reference/taosadapter/)

View File

@ -725,7 +725,7 @@ consumer.close()
For more information, see [Data Subscription](../../../develop/tmq).
### Usage examples
#### Full Sample Code
<Tabs defaultValue="native">
<TabItem value="native" label="native connection">

View File

@ -120,7 +120,7 @@ _taosSql_ implements Go's `database/sql/driver` interface via cgo. You can use t
Use `taosSql` as `driverName` and a correct [DSN](#DSN) as `dataSourceName`. DSN supports the following parameters.
* configPath specifies the `taos.cfg` directory
* cfg specifies the `taos.cfg` directory
For example:

View File

@ -10,10 +10,11 @@ import TabItem from "@theme/TabItem";
`taospy` is the official Python connector for TDengine. taospy provides a rich API that makes it easy for Python applications to use TDengine. `taospy` wraps both the [native interface](/reference/connector/cpp) and [REST interface](/reference/rest-api) of TDengine, which correspond to the `taos` and `taosrest` modules of the `taospy` package, respectively.
In addition to wrapping the native and REST interfaces, `taospy` also provides a set of programming interfaces that conforms to the [Python Data Access Specification (PEP 249)](https://peps.python.org/pep-0249/). It is easy to integrate `taospy` with many third-party tools, such as [SQLAlchemy](https://www.sqlalchemy.org/) and [pandas](https://pandas.pydata.org/).
The direct connection to the server using the native interface provided by the client driver is referred to hereinafter as a "native connection"; the connection to the server using the REST interface provided by taosAdapter is referred to hereinafter as a "REST connection".
`taos-ws-py` is an optional package that enables connecting to TDengine via WebSocket.
The direct connection to the server using the native interface provided by the client driver is referred to hereinafter as a "native connection"; the connection to the server using the REST or WebSocket interface provided by taosAdapter is referred to hereinafter as a "REST connection" or "WebSocket connection".
The source code for the Python connector is hosted on [GitHub](https://github.com/taosdata/taos-connector-python).
## Supported platforms
- The [supported platforms](/reference/connector/#supported-platforms) for the native connection are the same as the ones supported by the TDengine client.
@ -114,6 +115,15 @@ For REST connections, verifying that the `taosrest` module can be imported succe
import taosrest
```
</TabItem>
<TabItem value="ws" label="WebSocket connection">
For WebSocket connection, verifying that the `taosws` module can be imported successfully can be done in the Python Interactive Shell by typing.
```python
import taosws
```
</TabItem>
</Tabs>
@ -182,6 +192,28 @@ If the test is successful, it will output the server version information, e.g.
}
```
</TabItem>
<TabItem value="ws" label="WebSocket connection" groupId="connect">
For a WebSocket connection, make sure the cluster and the taosAdapter component are running. This can be tested using the following `curl` command.
```
curl -i -N -d "show databases" -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -H "Connection: Upgrade" -H "Upgrade: websocket" -H "Host: <FQDN>:<PORT>" -H "Origin: http://<FQDN>:<PORT>" http://<FQDN>:<PORT>/rest/sql
```
The FQDN above is the FQDN of the machine running taosAdapter; PORT is the port taosAdapter is listening on, `6041` by default.
If the test is successful, it will output the server version information, e.g.
```json
HTTP/1.1 200 OK
Content-Type: application/json; charset=utf-8
Date: Tue, 21 Mar 2023 09:29:17 GMT
Transfer-Encoding: chunked
{"status":"succ","head":["server_version()"],"column_meta":[["server_version()",8,8]],"data":[["2.6.0.27"]],"rows":1}
```
</TabItem>
</Tabs>
@ -321,6 +353,86 @@ For a more detailed description of the `sql()` method, please refer to [RestClie
</TabItem>
</Tabs>
### Usage with req_id
By using the optional req_id parameter, you can specify a request ID that can be used for tracing.
<Tabs defaultValue="rest">
<TabItem value="native" label="native connection">
##### TaosConnection class
The `TaosConnection` class contains both an implementation of the PEP249 Connection interface (e.g., the `cursor()` method and the `close()` method) and many extensions (e.g., the `execute()`, `query()`, `schemaless_insert()`, and `subscribe()` methods).
```python title="execute method"
{{#include docs/examples/python/connection_usage_native_reference_with_req_id.py:insert}}
```
```python title="query method"
{{#include docs/examples/python/connection_usage_native_reference_with_req_id.py:query}}
```
:::tip
The queried results can only be fetched once. For example, only one of `fetch_all()` and `fetch_all_into_dict()` can be used in the example above. Repeated fetches will result in an empty list.
:::
##### Use of TaosResult class
In the above example of using the `TaosConnection` class, we have shown two ways to get the result of a query: `fetch_all()` and `fetch_all_into_dict()`. In addition, `TaosResult` also provides methods to iterate through the result set by rows (`rows_iter`) or by data blocks (`blocks_iter`). Using these two methods will be more efficient in scenarios where the query has a large amount of data.
```python title="blocks_iter method"
{{#include docs/examples/python/result_set_with_req_id_examples.py}}
```
##### Use of the TaosCursor class
The `TaosConnection` class and the `TaosResult` class already implement all the functionality of the native interface. If you are familiar with the interfaces in the PEP249 specification, you can also use the methods provided by the `TaosCursor` class.
```python title="Use of TaosCursor"
{{#include docs/examples/python/cursor_usage_native_reference_with_req_id.py}}
```
:::note
The TaosCursor class uses native connections for write and query operations. In a client-side multi-threaded scenario, this cursor instance must remain thread exclusive and cannot be shared across threads for use, otherwise, it will result in errors in the returned results.
:::
</TabItem>
<TabItem value="rest" label="REST connection">
##### Use of TaosRestCursor class
The `TaosRestCursor` class is an implementation of the PEP249 Cursor interface.
```python title="Use of TaosRestCursor"
{{#include docs/examples/python/connect_rest_with_req_id_examples.py:basic}}
```
- `cursor.execute`: Used to execute arbitrary SQL statements.
- `cursor.rowcount` : For write operations, returns the number of successful rows written. For query operations, returns the number of rows in the result set.
- `cursor.description` : Returns the description of the field. Please refer to [TaosRestCursor](https://docs.taosdata.com/api/taospy/taosrest/cursor.html) for the specific format of the description information.
##### Use of the RestClient class
The `RestClient` class is a direct wrapper for the [REST API](/reference/rest-api). It contains only a `sql()` method for executing arbitrary SQL statements and returning the result.
```python title="Use of RestClient"
{{#include docs/examples/python/rest_client_with_req_id_example.py}}
```
For a more detailed description of the `sql()` method, please refer to [RestClient](https://docs.taosdata.com/api/taospy/taosrest/restclient.html).
</TabItem>
<TabItem value="websocket" label="WebSocket connection">
```python
{{#include docs/examples/python/connect_websocket_with_req_id_examples.py:basic}}
```
- `conn.execute`: can be used to execute arbitrary SQL statements and returns the number of rows affected.
- `conn.query`: can be used to execute query SQL statements and returns the query results.
</TabItem>
</Tabs>
### Used with pandas
<Tabs defaultValue="rest">
@ -347,6 +459,56 @@ For a more detailed description of the `sql()` method, please refer to [RestClie
</TabItem>
</Tabs>
### Schemaless Insert
The connector supports schemaless insert.
<Tabs defaultValue="list">
<TabItem value="list" label="List Insert">
Simple insert
```python
{{#include docs/examples/python/schemaless_insert.py}}
```
Insert with ttl argument
```python
{{#include docs/examples/python/schemaless_insert_ttl.py}}
```
Insert with req_id argument
```python
{{#include docs/examples/python/schemaless_insert_req_id.py}}
```
</TabItem>
<TabItem value="raw" label="Raw Insert">
Simple insert
```python
{{#include docs/examples/python/schemaless_insert_raw.py}}
```
Insert with ttl argument
```python
{{#include docs/examples/python/schemaless_insert_raw_ttl.py}}
```
Insert with req_id argument
```python
{{#include docs/examples/python/schemaless_insert_raw_req_id.py}}
```
</TabItem>
</Tabs>
### Other sample programs
| Example program links | Example program content |

View File

@ -1 +1 @@
label: "connector"
label: "Connector"

View File

@ -14,7 +14,7 @@ import PkgListV3 from "/components/PkgListV3";
Once the package is unzipped, you will see the following files in the directory:
- _install_client.sh_: install script
- _taos.tar.gz_: client driver package
- _package.tar.gz_: client driver package
- _driver_: TDengine client driver
- _examples_: some example programs of different programming languages (C/C#/go/JDBC/MATLAB/python/R)
You can run `install_client.sh` to install it.

View File

@ -66,15 +66,13 @@ Usage of taosAdapter:
--cors.allowCredentials cors allow credentials. Env "TAOS_ADAPTER_CORS_ALLOW_Credentials"
--cors.allowHeaders stringArray cors allow HEADERS. Env "TAOS_ADAPTER_ALLOW_HEADERS"
--cors.allowOrigins stringArray cors allow origins. Env "TAOS_ADAPTER_ALLOW_ORIGINS"
--cors.allowWebSockets cors allow WebSockets. Env "TAOS_ADAPTER_CORS_ALLOW_WebSockets"
--cors.exposeHeaders stringArray cors expose headers. Env "TAOS_ADAPTER_Expose_Headers"
--debug enable debug mode. Env "TAOS_ADAPTER_DEBUG" (default true)
--help Print this help message and exit
--httpCodeServerError Use a non-200 http status code when taosd returns an error. Env "TAOS_ADAPTER_HTTP_CODE_SERVER_ERROR"
--httpCodeServerError Use a non-200 http status code when server returns an error. Env "TAOS_ADAPTER_HTTP_CODE_SERVER_ERROR"
--influxdb.enable enable influxdb. Env "TAOS_ADAPTER_INFLUXDB_ENABLE" (default true)
--log.enableRecordHttpSql whether to record http sql. Env "TAOS_ADAPTER_LOG_ENABLE_RECORD_HTTP_SQL"
--log.path string log path. Env "TAOS_ADAPTER_LOG_PATH" (default "/var/log/taos")
--log.rotationCount uint log rotation count. Env "TAOS_ADAPTER_LOG_ROTATION_COUNT" (default 30)
--log.rotationSize string log rotation size(KB MB GB), must be a positive integer. Env "TAOS_ADAPTER_LOG_ROTATION_SIZE" (default "1GB")
--log.rotationTime duration log rotation time. Env "TAOS_ADAPTER_LOG_ROTATION_TIME" (default 24h0m0s)
--log.sqlRotationCount uint record sql log rotation count. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_COUNT" (default 2)
@ -89,8 +87,7 @@ Usage of taosAdapter:
--monitor.password string TDengine password. Env "TAOS_ADAPTER_MONITOR_PASSWORD" (default "taosdata")
--monitor.pauseAllMemoryThreshold float Memory percentage threshold for pause all. Env "TAOS_ADAPTER_MONITOR_PAUSE_ALL_MEMORY_THRESHOLD" (default 80)
--monitor.pauseQueryMemoryThreshold float Memory percentage threshold for pause query. Env "TAOS_ADAPTER_MONITOR_PAUSE_QUERY_MEMORY_THRESHOLD" (default 70)
--monitor.user string TDengine user. Env "TAOS_ADAPTER_MONITOR_USER" (default "root")
--monitor.writeInterval duration Set write to TDengine interval. Env "TAOS_ADAPTER_MONITOR_WRITE_INTERVAL" (default 30s)
--monitor.writeToTD Whether write metrics to TDengine. Env "TAOS_ADAPTER_MONITOR_WRITE_TO_TD"
--node_exporter.caCertFile string node_exporter ca cert file path. Env "TAOS_ADAPTER_NODE_EXPORTER_CA_CERT_FILE"
--node_exporter.certFile string node_exporter cert file path. Env "TAOS_ADAPTER_NODE_EXPORTER_CERT_FILE"
@ -119,14 +116,14 @@ Usage of taosAdapter:
--opentsdb_telnet.ttl int opentsdb_telnet data ttl. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TTL"
--opentsdb_telnet.user string opentsdb_telnet user. Env "TAOS_ADAPTER_OPENTSDB_TELNET_USER" (default "root")
--pool.idleTimeout duration Set idle connection timeout. Env "TAOS_ADAPTER_POOL_IDLE_TIMEOUT"
--pool.maxConnect int max connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_CONNECT"
--pool.maxIdle int max idle connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_IDLE"
--pool.maxConnect int max connections to server. Env "TAOS_ADAPTER_POOL_MAX_CONNECT"
--pool.maxIdle int max idle connections to server. Env "TAOS_ADAPTER_POOL_MAX_IDLE"
-P, --port int http port. Env "TAOS_ADAPTER_PORT" (default 6041)
--prometheus.enable enable prometheus. Env "TAOS_ADAPTER_PROMETHEUS_ENABLE" (default true)
--restfulRowLimit int restful returns the maximum number of rows (-1 means no limit). Env "TAOS_ADAPTER_RESTFUL_ROW_LIMIT" (default -1)
--smlAutoCreateDB Whether to automatically create db when writing with schemaless. Env "TAOS_ADAPTER_SML_AUTO_CREATE_DB"
--statsd.allowPendingMessages int statsd allow pending messages. Env "TAOS_ADAPTER_STATSD_ALLOW_PENDING_MESSAGES" (default 50000)
--statsd.db string statsd db name. Env "TAOS_ADAPTER_STATSD_DB" (default "statsd")
--statsd.deleteCounters statsd delete counter cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_COUNTERS" (default true)
--statsd.deleteGauges statsd delete gauge cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_GAUGES" (default true)
--statsd.deleteSets statsd delete set cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_SETS" (default true)
--statsd.deleteTimings statsd delete timing cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_TIMINGS" (default true)
@ -136,11 +133,11 @@ Usage of taosAdapter:
--statsd.password string statsd password. Env "TAOS_ADAPTER_STATSD_PASSWORD" (default "taosdata")
--statsd.port int statsd server port. Env "TAOS_ADAPTER_STATSD_PORT" (default 6044)
--statsd.protocol string statsd protocol [tcp or udp]. Env "TAOS_ADAPTER_STATSD_PROTOCOL" (default "udp")
--statsd.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_STATSD_TCP_KEEP_ALIVE"
--statsd.ttl int statsd data ttl. Env "TAOS_ADAPTER_STATSD_TTL"
--statsd.user string statsd user. Env "TAOS_ADAPTER_STATSD_USER" (default "root")
--statsd.worker int statsd write worker. Env "TAOS_ADAPTER_STATSD_WORKER" (default 10)
--taosConfigDir string load taos client config path. Env "TAOS_ADAPTER_TAOS_CONFIG_FILE"
--tmq.releaseIntervalMultiplierForAutocommit int When set to autocommit, the interval for message release is a multiple of the autocommit interval, with a default value of 2 and a minimum value of 1 and a maximum value of 10. Env "TAOS_ADAPTER_TMQ_RELEASE_INTERVAL_MULTIPLIER_FOR_AUTOCOMMIT" (default 2)
--version Print the version and exit
```
@ -332,6 +329,10 @@ This parameter controls the number of results returned by the following interfac
taosAdapter uses the parameter `httpCodeServerError` to control whether to return a non-200 HTTP status code when the C interface returns an error. When set to true, different HTTP status codes are returned according to the error code returned by C. For details, see the HTTP Response Code chapter of the [RESTful API](https://docs.tdengine.com/reference/rest-api/).
## Configure whether schemaless writes automatically create DBs
Starting from version 3.0.4.0, taosAdapter provides the parameter `smlAutoCreateDB` to control whether to automatically create a database when writing via the schemaless protocol. The default value is false, meaning the database is not created automatically; the user needs to create it manually before performing schemaless writes.
## Troubleshooting
You can check the taosAdapter running status with the `systemctl status taosadapter` command.

View File

@ -403,11 +403,11 @@ See [General Configuration Parameters](#General Configuration Parameters) for de
#### Configuration parameters for executing the specified query statement
The configuration parameters for querying the sub-tables or the normal tables are set in `specified_table_query`.
The configuration parameters for querying the specified table (it can be a super table, a sub-table or a normal table) are set in `specified_table_query`.
- **query_interval** : The query interval in seconds, the default value is 0.
- **threads**: The number of threads to execute the query SQL, the default value is 1.
- **threads/concurrent**: The number of threads to execute the query SQL, the default value is 1.
- **sqls**.
- **sql**: the SQL command to be executed.
@ -434,9 +434,9 @@ The configuration parameters of the super table query are set in `super_table_qu
#### Configuration parameters for executing the specified subscription statement
The configuration parameters for subscribing to a sub-table or a generic table are set in `specified_table_query`.
The configuration parameters for subscribing to a specified table (it can be a super table, a sub-table or a generic table) are set in `specified_table_query`.
- **threads**: The number of threads to execute SQL, default is 1.
- **threads/concurrent**: The number of threads to execute SQL, default is 1.
- **interval**: The time interval to execute the subscription, in seconds, default is 0.

View File

@ -61,12 +61,14 @@ And many more parameters.
- -c CONFIGDIR: Specify the directory where configuration file exists. The default is `/etc/taos`, and the default name of the configuration file in this directory is `taos.cfg`
- -C: Print the configuration parameters of `taos.cfg` in the default directory or specified by -c
- -d DATABASE: Specify the database to use when connecting to the server
- -E dsn: connect to TDengine Cloud or a server that provides WebSocket connections
- -f FILE: Execute the SQL script file in non-interactive mode. Note that each SQL statement in the script file must occupy exactly one line.
- -k: Test the operational status of the server. 0: unavailable; 1: network ok; 2: service ok; 3: service degraded; 4: exiting
- -l PKTLEN: Test package size to be used for network testing
- -n NETROLE: test scope for network connection test, default is `client`. The value can be `client` or `server`.
- -N PKTNUM: Number of packets used for network testing
- -r: Output timestamps as unsigned 64-bit integers (uint64_t in C)
- -R: Use RESTful mode when connecting
- -s COMMAND: execute SQL commands in non-interactive mode
- -t: Test the boot status of the server. The statuses of -k apply.
- -w DISPLAYWIDTH: Specify the number of columns of the server display.

View File

@ -99,6 +99,9 @@ The parameters described in this document by the effect that they have on the sy
## Monitoring Parameters
:::note
Please note that `taoskeeper` needs to be installed and running to create the `log` database and receive the metrics sent by `taosd` as part of the full monitoring solution.
### monitor
| Attribute | Description |
@ -625,6 +628,16 @@ The charset that takes effect is UTF-8.
| Default Value | 1 |
| Note | The core file is generated under the root directory when `systemctl start taosd`/`launchctl start com.tdengine.taosd` is used to start the service, or under the working directory if `taosd` is started directly in a Linux/macOS shell. |
### enableScience
| Attribute | Description |
| -------- | ------------------------------------------------------------------------------------------------------------------------------------------ |
| Applicable | Only taos-CLI client |
| Meaning | Whether to show float and double with the scientific notation |
| Value Range | 0: false, 1: true |
| Default Value | 0 |
### udf
| Attribute | Description |

View File

@ -3,13 +3,11 @@ title: Schemaless Writing
description: This document describes how to use the schemaless write component of TDengine.
---
In IoT applications, data is collected for many purposes such as intelligent control, business analysis, device monitoring and so on. Due to changes in business or functional requirements or changes in device hardware, the application logic and even the data collected may change. Schemaless writing automatically creates storage structures for your data as it is being written to TDengine, so that you do not need to create supertables in advance. When necessary, schemaless writing
will automatically add the required columns to ensure that the data written by the user is stored correctly.
In IoT applications, data is collected for many purposes such as intelligent control, business analysis, device monitoring and so on. Due to changes in business or functional requirements or changes in device hardware, the application logic and even the data collected may change. Schemaless writing automatically creates storage structures for your data as it is being written to TDengine, so that you do not need to create supertables in advance. When necessary, schemaless writing will automatically add the required columns to ensure that the data written by the user is stored correctly.
The schemaless writing method creates super tables and their corresponding subtables. These are completely indistinguishable from the super tables and subtables created directly via SQL, and you can write data to them via SQL statements. Note that the names of tables created by schemaless writing are derived from fixed mapping rules on tag values, so they are not meaningful identifiers and lack readability.
Tips:
The schemaless write will automatically create a table. You do not need to create a table manually, or an unknown error may occur.
Note: Schemaless writing creates tables automatically. Creating tables manually is not supported with schemaless writing.
## Schemaless Writing Line Protocol
@ -50,8 +48,7 @@ In the schemaless writing data line protocol, each data item in the field_set ne
- `t`, `T`, `true`, `True`, `TRUE`, `f`, `F`, `false`, and `False` will be handled directly as BOOL types.
For example, the following data rows write c1 column as 3 (BIGINT), c2 column as false (BOOL), c3 column
as "passit" (BINARY), c4 column as 4 (DOUBLE), and the primary key timestamp as 1626006833639000000 to child table with the t1 label as "3" (NCHAR), the t2 label as "4" (NCHAR), and the t3 label as "t3" (NCHAR) and the super table named `st`.
For example, the following string indicates that the one row of data is written to the st supertable with the t1 tag as "3" (NCHAR), the t2 tag as "4" (NCHAR), and the t3 tag as "t3" (NCHAR); the c1 column is 3 (BIGINT), the c2 column is false (BOOL), the c3 column is "passit" (BINARY), the c4 column is 4 (DOUBLE), and the primary key timestamp is 1626006833639000000.
```json
st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
@ -69,23 +66,31 @@ Schemaless writes process row data according to the following principles.
"measurement,tag_key1=tag_value1,tag_key2=tag_value2"
```
:::tip
Note that tag_key1, tag_key2 are not the original order of the tags entered by the user but the result of using the tag names in ascending order of the strings. Therefore, tag_key1 is not the first tag entered in the line protocol.
The string's MD5 hash value "md5_val" is calculated after the ranking is completed. The calculation result is then combined with the string to generate the table name: "t_md5_val". "t_" is a fixed prefix that every table generated by this mapping relationship has.
The string's MD5 hash value "md5_val" is calculated after the ranking is completed. The calculation result is then combined with the string to generate the table name: "t_md5_val". "t\_" is a fixed prefix that every table generated by this mapping relationship has.
:::
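For illustration, here is a minimal sketch of this naming rule in Python (the server's exact canonicalization of tag values may differ in corner cases):

```python
# Sketch: deriving a schemaless child-table name as described above.
import hashlib

def subtable_name(measurement: str, tags: dict) -> str:
    # tag keys participate in ascending string order, not input order
    tag_str = ",".join(f"{k}={tags[k]}" for k in sorted(tags))
    key = f"{measurement},{tag_str}"
    return "t_" + hashlib.md5(key.encode("utf-8")).hexdigest()

print(subtable_name("st", {"t2": "4", "t1": "3", "t3": "t3"}))  # t_<md5_val>
```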
You can configure smlChildTableName in taos.cfg to specify table names, for example, `smlChildTableName=tname`. You can insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000` and the cpu1 table will be created automatically. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
2. If the super table obtained by parsing the line protocol does not exist, this super table is created.
**Important:** Manually creating supertables for schemaless writing is not supported. Schemaless writing creates appropriate supertables automatically.
3. If the subtable obtained by parsing the line protocol does not exist, schemaless writing creates the subtable according to the subtable name determined in steps 1 or 2.
4. If the specified tag or regular column in the data row does not exist, the corresponding tag or regular column is added to the super table (only incremental).
5. If there are some tag columns or regular columns in the super table that are not specified to take values in a data row, then the values of these columns are set to
NULL.
5. If there are some tag columns or regular columns in the super table that are not specified to take values in a data row, then the values of these columns are set to NULL.
6. For BINARY or NCHAR columns, if the length of the value provided in a data row exceeds the column type limit, the maximum length of characters allowed to be stored in the column is automatically increased (only incremented and not decremented) to ensure complete preservation of the data (see the sketch after this list).
7. Errors encountered throughout the processing will interrupt the writing process and return an error code.
8. It is assumed that the order of field_set in a supertable is consistent, meaning that the first record contains all fields and subsequent records store fields in the same order. If the order is not consistent, set smlDataFormat in taos.cfg to false. Otherwise, data will be written out of order and a database error will occur. (smlDataFormat defaults to false since version 3.0.1.3 and is deprecated since 3.0.3.0.)
8. It is assumed that the order of field_set in a supertable is consistent, meaning that the first record contains all fields and subsequent records store fields in the same order. If the order is not consistent, set smlDataFormat in taos.cfg to false. Otherwise, data will be written out of order and a database error will occur.
Note: TDengine 3.0.3.0 and later automatically detect whether order is consistent. This parameter is no longer used.
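A minimal sketch of principle 6 with the Python connector (the database name and values are illustrative; a locally running server is assumed):

```python
# Sketch: the second write carries a longer BINARY value for c3, so the
# column's maximum length is widened automatically instead of being rejected.
import taos

conn = taos.connect()
conn.execute("create database if not exists sml_demo precision 'ns'")
conn.select_db("sml_demo")
short = ['st,t1=3 c3="short" 1626006833639000000']
longer = ['st,t1=3 c3="a much longer value than before" 1626006833640000000']
conn.schemaless_insert(short, taos.SmlProtocol.LINE_PROTOCOL, taos.SmlPrecision.NOT_CONFIGURED)
conn.schemaless_insert(longer, taos.SmlProtocol.LINE_PROTOCOL, taos.SmlPrecision.NOT_CONFIGURED)
```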
:::tip
All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures, such as the total length of each row of data cannot exceed
48KB, and the total length of tag value cannot exceed 16KB. See [TDengine SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area.
All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures, such as the total length of each row of data cannot exceed 48 KB and the total length of a tag value cannot exceed 16 KB. See [TDengine SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area.
:::
## Time resolution recognition
@ -114,8 +119,7 @@ In OpenTSDB file and JSON protocol modes, the precision of the timestamp is dete
## Data Model Mapping
This section describes how data in line protocol is mapped to a schema. The data measurement in each line is mapped to a
supertable name. The tag name in tag_set is the tag name in the schema, and the name in field_set is the column name in the schema. The following example shows how data is mapped:
This section describes how data in InfluxDB line protocol is mapped to a schema. The data measurement in each line is mapped to a supertable name. The tag name in tag_set is the tag name in the schema, and the name in field_set is the column name in the schema. The following example shows how data is mapped:
```json
st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
@ -160,7 +164,7 @@ The preceding data includes a new entry, c6, with type binary(6). When this occu
TDengine guarantees the idempotency of data writes. This means that you can repeatedly call the API to perform write operations with bad data. However, TDengine does not guarantee the atomicity of multi-row writes. In a multi-row write, some data may be written successfully and other data unsuccessfully.
##: Error Codes
## Error Codes
The TSDB_CODE_TSC_LINE_SYNTAX_ERROR code indicates an error in the schemaless writing component; it occurs when writing text. For other errors, schemaless writing uses the standard TDengine error codes.

View File

@ -4,23 +4,24 @@ title: taosKeeper
description: This document describes how to use taosKeeper, a tool for exporting TDengine monitoring metrics.
---
import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";
## Introduction
taosKeeper is a tool for TDengine that exports monitoring metrics. With taosKeeper, you can easily monitor the operational status of your TDengine deployment. taosKeeper uses the TDengine REST API. It is not necessary to install TDengine Client to use taosKeeper.
## Installation
There are two ways to install taosKeeper:

- Installing the official TDengine installer will automatically install taosKeeper. Please refer to [TDengine installation](/operation/pkg-install) for details.
- You can compile taosKeeper separately and install it. Please refer to the [taosKeeper](https://github.com/taosdata/taoskeeper) repository for details.
## Configuration and Launch
## Run
### Configuration and running methods
### Configuration
taosKeeper runs in a terminal on the operating system. It supports three configuration methods: [command-line arguments](#command-line-arguments-in-detail), [environment variables](#environment-variable-in-detail), and a [configuration file](#configuration-file-parameters-in-detail). The precedence, from highest to lowest, is command-line arguments, environment variables, then the configuration file.
@ -33,28 +34,81 @@ monitorFqdn localhost # taoskeeper's FQDN
For more information, see [TDengine Monitoring Configuration](../config/#monitoring).
### Command-Line Parameters
### Quick Launch
You can use command-line parameters to run taosKeeper and control its behavior:
<Tabs>
<TabItem label="Linux" value="linux">
```shell
$ taosKeeper
```
### Environment variable
After the installation is complete, run the following command to start the taoskeeper service:
You can use environment variables to run taosKeeper and control its behavior:
```shell
$ export TAOS_KEEPER_TDENGINE_HOST=192.168.64.3
$ taoskeeper
```

```bash
systemctl start taoskeeper
```
You can run `taoskeeper -h` for more details.
Run the following command to confirm that taoskeeper is running normally:
### Configuration File
```bash
systemctl status taoskeeper
```
You can quickly launch taosKeeper with the following commands. If you do not specify a configuration file, `/etc/taos/keeper.toml` is used by default. If this file does not specify configurations, the default values are used.
Output similar to the following indicates that taoskeeper is running normally:
```
Active: active (running)
```
Output similar to the following indicates that taoskeeper has not started successfully:
```
Active: inactive (dead)
```
The following `systemctl` commands can help you manage the taoskeeper service:
- Start taoskeeper Server: `systemctl start taoskeeper`
- Stop taoskeeper Server: `systemctl stop taoskeeper`
- Restart taoskeeper Server: `systemctl restart taoskeeper`
- Check taoskeeper Server status: `systemctl status taoskeeper`
:::info
- The `systemctl` command requires _root_ privileges. If you are not logged in as the _root_ user, use the `sudo` command.
- The `systemctl stop taoskeeper` command will instantly stop taoskeeper Server.
- If your system does not include `systemd`, you can run `/usr/local/taos/bin/taoskeeper` to start taoskeeper manually.
:::
</TabItem>
<TabItem label="macOS" value="macos">
After the installation is complete, run `launchctl start com.tdengine.taoskeeper` to start taoskeeper Server.
The following `launchctl` commands can help you manage the taoskeeper service:
- Start taoskeeper Server: `sudo launchctl start com.tdengine.taoskeeper`
- Stop taoskeeper Server: `sudo launchctl stop com.tdengine.taoskeeper`
- Check taoskeeper Server status: `sudo launchctl list | grep taoskeeper`
:::info
- Please use `sudo` to run `launchctl` to manage _com.tdengine.taoskeeper_ with administrator privileges.
- The administrator privilege is required for service management to enhance security.
- Troubleshooting:
- The first column returned by the command `launchctl list | grep taoskeeper` is the PID of the program. If it's `-`, that means the taoskeeper service is not running.
- If the service is abnormal, please check the `launchd.log` file from the system log.
:::
</TabItem>
</Tabs>
#### Launch With Configuration File
You can quickly launch taosKeeper with the following commands. If you do not specify a configuration file, `/etc/taos/taoskeeper.toml` is used by default. If this file does not specify configurations, the default values are used.
```shell
$ taoskeeper -c <keeper config file>
@ -99,6 +153,10 @@ database = "log"
# standard tables to monitor
tables = ["normal_table"]
# database options for db storing metrics data
[metrics.databaseoptions]
cachemodel = "none"
```
### Obtain Monitoring Metrics
@ -148,3 +206,46 @@ taos_cluster_info_dnodes_total{cluster_id="5981392874047724755"} 1
# TYPE taos_cluster_info_first_ep gauge
taos_cluster_info_first_ep{cluster_id="5981392874047724755",value="hlb:6030"} 1
```
### check_health
```
$ curl -i http://127.0.0.1:6043/check_health
```
Response:
```
HTTP/1.1 200 OK
Content-Type: application/json; charset=utf-8
Date: Mon, 03 Apr 2023 07:20:38 GMT
Content-Length: 19
{"version":"1.0.0"}
```
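The same check can be scripted; for example, a minimal Python version of the curl call above (assuming a local taoskeeper on its default port):

```python
# Sketch: health check against a local taoskeeper (default port 6043).
import requests

r = requests.get("http://127.0.0.1:6043/check_health")
print(r.status_code, r.json())  # expect 200 and a {"version": ...} body
```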
### taoskeeper with Prometheus
taoskeeper provides a `/metrics` API that exposes TDengine metric data for Prometheus.
#### scrape config
A scrape config in Prometheus specifies a set of targets and parameters describing how to scrape metric data from an endpoint. For more information, please refer to the [Prometheus documentation](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config).
```
# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
- job_name: "taoskeeper"
# metrics_path defaults to '/metrics'
# scheme defaults to 'http'.
static_configs:
- targets: ["localhost:6043"]
```
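To verify the endpoint before wiring it into Prometheus, you can fetch the exposition text directly; a small sketch assuming a local taoskeeper:

```python
# Sketch: print the first few Prometheus metrics exposed by taoskeeper.
import requests

text = requests.get("http://localhost:6043/metrics").text
for line in text.splitlines()[:10]:
    print(line)
```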
#### Dashboard
There is a dashboard named `TaosKeeper Prometheus Dashboard for 3.x`, which provides a monitoring dashboard similar to TDinsight.
In Grafana, click the Dashboard menu, click `import`, enter the dashboard ID `18587`, and click the `Load` button to finish importing the `TaosKeeper Prometheus Dashboard for 3.x` dashboard.

View File

@ -77,7 +77,7 @@ sudo -u grafana grafana-cli plugins install tdengine-datasource
You can also download zip files from [GitHub](https://github.com/taosdata/grafanaplugin/releases/tag/latest) or [Grafana](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation) and install manually. The commands are as follows:
```bash
GF_VERSION=3.2.7
GF_VERSION=3.3.1
# from GitHub
wget https://github.com/taosdata/grafanaplugin/releases/download/v$GF_VERSION/tdengine-datasource-$GF_VERSION.zip
# from Grafana

View File

@ -10,6 +10,22 @@ For TDengine 2.x installation packages by version, please visit [here](https://w
import Release from "/components/ReleaseV3";
## 3.0.4.0
<Release type="tdengine" version="3.0.4.0" />
## 3.0.3.2
<Release type="tdengine" version="3.0.3.2" />
## 3.0.3.1
<Release type="tdengine" version="3.0.3.1" />
## 3.0.3.0
<Release type="tdengine" version="3.0.3.0" />

View File

@ -10,6 +10,18 @@ For other historical version installers, please visit [here](https://www.taosdat
import Release from "/components/ReleaseV3";
## 2.4.12
<Release type="tools" version="2.4.12" />
## 2.4.11
<Release type="tools" version="2.4.11" />
## 2.4.10
<Release type="tools" version="2.4.10" />
## 2.4.9
<Release type="tools" version="2.4.9" />

View File

@ -70,7 +70,7 @@ static int32_t init_env() {
taos_free_result(pRes);
// create database
pRes = taos_query(pConn, "create database tmqdb");
pRes = taos_query(pConn, "create database tmqdb wal_retention_period 3600");
if (taos_errno(pRes) != 0) {
printf("error in create tmqdb, reason:%s\n", taos_errstr(pRes));
return -1;

View File

@ -48,7 +48,7 @@ namespace TDengineExample
static void PrepareDatabase(IntPtr conn)
{
IntPtr res = TDengine.Query(conn, "CREATE DATABASE test");
IntPtr res = TDengine.Query(conn, "CREATE DATABASE test WAL_RETENTION_PERIOD 3600");
if (TDengine.ErrorNo(res) != 0)
{
throw new Exception("failed to create database, reason: " + TDengine.Error(res));

View File

@ -54,7 +54,7 @@ namespace TDengineExample
static void PrepareDatabase(IntPtr conn)
{
IntPtr res = TDengine.Query(conn, "CREATE DATABASE test");
IntPtr res = TDengine.Query(conn, "CREATE DATABASE test WAL_RETENTION_PERIOD 3600");
if (TDengine.ErrorNo(res) != 0)
{
throw new Exception("failed to create database, reason: " + TDengine.Error(res));

View File

@ -58,7 +58,7 @@ namespace TDengineExample
static void PrepareDatabase(IntPtr conn)
{
IntPtr res = TDengine.Query(conn, "CREATE DATABASE test");
IntPtr res = TDengine.Query(conn, "CREATE DATABASE test WAL_RETENTION_PERIOD 3600");
if (TDengine.ErrorNo(res) != 0)
{
throw new Exception("failed to create database, reason: " + TDengine.Error(res));

View File

@ -11,7 +11,7 @@ namespace TDengineExample
IntPtr conn = GetConnection();
try
{
IntPtr res = TDengine.Query(conn, "CREATE DATABASE power");
IntPtr res = TDengine.Query(conn, "CREATE DATABASE power WAL_RETENTION_PERIOD 3600");
CheckRes(conn, res, "failed to create database");
res = TDengine.Query(conn, "USE power");
CheckRes(conn, res, "failed to change database");

View File

@ -76,7 +76,7 @@ namespace TDengineExample
static void PrepareSTable()
{
IntPtr res = TDengine.Query(conn, "CREATE DATABASE power");
IntPtr res = TDengine.Query(conn, "CREATE DATABASE power WAL_RETENTION_PERIOD 3600");
CheckResPtr(res, "failed to create database");
res = TDengine.Query(conn, "USE power");
CheckResPtr(res, "failed to change database");

View File

@ -15,7 +15,7 @@ func main() {
panic(err)
}
defer db.Close()
_, err = db.Exec("create database if not exists example_tmq")
_, err = db.Exec("create database if not exists example_tmq wal_retention_period 3600")
if err != nil {
panic(err)
}
@ -35,7 +35,6 @@ func main() {
"td.connect.port": "6030",
"client.id": "test_tmq_client",
"enable.auto.commit": "false",
"enable.heartbeat.background": "true",
"experimental.snapshot.enable": "true",
"msg.with.table.name": "true",
})

View File

@ -35,7 +35,7 @@ public class SubscribeDemo {
try (Statement statement = connection.createStatement()) {
statement.executeUpdate("drop topic if exists " + TOPIC);
statement.executeUpdate("drop database if exists " + DB_NAME);
statement.executeUpdate("create database " + DB_NAME);
statement.executeUpdate("create database " + DB_NAME + " wal_retention_period 3600");
statement.executeUpdate("use " + DB_NAME);
statement.executeUpdate(
"CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT) TAGS (`groupid` INT, `location` BINARY(24))");

View File

@ -35,7 +35,7 @@ public class WebsocketSubscribeDemo {
Statement statement = connection.createStatement()) {
statement.executeUpdate("drop topic if exists " + TOPIC);
statement.executeUpdate("drop database if exists " + DB_NAME);
statement.executeUpdate("create database " + DB_NAME);
statement.executeUpdate("create database " + DB_NAME + " wal_retention_period 3600");
statement.executeUpdate("use " + DB_NAME);
statement.executeUpdate(
"CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT) TAGS (`groupid` INT, `location` BINARY(24))");

View File

@ -4,7 +4,7 @@ import taos
taos_conn = taos.connect()
taos_conn.execute('drop database if exists power')
taos_conn.execute('create database if not exists power')
taos_conn.execute('create database if not exists power wal_retention_period 3600')
taos_conn.execute("use power")
taos_conn.execute(
"CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)")

View File

@ -0,0 +1,44 @@
# ANCHOR: connect
from taosrest import connect, TaosRestConnection, TaosRestCursor
conn = connect(url="http://localhost:6041",
user="root",
password="taosdata",
timeout=30)
# ANCHOR_END: connect
# ANCHOR: basic
# create STable
cursor = conn.cursor()
cursor.execute("DROP DATABASE IF EXISTS power", req_id=1)
cursor.execute("CREATE DATABASE power", req_id=2)
cursor.execute(
"CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)", req_id=3)
# insert data
cursor.execute("""INSERT INTO power.d1001 USING power.meters TAGS('California.SanFrancisco', 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)
power.d1002 USING power.meters TAGS('California.SanFrancisco', 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)
power.d1003 USING power.meters TAGS('California.LosAngeles', 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)
power.d1004 USING power.meters TAGS('California.LosAngeles', 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)""", req_id=4)
print("inserted row count:", cursor.rowcount)
# query data
cursor.execute("SELECT * FROM power.meters LIMIT 3", req_id=5)
# get total rows
print("queried row count:", cursor.rowcount)
# get column names from cursor
column_names = [meta[0] for meta in cursor.description]
# get rows
data = cursor.fetchall()
print(column_names)
for row in data:
print(row)
# output:
# inserted row count: 8
# queried row count: 3
# ['ts', 'current', 'voltage', 'phase', 'location', 'groupid']
# [datetime.datetime(2018, 10, 3, 14, 38, 5, 500000, tzinfo=datetime.timezone(datetime.timedelta(seconds=28800), '+08:00')), 11.8, 221, 0.28, 'california.losangeles', 2]
# [datetime.datetime(2018, 10, 3, 14, 38, 16, 600000, tzinfo=datetime.timezone(datetime.timedelta(seconds=28800), '+08:00')), 13.4, 223, 0.29, 'california.losangeles', 2]
# [datetime.datetime(2018, 10, 3, 14, 38, 5, tzinfo=datetime.timezone(datetime.timedelta(seconds=28800), '+08:00')), 10.8, 223, 0.29, 'california.losangeles', 3]
# ANCHOR_END: basic

View File

@ -6,7 +6,7 @@ conn = taosws.connect("taosws://root:taosdata@localhost:6041")
# ANCHOR: basic
conn.execute("drop database if exists connwspy")
conn.execute("create database if not exists connwspy")
conn.execute("create database if not exists connwspy wal_retention_period 3600")
conn.execute("use connwspy")
conn.execute("create table if not exists stb (ts timestamp, c1 int) tags (t1 int)")
conn.execute("create table if not exists tb1 using stb tags (1)")

View File

@ -0,0 +1,29 @@
# ANCHOR: connect
import taosws
conn = taosws.connect("taosws://root:taosdata@localhost:6041")
# ANCHOR_END: connect
# ANCHOR: basic
conn.execute("drop database if exists connwspy", req_id=1)
conn.execute("create database if not exists connwspy", req_id=2)
conn.execute("use connwspy", req_id=3)
conn.execute("create table if not exists stb (ts timestamp, c1 int) tags (t1 int)", req_id=4)
conn.execute("create table if not exists tb1 using stb tags (1)", req_id=5)
conn.execute("insert into tb1 values (now, 1)", req_id=6)
conn.execute("insert into tb1 values (now, 2)", req_id=7)
conn.execute("insert into tb1 values (now, 3)", req_id=8)
r = conn.execute("select * from stb", req_id=9)
result = conn.query("select * from stb", req_id=10)
num_of_fields = result.field_count
print(num_of_fields)
for row in result:
print(row)
# output:
# 3
# ('2023-02-28 15:56:13.329 +08:00', 1, 1)
# ('2023-02-28 15:56:13.333 +08:00', 2, 1)
# ('2023-02-28 15:56:13.337 +08:00', 3, 1)

View File

@ -0,0 +1,45 @@
import taos
# ANCHOR: insert
conn = taos.connect()
# Execute a sql, ignore the result set, just get affected rows. It's useful for DDL and DML statement.
conn.execute("DROP DATABASE IF EXISTS test", req_id=1)
conn.execute("CREATE DATABASE test", req_id=2)
# change database. same as execute "USE db"
conn.select_db("test")
conn.execute("CREATE STABLE weather(ts TIMESTAMP, temperature FLOAT) TAGS (location INT)", req_id=3)
affected_row = conn.execute("INSERT INTO t1 USING weather TAGS(1) VALUES (now, 23.5) (now+1m, 23.5) (now+2m, 24.4)", req_id=4)
print("affected_row", affected_row)
# output:
# affected_row 3
# ANCHOR_END: insert
# ANCHOR: query
# Execute a sql and get its result set. It's useful for SELECT statement
result = conn.query("SELECT * from weather", req_id=5)
# Get fields from result
fields = result.fields
for field in fields:
print(field) # {name: ts, type: 9, bytes: 8}
# output:
# {name: ts, type: 9, bytes: 8}
# {name: temperature, type: 6, bytes: 4}
# {name: location, type: 4, bytes: 4}
# Get data from result as list of tuple
data = result.fetch_all()
print(data)
# output:
# [(datetime.datetime(2022, 4, 27, 9, 4, 25, 367000), 23.5, 1), (datetime.datetime(2022, 4, 27, 9, 5, 25, 367000), 23.5, 1), (datetime.datetime(2022, 4, 27, 9, 6, 25, 367000), 24.399999618530273, 1)]
# Or get data from result as a list of dict
# map_data = result.fetch_all_into_dict()
# print(map_data)
# output:
# [{'ts': datetime.datetime(2022, 4, 27, 9, 1, 15, 343000), 'temperature': 23.5, 'location': 1}, {'ts': datetime.datetime(2022, 4, 27, 9, 2, 15, 343000), 'temperature': 23.5, 'location': 1}, {'ts': datetime.datetime(2022, 4, 27, 9, 3, 15, 343000), 'temperature': 24.399999618530273, 'location': 1}]
# ANCHOR_END: query
conn.close()

View File

@ -0,0 +1,32 @@
import taos
conn = taos.connect()
cursor = conn.cursor()
cursor.execute("DROP DATABASE IF EXISTS test", req_id=1)
cursor.execute("CREATE DATABASE test", req_id=2)
cursor.execute("USE test", req_id=3)
cursor.execute("CREATE STABLE weather(ts TIMESTAMP, temperature FLOAT) TAGS (location INT)", req_id=4)
for i in range(1000):
location = str(i % 10)
tb = "t" + location
cursor.execute(f"INSERT INTO {tb} USING weather TAGS({location}) VALUES (now+{i}a, 23.5) (now+{i + 1}a, 23.5)", req_id=5+i)
cursor.execute("SELECT count(*) FROM weather", req_id=1005)
data = cursor.fetchall()
print("count:", data[0][0])
cursor.execute("SELECT tbname, * FROM weather LIMIT 2", req_id=1006)
col_names = [meta[0] for meta in cursor.description]
print(col_names)
rows = cursor.fetchall()
print(rows)
cursor.close()
conn.close()
# output:
# count: 2000
# ['tbname', 'ts', 'temperature', 'location']
# row_count: -1
# [('t0', datetime.datetime(2022, 4, 27, 14, 54, 24, 392000), 23.5, 0), ('t0', datetime.datetime(2022, 4, 27, 14, 54, 24, 393000), 23.5, 0)]

View File

@ -5,7 +5,7 @@ LOCATIONS = ['California.SanFrancisco', 'California.LosAngles', 'California.SanD
'California.PaloAlto', 'California.Campbell', 'California.MountainView', 'California.Sunnyvale',
'California.SantaClara', 'California.Cupertino']
CREATE_DATABASE_SQL = 'create database if not exists {} keep 365 duration 10 buffer 16 wal_level 1'
CREATE_DATABASE_SQL = 'create database if not exists {} keep 365 duration 10 buffer 16 wal_level 1 wal_retention_period 3600'
USE_DATABASE_SQL = 'use {}'
DROP_TABLE_SQL = 'drop table if exists meters'
DROP_DATABASE_SQL = 'drop database if exists {}'

View File

@ -0,0 +1,9 @@
from taosrest import RestClient
client = RestClient("http://localhost:6041", user="root", password="taosdata")
res: dict = client.sql("SELECT ts, current FROM power.meters LIMIT 1", req_id=1)
print(res)
# output:
# {'status': 'succ', 'head': ['ts', 'current'], 'column_meta': [['ts', 9, 8], ['current', 6, 4]], 'data': [[datetime.datetime(2018, 10, 3, 14, 38, 5, tzinfo=datetime.timezone(datetime.timedelta(seconds=28800), '+08:00')), 10.3]], 'rows': 1}

View File

@ -0,0 +1,33 @@
import taos
conn = taos.connect()
conn.execute("DROP DATABASE IF EXISTS test", req_id=1)
conn.execute("CREATE DATABASE test", req_id=2)
conn.select_db("test")
conn.execute("CREATE STABLE weather(ts TIMESTAMP, temperature FLOAT) TAGS (location INT)", req_id=3)
# prepare data
for i in range(2000):
location = str(i % 10)
tb = "t" + location
conn.execute(f"INSERT INTO {tb} USING weather TAGS({location}) VALUES (now+{i}a, 23.5) (now+{i + 1}a, 23.5)", req_id=4+i)
result: taos.TaosResult = conn.query("SELECT * FROM weather", req_id=2004)
block_index = 0
blocks: taos.TaosBlocks = result.blocks_iter()
for rows, length in blocks:
print("block ", block_index, " length", length)
print("first row in this block:", rows[0])
block_index += 1
conn.close()
# possible output:
# block 0 length 1200
# first row in this block: (datetime.datetime(2022, 4, 27, 15, 14, 52, 46000), 23.5, 0)
# block 1 length 1200
# first row in this block: (datetime.datetime(2022, 4, 27, 15, 14, 52, 76000), 23.5, 3)
# block 2 length 1200
# first row in this block: (datetime.datetime(2022, 4, 27, 15, 14, 52, 99000), 23.5, 6)
# block 3 length 400
# first row in this block: (datetime.datetime(2022, 4, 27, 15, 14, 52, 122000), 23.5, 9)

View File

@ -0,0 +1,21 @@
import taos
conn = taos.connect()
dbname = "pytest_line"
conn.execute("drop database if exists %s" % dbname)
conn.execute("create database if not exists %s precision 'us'" % dbname)
conn.select_db(dbname)
lines = [
'st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"pass",c2=false,c4=4f64 1626006833639000000',
]
conn.schemaless_insert(lines, taos.SmlProtocol.LINE_PROTOCOL, taos.SmlPrecision.NOT_CONFIGURED)
print("inserted")
conn.schemaless_insert(lines, taos.SmlProtocol.LINE_PROTOCOL, taos.SmlPrecision.NOT_CONFIGURED)
result = conn.query("show tables")
for row in result:
print(row)
conn.execute("drop database if exists %s" % dbname)

View File

@ -0,0 +1,74 @@
import taos
from taos import utils
from taos import TaosConnection
from taos.cinterface import *
from taos.error import OperationalError, SchemalessError
conn = taos.connect()
dbname = "taos_schemaless_insert"
try:
conn.execute("drop database if exists %s" % dbname)
if taos.IS_V3:
conn.execute("create database if not exists %s schemaless 1 precision 'ns'" % dbname)
else:
conn.execute("create database if not exists %s update 2 precision 'ns'" % dbname)
conn.select_db(dbname)
lines = '''st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000
st,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin, abc",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000
stf,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000'''
res = conn.schemaless_insert_raw(lines, 1, 0)
print("affected rows: ", res)
assert (res == 3)
lines = '''stf,t1=5i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000'''
res = conn.schemaless_insert_raw(lines, 1, 0)
print("affected rows: ", res)
assert (res == 1)
result = conn.query("select * from st")
dict2 = result.fetch_all_into_dict()
print(dict2)
print(result.row_count)
all = result.rows_iter()
for row in all:
print(row)
result.close()
assert (result.row_count == 2)
# error test
lines = ''',t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000'''
try:
res = conn.schemaless_insert_raw(lines, 1, 0)
print(res)
# assert(False)
except SchemalessError as err:
print('**** error: ', err)
# assert (err.msg == 'Invalid data format')
result = conn.query("select * from st")
print(result.row_count)
all = result.rows_iter()
for row in all:
print(row)
result.close()
conn.execute("drop database if exists %s" % dbname)
conn.close()
except InterfaceError as err:
conn.execute("drop database if exists %s" % dbname)
conn.close()
print(err)
except SchemalessError as err:
conn.execute("drop database if exists %s" % dbname)
conn.close()
print(err)
except Exception as err:
conn.execute("drop database if exists %s" % dbname)
conn.close()
print(err)
raise err

View File

@ -0,0 +1,76 @@
import taos
from taos import utils
from taos import TaosConnection
from taos.cinterface import *
from taos.error import OperationalError, SchemalessError
conn = taos.connect()
dbname = "taos_schemaless_insert"
try:
conn.execute("drop database if exists %s" % dbname)
if taos.IS_V3:
conn.execute("create database if not exists %s schemaless 1 precision 'ns'" % dbname)
else:
conn.execute("create database if not exists %s update 2 precision 'ns'" % dbname)
conn.select_db(dbname)
lines = '''st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000
st,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin, abc",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000
stf,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000'''
ttl = 1000
req_id = utils.gen_req_id()
res = conn.schemaless_insert_raw(lines, 1, 0, ttl=ttl, req_id=req_id)
print("affected rows: ", res)
assert (res == 3)
lines = '''stf,t1=5i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000'''
ttl = 1000
req_id = utils.gen_req_id()
res = conn.schemaless_insert_raw(lines, 1, 0, ttl=ttl, req_id=req_id)
print("affected rows: ", res)
assert (res == 1)
result = conn.query("select * from st")
dict2 = result.fetch_all_into_dict()
print(dict2)
print(result.row_count)
all = result.rows_iter()
for row in all:
print(row)
result.close()
assert (result.row_count == 2)
# error test
lines = ''',t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000'''
try:
ttl = 1000
req_id = utils.gen_req_id()
res = conn.schemaless_insert_raw(lines, 1, 0, ttl=ttl, req_id=req_id)
print(res)
# assert(False)
except SchemalessError as err:
print('**** error: ', err)
# assert (err.msg == 'Invalid data format')
result = conn.query("select * from st")
print(result.row_count)
all = result.rows_iter()
for row in all:
print(row)
result.close()
conn.execute("drop database if exists %s" % dbname)
conn.close()
except InterfaceError as err:
conn.execute("drop database if exists %s" % dbname)
conn.close()
print(err)
except Exception as err:
conn.execute("drop database if exists %s" % dbname)
conn.close()
print(err)
raise err

View File

@ -0,0 +1,73 @@
import taos
from taos import utils
from taos import TaosConnection
from taos.cinterface import *
from taos.error import OperationalError, SchemalessError
conn = taos.connect()
dbname = "taos_schemaless_insert"
try:
conn.execute("drop database if exists %s" % dbname)
if taos.IS_V3:
conn.execute("create database if not exists %s schemaless 1 precision 'ns'" % dbname)
else:
conn.execute("create database if not exists %s update 2 precision 'ns'" % dbname)
conn.select_db(dbname)
lines = '''st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000
st,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin, abc",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000
stf,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000'''
ttl = 1000
res = conn.schemaless_insert_raw(lines, 1, 0, ttl=ttl)
print("affected rows: ", res)
assert (res == 3)
lines = '''stf,t1=5i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000'''
ttl = 1000
res = conn.schemaless_insert_raw(lines, 1, 0, ttl=ttl)
print("affected rows: ", res)
assert (res == 1)
result = conn.query("select * from st")
dict2 = result.fetch_all_into_dict()
print(dict2)
print(result.row_count)
all = result.rows_iter()
for row in all:
print(row)
result.close()
assert (result.row_count == 2)
# error test
lines = ''',t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000'''
try:
ttl = 1000
res = conn.schemaless_insert_raw(lines, 1, 0, ttl=ttl)
print(res)
# assert(False)
except SchemalessError as err:
print('**** error: ', err)
# assert (err.msg == 'Invalid data format')
result = conn.query("select * from st")
print(result.row_count)
all = result.rows_iter()
for row in all:
print(row)
result.close()
conn.execute("drop database if exists %s" % dbname)
conn.close()
except InterfaceError as err:
conn.execute("drop database if exists %s" % dbname)
conn.close()
print(err)
except Exception as err:
conn.execute("drop database if exists %s" % dbname)
conn.close()
print(err)
raise err

View File

@ -0,0 +1,22 @@
import taos
from taos import SmlProtocol, SmlPrecision
conn = taos.connect()
dbname = "pytest_line"
conn.execute("drop database if exists %s" % dbname)
conn.execute("create database if not exists %s precision 'us'" % dbname)
conn.select_db(dbname)
lines = [
'st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"pass",c2=false,c4=4f64 1626006833639000000',
]
conn.schemaless_insert(lines, taos.SmlProtocol.LINE_PROTOCOL, taos.SmlPrecision.NOT_CONFIGURED, req_id=1)
print("inserted")
conn.schemaless_insert(lines, taos.SmlProtocol.LINE_PROTOCOL, taos.SmlPrecision.NOT_CONFIGURED, req_id=2)
result = conn.query("show tables")
for row in result:
print(row)
conn.execute("drop database if exists %s" % dbname)

View File

@ -0,0 +1,22 @@
import taos
from taos import SmlProtocol, SmlPrecision
conn = taos.connect()
dbname = "pytest_line"
conn.execute("drop database if exists %s" % dbname)
conn.execute("create database if not exists %s precision 'us'" % dbname)
conn.select_db(dbname)
lines = [
'st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"pass",c2=false,c4=4f64 1626006833639000000',
]
conn.schemaless_insert(lines, taos.SmlProtocol.LINE_PROTOCOL, taos.SmlPrecision.NOT_CONFIGURED, ttl=1000)
print("inserted")
conn.schemaless_insert(lines, taos.SmlProtocol.LINE_PROTOCOL, taos.SmlPrecision.NOT_CONFIGURED, ttl=1000)
result = conn.query("show tables")
for row in result:
print(row)
conn.execute("drop database if exists %s" % dbname)

View File

@ -6,7 +6,7 @@ def init_tmq_env(db, topic):
conn = taos.connect()
conn.execute("drop topic if exists {}".format(topic))
conn.execute("drop database if exists {}".format(db))
conn.execute("create database if not exists {}".format(db))
conn.execute("create database if not exists {} wal_retention_period 3600".format(db))
conn.select_db(db)
conn.execute(
"create stable if not exists stb1 (ts timestamp, c1 int, c2 float, c3 varchar(16)) tags(t1 int, t3 varchar(16))")

View File

@ -178,7 +178,7 @@ Active: inactive (dead)
:::
## TDengine Command Line Interface (CLI)
**TDengine Command Line Interface (CLI)**
To make it easy to check the status of TDengine and run ad hoc queries against its databases, TDengine provides a command-line application (hereinafter the TDengine CLI, `taos`). To enter the TDengine command line, simply run `taos` in a terminal.
@ -186,9 +186,9 @@ Active: inactive (dead)
<TabItem label="Windows" value="windows">
After installation, run `taosd.exe` in the `C:\TDengine` directory to start the TDengine service process.
After installation, you can run `sc start taosd` in a cmd window with administrator privileges, or run `taosd.exe` in the `C:\TDengine` directory, to start the TDengine service process.
## TDengine Command Line Interface (CLI)
**TDengine Command Line Interface (CLI)**
To make it easy to check the status of TDengine and run ad hoc queries against its databases, TDengine provides a command-line application (hereinafter the TDengine CLI, `taos`). To enter the TDengine command line, simply run `taos` in a terminal.
@ -215,7 +215,7 @@ Active: inactive (dead)
:::
## TDengine Command Line Interface (CLI)
**TDengine Command Line Interface (CLI)**
To make it easy to check the status of TDengine and run ad hoc queries against its databases, TDengine provides a command-line application (hereinafter the TDengine CLI, taos). To enter the TDengine command line, run taos.exe in the C:\TDengine directory from a Windows terminal.

View File

@ -4,7 +4,7 @@ description: '快速设置 TDengine 环境并体验其高效写入和查询'
---
import xiaot from './xiaot.webp'
import xiaot_new from './xiaot-new.webp'
import xiaot_new from './xiaot-03.webp'
import channel from './channel.webp'
import official_account from './official-account.webp'
@ -19,17 +19,6 @@ import {useCurrentSidebarCategory} from '@docusaurus/theme-common';
<DocCardList items={useCurrentSidebarCategory().items}/>
```
## Learn the TDengine Knowledge Map
The TDengine knowledge map covers TDengine's main concepts and reveals the relationships and data flows among them. Studying the knowledge map helps you quickly grasp the TDengine knowledge system.
<figure>
<center>
<a href="pathname:///img/tdengine-map.svg" target="_blank"><img src="/img/tdengine-map.svg" width="80%" /></a>
<figcaption>Figure 1. TDengine Knowledge Map</figcaption>
</center>
</figure>
## Join the Official TDengine Community
Scan the QR codes below with WeChat to learn about the latest TDengine technology, and to discuss IoT big data applications, TDengine usage questions, and tips with the community.
@ -41,7 +30,7 @@ The TDengine knowledge map covers TDengine's main concepts and reveals the
<td style={{padding:'1em 3em',border:0}}><img src={official_account} alt="TDengine WeChat official account" width="200" /></td>
</tr>
<tr align="center">
<td style={{padding:'1em 3em',border:0}}>Join the "IoT Big Data Technology Group"<br/>to exchange technical ideas</td>
<td style={{padding:'1em 3em',border:0}}>Join the TDengine WeChat group<br/>to learn about the latest IoT technology</td>
<td style={{padding:'1em 3em',border:0}}>Follow the TDengine video channel<br/>to watch live streams and tutorials</td>
<td style={{padding:'1em 3em',border:0}}>Follow the TDengine official account<br/>to read technical articles and case studies</td>
</tr>

Binary file not shown.


View File

@ -52,7 +52,7 @@ CREATE TABLE d1004 USING meters TAGS ("California.LosAngeles", 3);
### Create a Stream
```sql
create stream current_stream into current_stream_output_stb as select _wstart as start, _wend as wend, max(current) as max_current from meters where voltage <= 220 interval (5s);
create stream current_stream trigger at_once into current_stream_output_stb as select _wstart as wstart, _wend as wend, max(current) as max_current from meters where voltage <= 220 interval (5s);
```
### Write Data
@ -70,8 +70,8 @@ insert into d1004 values("2018-10-03 14:38:06.500", 11.50000, 221, 0.35000);
### Query to Observe Results
```sql
taos> select start, wend, max_current from current_stream_output_stb;
start | wend | max_current |
taos> select wstart, wend, max_current from current_stream_output_stb;
wstart | wend | max_current |
===========================================================================
2018-10-03 14:38:05.000 | 2018-10-03 14:38:10.000 | 10.30000 |
2018-10-03 14:38:15.000 | 2018-10-03 14:38:20.000 | 12.60000 |
@ -89,7 +89,7 @@ Query OK, 2 rows in database (0.018762s)
### Create a Stream
```sql
create stream power_stream into power_stream_output_stb as select ts, concat_ws(".", location, tbname) as meter_location, current*voltage*cos(phase) as active_power, current*voltage*sin(phase) as reactive_power from meters partition by tbname;
create stream power_stream trigger at_once into power_stream_output_stb as select ts, concat_ws(".", location, tbname) as meter_location, current*voltage*cos(phase) as active_power, current*voltage*sin(phase) as reactive_power from meters partition by tbname;
```
### Write Data

View File

@ -25,6 +25,7 @@ import CDemo from "./_sub_c.mdx";
This document does not cover the basics of message queues themselves; if you need background, please search for it yourself.
Note: by default, data is consumed from the WAL. If WAL files have been deleted, the consumed data will be incomplete. In that case you can set the parameter experimental.snapshot.enable to true to obtain all data from TSDB, but the consumption order of the data can then no longer be guaranteed. It is therefore recommended to set a WAL retention policy that matches your consumption behavior, so that all data can be subscribed to from the WAL.
## Main Data Structures and APIs
The TMQ subscription APIs and data structures for each language are as follows:
@ -283,18 +284,17 @@ CREATE TOPIC topic_name AS DATABASE db_name;
| Parameter | Type | Description | Remarks |
| :----------------------------: | :-----: | -------------------------------------------------------- | ------------------------------------------- |
| `td.connect.ip` | string | Used to create the connection; same as `taos_connect` | |
| `td.connect.user` | string | Used to create the connection; same as `taos_connect` | |
| `td.connect.pass` | string | Used to create the connection; same as `taos_connect` | |
| `td.connect.port` | integer | Used to create the connection; same as `taos_connect` | |
| `td.connect.ip` | string | Used to create the connection; same as `taos_connect` | Native connections only |
| `td.connect.user` | string | Used to create the connection; same as `taos_connect` | Native connections only |
| `td.connect.pass` | string | Used to create the connection; same as `taos_connect` | Native connections only |
| `td.connect.port` | integer | Used to create the connection; same as `taos_connect` | Native connections only |
| `group.id` | string | Consumer group ID; consumers in the same group share consumption progress | **Required**. Maximum length: 192. |
| `client.id` | string | Client ID | Maximum length: 192. |
| `auto.offset.reset` | enum | Initial position of the consumer group subscription | <br />`earliest`: default; subscribe from the beginning; <br/>`latest`: subscribe only from the latest data; <br/>`none`: cannot subscribe without a committed offset |
| `enable.auto.commit` | boolean | Whether to enable automatic offset commits | Valid values: `true`, `false`. |
| `auto.commit.interval.ms` | integer | Interval in milliseconds for automatically committing consumed offsets | Default: 5000 ms |
| `enable.heartbeat.background` | boolean | Enable background heartbeat; when enabled, the consumer stays online even if it does not poll messages for a long time | Enabled by default |
| `experimental.snapshot.enable` | boolean | Whether to allow consuming data from TSDB | Experimental; disabled by default |
| `msg.with.table.name` | boolean | Whether to allow parsing the table name from the message; not applicable to column subscriptions (for column subscriptions, tbname can be written into the subquery as a column) | |
| `enable.auto.commit` | boolean | Whether to enable automatic offset commits. true: commits happen automatically and the client application does not need to commit; false: the client application must commit manually | Default: true |
| `auto.commit.interval.ms` | integer | Interval for automatically committing consumed offsets, in milliseconds | Default: 5000 |
| `experimental.snapshot.enable` | boolean | Whether to allow consuming data from TSDB. When disabled, only data still in the WAL under the WAL retention policy can be consumed; when enabled, data that has been removed from the WAL but flushed to TSDB can also be consumed | Experimental; disabled by default |
| `msg.with.table.name` | boolean | Whether to allow parsing the table name from the message; not applicable to column subscriptions (for column subscriptions, tbname can be written into the subquery as a column) | Disabled by default |
The configuration methods for each programming language are as follows:
@ -367,7 +367,6 @@ conf := &tmq.ConfigMap{
"td.connect.port": "6030",
"client.id": "test_tmq_c",
"enable.auto.commit": "false",
"enable.heartbeat.background": "true",
"experimental.snapshot.enable": "true",
"msg.with.table.name": "true",
}
@ -419,7 +418,6 @@ consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
| `auto.commit.interval.ms` | string | Automatic commit interval in milliseconds | Default: 5000 ms |
| `auto.offset.reset` | string | Initial position of the consumer group subscription | Options: `earliest` (default), `latest`, `none` |
| `experimental.snapshot.enable` | string | Whether to allow consuming data from TSDB | Valid values: `true`, `false` |
| `enable.heartbeat.background` | string | Enable background heartbeat; when enabled, the consumer stays online even if it does not poll messages for a long time | Valid values: `true`, `false` |
</TabItem>

View File

@ -231,7 +231,7 @@ bit_add implements a bitwise AND across multiple columns. If there is only one column, that column is returned.
</details>
### Aggregate Function Example [l2norm](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/l2norm.c)
### Aggregate Function Example 1: numeric return value [l2norm](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/l2norm.c)
l2norm computes the L2 norm over all data in the input column: each value is squared, the squares are summed, and the square root of the sum is taken.
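The computation itself is just the Euclidean norm; a small Python sketch of what the C UDF computes:

```python
# Sketch: l2norm over a column of values.
import math

def l2norm(values):
    return math.sqrt(sum(v * v for v in values))

print(l2norm([3.0, 4.0]))  # 5.0
```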
@ -243,3 +243,29 @@ l2norm computes the L2 norm over all data in the input column: each value is
```
</details>
### Aggregate Function Example 2: string return value [max_vol](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/max_vol.c)
max_vol finds the maximum voltage across several input voltage columns and returns a combined string consisting of the device ID + the (row, column) position of the maximum voltage + the maximum voltage value.
Create the table:
```sql
create table battery(ts timestamp, vol1 float, vol2 float, vol3 float, deviceId varchar(16));
```
Create the custom function:
```sql
create aggregate function max_vol as '/root/udf/libmaxvol.so' outputtype binary(64) bufsize 10240 language 'C';
```
Use the custom function:
```sql
select max_vol(vol1,vol2,vol3,deviceid) from battery;
```
<details>
<summary>max_vol.c</summary>
```c
{{#include tests/script/sh/max_vol.c}}
```
</details>

View File

@ -383,6 +383,133 @@ curl http://192.168.0.1:6041/rest/login/root/taosdata
}
```
## REST API Differences Between TDengine 2.x and 3.0
### URI
| URI | TDengine 2.x | TDengine 3.0 |
| :--------------------| :------------------: | :--------------------------------------------------: |
| /rest/sql | Supported | Supported (response codes and message body differ) |
| /rest/sqlt | Supported | No longer supported |
| /rest/sqlutc | Supported | No longer supported |
### HTTP code
| HTTP code | TDengine 2.x | TDengine 3.0 | Remarks |
| :--------------------| :------------------: | :----------: | :-----------------------------------: |
| 200 | Supported | Supported | Success responses and taosc interface error returns |
| 400 | Not supported | Supported | Parameter errors |
| 401 | Not supported | Supported | Authentication failure |
| 404 | Supported | Supported | Endpoint not found |
| 500 | Not supported | Supported | Internal error |
| 503 | Supported | Supported | Insufficient system resources |
### Response Codes and Message Body
#### TDengine 2.x Response Codes and Message Body
```JSON
{
"status": "succ",
"head": [
"name",
"created_time",
"ntables",
"vgroups",
"replica",
"quorum",
"days",
"keep1,keep2,keep(D)",
"cache(MB)",
"blocks",
"minrows",
"maxrows",
"wallevel",
"fsync",
"comp",
"precision",
"status"
],
"data": [
[
"log",
"2020-09-02 17:23:00.039",
4,
1,
1,
1,
10,
"30,30,30",
1,
3,
100,
4096,
1,
3000,
2,
"us",
"ready"
]
],
"rows": 1
}
```
#### TDengine 3.0 Response Codes and Message Body
```JSON
{
"code": 0,
"column_meta": [
[
"name",
"VARCHAR",
64
],
[
"ntables",
"BIGINT",
8
],
[
"status",
"VARCHAR",
10
]
],
"data": [
[
"information_schema",
16,
"ready"
],
[
"performance_schema",
9,
"ready"
]
],
"rows": 2
}
```
## Reference
[taosAdapter](/reference/taosadapter/)

View File

@ -17,7 +17,7 @@ import TabItem from '@theme/TabItem';
- JDBC native connection: the Java application on physical node 1 (pnode1) uses TSDBDriver to call the client driver (libtaos.so or taos.dll) APIs directly, sending write and query requests to the taosd instance on physical node 2 (pnode2).
- JDBC REST connection: the Java application uses RestfulDriver to wrap SQL into a REST request and sends it to the REST server (taosAdapter) on physical node 2; the REST server forwards the request to taosd and returns the result.
REST connections do not depend on the TDengine client driver, so they are cross-platform and more flexible, but performance is about 30% lower than the native connector.
REST connections do not depend on the TDengine client driver, so they are cross-platform and more flexible.
:::info
TDengine's JDBC driver implementation stays as close as possible to relational database drivers, but TDengine's use cases and technical characteristics differ from those of relational databases, so `taos-jdbcdriver` also differs somewhat from traditional JDBC drivers. Keep the following points in mind when using it:
@ -728,7 +728,7 @@ consumer.close()
For details, see [Data Subscription](../../../develop/tmq).
### Usage examples:
#### Complete Example
<Tabs defaultValue="native">
<TabItem value="native" label="Native Connection">

View File

@ -122,7 +122,7 @@ _taosSql_ implements Go's `database/sql/driver` interface through cgo. You only need
Use `taosSql` as the `driverName` and a correct [DSN](#DSN) as the `dataSourceName`. Supported DSN parameters:
* configPath specifies the taos.cfg directory
* cfg specifies the taos.cfg directory
Example:

View File

@ -10,7 +10,9 @@ import TabItem from "@theme/TabItem";
`taospy` is the official Python connector for TDengine. `taospy` provides a rich API that makes it easy for Python applications to use TDengine. `taospy` wraps both TDengine's [native interface](../cpp) and its [REST interface](../rest-api), corresponding to the `taos` and `taosrest` modules of the `taospy` package respectively.
Beyond wrapping the native and REST interfaces, `taospy` also provides a programming interface that conforms to the [Python Database API Specification (PEP 249)](https://peps.python.org/pep-0249/). This makes it easy to integrate `taospy` with many third-party tools, such as [SQLAlchemy](https://www.sqlalchemy.org/) and [pandas](https://pandas.pydata.org/).
A connection established directly with the server using the client driver's native interface is referred to below as a "native connection"; a connection established with the server using the REST interface provided by taosAdapter is referred to below as a "REST connection".
`taos-ws-py` is an optional Python connector package for connecting to TDengine over WebSocket.
A connection established directly with the server using the client driver's native interface is referred to below as a "native connection"; a connection established with the server using the REST or WebSocket interface provided by taosAdapter is referred to below as a "REST connection" or "WebSocket connection".
The source code of the Python connector is hosted on [GitHub](https://github.com/taosdata/taos-connector-python).
@ -115,6 +117,15 @@ import taos
import taosrest
```
</TabItem>
<TabItem value="ws" label="WebSocket Connection">
For a WebSocket connection, just verify that the `taosws` module can be imported successfully. In an interactive Python shell, enter:
```python
import taosws
```
</TabItem>
</Tabs>
@ -183,6 +194,27 @@ curl -u root:taosdata http://<FQDN>:<PORT>/rest/sql -d "select server_version()"
}
```
</TabItem>
<TabItem value="ws" label="WebSocket Connection" groupId="connect">
For a WebSocket connection, in addition to making sure the cluster is up, you must also make sure the taosAdapter component is running. You can test it with the following curl command:
```
curl -i -N -d "show databases" -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -H "Connection: Upgrade" -H "Upgrade: websocket" -H "Host: <FQDN>:<PORT>" -H "Origin: http://<FQDN>:<PORT>" http://<FQDN>:<PORT>/rest/sql
```
In the command above, FQDN is the FQDN of the machine running taosAdapter, and PORT is the listening port configured for taosAdapter (6041 by default).
If the test succeeds, the server version information is printed, for example:
```json
HTTP/1.1 200 OK
Content-Type: application/json; charset=utf-8
Date: Tue, 21 Mar 2023 09:29:17 GMT
Transfer-Encoding: chunked
{"status":"succ","head":["server_version()"],"column_meta":[["server_version()",8,8]],"data":[["2.6.0.27"]],"rows":1}
```
</TabItem>
</Tabs>
@ -321,6 +353,85 @@ The TaosCursor class uses a native connection for write and query operations. In multi-threaded client
</TabItem>
</Tabs>
### 与 req_id 一起使用
使用可选的 req_id 参数,指定请求 id可以用于 tracing
<Tabs defaultValue="rest">
<TabItem value="native" label="原生连接">
##### TaosConnection 类的使用
`TaosConnection` 类既包含对 PEP249 Connection 接口的实现(如:`cursor`方法和 `close` 方法),也包含很多扩展功能(如: `execute`、 `query`、`schemaless_insert` 和 `subscribe` 方法。
```python title="execute 方法"
{{#include docs/examples/python/connection_usage_native_reference_with_req_id.py:insert}}
```
```python title="query 方法"
{{#include docs/examples/python/connection_usage_native_reference_with_req_id.py:query}}
```
:::tip
查询结果只能获取一次。比如上面的示例中 `fetch_all()` 和 `fetch_all_into_dict()` 只能用一个。重复获取得到的结果为空列表。
:::
##### TaosResult 类的使用
上面 `TaosConnection` 类的使用示例中,我们已经展示了两种获取查询结果的方法: `fetch_all()` 和 `fetch_all_into_dict()`。除此之外 `TaosResult` 还提供了按行迭代(`rows_iter`)或按数据块迭代(`blocks_iter`)结果集的方法。在查询数据量较大的场景,使用这两个方法会更高效。
```python title="blocks_iter 方法"
{{#include docs/examples/python/result_set_with_req_id_examples.py}}
```
##### TaosCursor 类的使用
`TaosConnection` 类和 `TaosResult` 类已经实现了原生接口的所有功能。如果你对 PEP249 规范中的接口比较熟悉也可以使用 `TaosCursor` 类提供的方法。
```python title="TaosCursor 的使用"
{{#include docs/examples/python/cursor_usage_native_reference_with_req_id.py}}
```
:::note
TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线程的场景下,这个游标实例必须保持线程独享,不能跨线程共享使用,否则会导致返回结果出现错误。
:::
</TabItem>
<TabItem value="rest" label="REST 连接">
##### TaosRestCursor 类的使用
`TaosRestCursor` 类是对 PEP249 Cursor 接口的实现。
```python title="TaosRestCursor 的使用"
{{#include docs/examples/python/connect_rest_with_req_id_examples.py:basic}}
```
- `cursor.execute` 用来执行任意 SQL 语句。
- `cursor.rowcount` 对于写入操作返回写入成功记录数。对于查询操作,返回结果集行数。
- `cursor.description` 返回字段的描述信息。关于描述信息的具体格式请参考[TaosRestCursor](https://docs.taosdata.com/api/taospy/taosrest/cursor.html)。
##### RestClient 类的使用
`RestClient` 类是对于 [REST API](../rest-api) 的直接封装。它只包含一个 `sql()` 方法用于执行任意 SQL 语句, 并返回执行结果。
```python title="RestClient 的使用"
{{#include docs/examples/python/rest_client_with_req_id_example.py}}
```
For a more detailed introduction to the `sql()` method, see [RestClient](https://docs.taosdata.com/api/taospy/taosrest/restclient.html).
</TabItem>
<TabItem value="websocket" label="WebSocket 连接">
```python
{{#include docs/examples/python/connect_websocket_with_req_id_examples.py:basic}}
```
- `conn.execute`: executes an arbitrary SQL statement and returns the number of affected rows
- `conn.query`: executes a query SQL statement and returns the query result
</TabItem>
</Tabs>
### Using with pandas
<Tabs defaultValue="rest">
@ -373,6 +484,56 @@ TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线
</TabItem>
</Tabs>
### Schemaless Writing
The connector supports schemaless writing.
<Tabs defaultValue="list">
<TabItem value="list" label="List 写入">
简单写入
```python
{{#include docs/examples/python/schemaless_insert.py}}
```
Write with the ttl parameter
```python
{{#include docs/examples/python/schemaless_insert_ttl.py}}
```
Write with the req_id parameter
```python
{{#include docs/examples/python/schemaless_insert_req_id.py}}
```
</TabItem>
<TabItem value="raw" label="Raw 写入">
简单写入
```python
{{#include docs/examples/python/schemaless_insert_raw.py}}
```
Write with the ttl parameter
```python
{{#include docs/examples/python/schemaless_insert_raw_ttl.py}}
```
Write with the req_id parameter
```python
{{#include docs/examples/python/schemaless_insert_raw_req_id.py}}
```
</TabItem>
</Tabs>
### Other Sample Programs
| Sample Program Link | Sample Program Content |

View File

@ -14,7 +14,7 @@ import PkgListV3 from "/components/PkgListV3";
After extracting the package, you will see the following files (directories) in the extraction directory:
- _install_client.sh_: installation script for the application driver
- _taos.tar.gz_: application driver package
- _package.tar.gz_: application driver package
- _driver_: TDengine application driver
- _examples_: sample programs in various programming languages (c/C#/go/JDBC/MATLAB/python/R)
Run install_client.sh to install.

View File

@ -35,7 +35,6 @@ database_option: {
| TABLE_SUFFIX value
| TSDB_PAGESIZE value
| WAL_RETENTION_PERIOD value
| WAL_ROLL_PERIOD value
| WAL_RETENTION_SIZE value
| WAL_SEGMENT_SIZE value
}
@ -75,11 +74,10 @@ database_option: {
- TABLE_PREFIX: the length of the prefix in the table name to ignore when the internal storage engine assigns a VNODE to store the table's data.
- TABLE_SUFFIX: the length of the suffix in the table name to ignore when the internal storage engine assigns a VNODE to store the table's data.
- TSDB_PAGESIZE: the page size of the time-series storage engine in a VNODE, in KB. The default is 4 KB. The range is 1 to 16384, i.e. 1 KB to 16 MB.
- WAL_RETENTION_PERIOD: the maximum extra retention period for WAL files already consumed by data subscription. Unit is s. The default is 0, meaning no extra retention; -1 means extra retention with no time limit.
- WAL_RETENTION_SIZE: the maximum extra cumulative size of WAL files retained for data subscription. Unit is KB. The default is 0, meaning no extra retention; -1 means extra retention with no size limit.
- WAL_RETENTION_PERIOD: the maximum period for which WAL files are additionally retained for data subscription; WAL cleanup is not affected by the consumption status of subscription clients. Unit is s. The default is 0, meaning no extra retention for subscriptions. Set an appropriate retention period before creating subscriptions (see the sketch after this list).
- WAL_RETENTION_SIZE: the maximum cumulative size of WAL files additionally retained for data subscription. Unit is KB. The default is 0, meaning no upper limit on the cumulative size.
- WAL_ROLL_PERIOD: the interval at which WAL files are rotated, in s. After a WAL file has been created and written, a new WAL file is created automatically once this period elapses. The default is 0, meaning a new file is created only when TSDB data is flushed to disk.
- WAL_SEGMENT_SIZE: the size of a single WAL file, in KB. Once the current file exceeds this limit, a new WAL file is created automatically. The default is 0, meaning a new file is created only when TSDB data is flushed to disk.
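Starting with 3.0.4.0, both retention options can also be changed on an existing database. A minimal sketch, assuming a database named `db1` and illustrative values sized to the subscribers' expected lag:

```sql
ALTER DATABASE db1 WAL_RETENTION_PERIOD 3600;   -- keep WAL at least 1 hour for subscribers
ALTER DATABASE db1 WAL_RETENTION_SIZE 102400;   -- cap the extra retention at 100 MB
```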
### Database Creation Example
```sql
@ -179,6 +177,14 @@ TRIM DATABASE db_name;
Deletes expired data and reorganizes the remaining data according to the multi-tier storage configuration.
## Flush In-Memory Data
```sql
FLUSH DATABASE db_name;
```
Flushes in-memory data to disk. Executing this command before shutting down a node avoids data replay after restart and speeds up the startup process.
## Adjust the Distribution of VNODEs in a VGROUP
```sql
@ -194,3 +200,11 @@ BALANCE VGROUP
```
Automatically rebalances the distribution of vnodes across all vgroups of the cluster, i.e. performs data load balancing for the cluster at the vnode level.
## View Database Working Status
```sql
SHOW db_name.ALIVE;
```
Queries the availability of database db_name. Return values: 0: unavailable, 1: fully available, 2: partially available (some vnodes of the database are available while others are not)

View File

@ -13,12 +13,11 @@ create_definition:
col_name column_definition
column_definition:
type_name [COMMENT 'string_value']
type_name
```
**Usage Notes**
- A super table can contain at most 4096 columns; note that this count includes the TAG columns. The minimum is 3 columns: one timestamp primary key, one TAG column, and one data column.
- When creating a table, comments can be attached to columns or tags.
- The TAGS clause specifies the tag columns of the super table. Tag columns must follow these conventions (a sketch follows this list):
  - A TIMESTAMP column in TAGS requires an explicit value when data is written, and arithmetic expressions such as NOW + 10s are not yet supported.
  - TAG column names must not duplicate other column names.
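For reference, a minimal sketch of a super table definition that satisfies these constraints (hypothetical names):

```sql
-- one timestamp primary key, one data column, and one tag column at minimum
CREATE STABLE meters (ts TIMESTAMP, current FLOAT) TAGS (location BINARY(64));
```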

View File

@ -248,11 +248,11 @@ NULLS 语法用来指定 NULL 值在排序中输出的位置。NULLS LAST 是升
LIMIT controls the number of output rows, and OFFSET specifies the row offset after which output starts. LIMIT/OFFSET is applied to the result set after ORDER BY. LIMIT 5 OFFSET 2 can be abbreviated as LIMIT 2, 5; both output rows 3 through 7.
When a PARTITION BY clause is present, LIMIT controls the output of each partition slice rather than the total result set.
When a PARTITION BY/GROUP BY clause is present, LIMIT controls the output of each partition slice rather than the total result set.
## SLIMIT
SLIMIT is used together with PARTITION BY to control the number of output slices. SLIMIT 5 SOFFSET 2 can be abbreviated as SLIMIT 2, 5; both output slices 3 through 7.
SLIMIT is used together with PARTITION BY/GROUP BY to control the number of output slices. SLIMIT 5 SOFFSET 2 can be abbreviated as SLIMIT 2, 5; both output slices 3 through 7.
Note that if an ORDER BY clause is present, only one slice is output.
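As an illustrative sketch (hypothetical super table `meters` with tag `location`), combining both clauses:

```sql
-- at most 2 partition slices, and at most 5 rows output per slice
SELECT location, COUNT(*) FROM meters PARTITION BY location SLIMIT 2 LIMIT 5;
```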

View File

@ -459,12 +459,17 @@ TO_JSON(str_literal)
#### TO_UNIXTIMESTAMP
```sql
TO_UNIXTIMESTAMP(expr)
TO_UNIXTIMESTAMP(expr [, return_timestamp])
return_timestamp: {
0
| 1
}
```
**Description**: converts a datetime-formatted string into a UNIX timestamp.
**Return type**: BIGINT.
**Return type**: BIGINT, TIMESTAMP.
**Applicable column types**: VARCHAR, NCHAR.
@ -476,6 +481,7 @@ TO_UNIXTIMESTAMP(expr)
- The input datetime string must conform to the ISO 8601/RFC 3339 standard; string formats that cannot be converted return NULL.
- The precision of the returned timestamp matches the time precision configured for the current DATABASE.
- return_timestamp specifies whether the function returns a timestamp-typed value: when set to 1 it returns TIMESTAMP, when set to 0 it returns BIGINT. If unspecified, BIGINT is returned by default.
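For example, converting an RFC 3339 string in both return modes:

```sql
SELECT TO_UNIXTIMESTAMP('2023-04-25T10:07:20+08:00');    -- returns BIGINT
SELECT TO_UNIXTIMESTAMP('2023-04-25T10:07:20+08:00', 1); -- returns TIMESTAMP
```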
### Time and Date Functions

View File

@ -69,6 +69,16 @@ FILL 语句指定某一窗口区间数据缺失的情况下的填充模式。填
5. LINEAR fill: fills by linear interpolation between the nearest non-NULL values before and after the gap. For example, FILL(LINEAR).
6. NEXT fill: fills with the next non-NULL value. For example, FILL(NEXT).
Among the fill modes above, apart from the NONE mode, which fills nothing by default, the other modes are ignored if there is no data at all in the queried time range: no fill rows are produced and the query result is empty. This behavior is reasonable for some modes (PREV, NEXT, LINEAR), because no data means no fill values can be derived. For other modes (NULL, VALUE), fill values could in principle still be produced, and whether to output them depends on the application's needs. To serve applications that require data or NULL to be filled in this case, without breaking the behavioral compatibility of the existing fill modes, two new fill modes were added in version 3.0.3.0:
7. NULL_F: force-fill with NULL values
8. VALUE_F: force-fill with the VALUE value
The differences between NULL, NULL_F, VALUE, and VALUE_F in different scenarios are as follows (see the sketch after this list):
- INTERVAL clause: NULL_F and VALUE_F are forced fill modes, while NULL and VALUE are non-forced modes; in this scenario the semantics of each mode match its name
- INTERVAL clause in stream processing: NULL_F behaves the same as NULL and VALUE_F behaves the same as VALUE, all non-forced; that is, INTERVAL in stream processing has no forced modes
- INTERP clause: NULL behaves the same as NULL_F and VALUE behaves the same as VALUE_F, all forced; that is, INTERP has no non-forced modes
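A minimal sketch of forced filling over a range that contains no data (hypothetical table `meters`):

```sql
-- FILL(VALUE, 0) would return an empty set if the range holds no rows;
-- FILL(VALUE_F, 0) forces one filled row per window
SELECT _wstart, AVG(current) FROM meters
  WHERE ts >= '2023-04-01 00:00:00' AND ts < '2023-04-01 01:00:00'
  INTERVAL(10m) FILL(VALUE_F, 0);
```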
:::info
1. A FILL clause may generate a large amount of filled output, so be sure to specify the time range of the query. For each query, the system returns no more than 10 million rows containing interpolated values.
@ -153,7 +163,7 @@ SELECT COUNT(*), FIRST(ts) FROM temp_tb_1 SESSION(ts, tol_val);
Taking the following SQL statement as an example, event windows are partitioned as shown in the figure:
```sql
select _wstart, _wend, count(*) from t start with c1 > 0 end with c2 < 10
select _wstart, _wend, count(*) from t event_window start with c1 > 0 end with c2 < 10
```
![TDengine Database event window diagram](./event_window.webp)

View File

@ -8,10 +8,13 @@ description: 流式计算的相关 SQL 的详细语法
## Create a Stream
```sql
CREATE STREAM [IF NOT EXISTS] stream_name [stream_options] INTO stb_name SUBTABLE(expression) AS subquery
CREATE STREAM [IF NOT EXISTS] stream_name [stream_options] INTO stb_name[(field1_name, ...)] [TAGS (create_definition [, create_definition] ...)] SUBTABLE(expression) AS subquery
stream_options: {
TRIGGER [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time]
WATERMARK time
IGNORE EXPIRED [0|1]
DELETE_MARK time
FILL_HISTORY [0|1]
}
```
@ -28,6 +31,15 @@ subquery: SELECT select_list
Session windows, state windows, and sliding windows are supported; when applied to a super table, session windows and state windows must be used together with PARTITION BY tbname.
stb_name is the name of the super table that stores the computation results. If this super table does not exist, it is created automatically; if it already exists, the column schema is checked. See Writing to an Existing Super Table.
The TAGS clause defines the rules by which the stream creates tags, so that custom TAG values can be generated for the subtable corresponding to each partition. See Custom TAG.
```sql
create_definition:
col_name column_definition
column_definition:
type_name [COMMENT 'string_value']
```
The SUBTABLE clause defines the naming rules for the subtables created by the stream; see the partition section of stream processing.
@ -114,7 +126,7 @@ SELECT * from information_schema.`ins_streams`;
When creating a stream, the trigger mode of the stream is specified with the TRIGGER option.
For non-window computations, stream processing is triggered in real time; for window computations, 3 trigger modes are currently provided, with AT_ONCE as the default:
For non-window computations, stream processing is triggered in real time; for window computations, 3 trigger modes are currently provided, with WINDOW_CLOSE as the default:
1. AT_ONCE: triggered immediately upon write
@ -163,9 +175,67 @@ T3 时刻最新事件到达T 向后推移超过了第二个窗口关闭的
TDengine provides two ways of handling expired data, specified with the IGNORE EXPIRED option:
1. Recompute, i.e. IGNORE EXPIRED 0 (default): re-read all data of the corresponding window from TSDB and recompute to obtain the latest result
1. Recompute, i.e. IGNORE EXPIRED 0: re-read all data of the corresponding window from TSDB and recompute to obtain the latest result
2. Discard, i.e. IGNORE EXPIRED 1: ignore expired data
2. Discard, i.e. IGNORE EXPIRED 1 (default): ignore expired data
In either mode, the watermark should be set properly, to obtain correct results (discard mode) or to avoid the performance overhead of frequently triggered recomputation (recompute mode). A sketch combining these options follows.
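A sketch of a stream that discards expired data and tolerates 10 seconds of out-of-order writes (hypothetical names):

```sql
CREATE STREAM s_avg TRIGGER WINDOW_CLOSE WATERMARK 10s IGNORE EXPIRED 1
  INTO st_avg AS
  SELECT _wstart, AVG(current) FROM meters PARTITION BY tbname INTERVAL(1m);
```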
## Writing to an Existing Super Table
```sql
[field1_name,...]
```
Specifies the correspondence between the columns of stb_name and the subquery output. If the columns of stb_name and the subquery output match exactly in both position and number, the mapping need not be specified explicitly. If the data types do not match, the subquery output is converted to the type of the corresponding stb_name column.
For a super table that already exists, the column schema is checked as follows (a sketch follows this list):
1. Check whether the column schemas match; mismatches are resolved by automatic type conversion. Currently an error is reported only when the data length exceeds 4096 bytes; all other cases can be converted.
2. Check whether the number of columns is the same. If it differs, the correspondence between the super table columns and the subquery output must be specified explicitly, otherwise an error is reported; if it is the same, the correspondence may be specified or omitted, in which case columns correspond by position.
3. At least one custom tag must be defined, otherwise an error is reported. See Custom TAG.
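A sketch that passes all three checks, assuming a pre-existing super table `st1` (hypothetical names): the column mapping is given explicitly and one custom tag is defined.

```sql
CREATE STREAM s1 INTO st1 (ts, c1) TAGS (cc VARCHAR(100)) AS
  SELECT _wstart, COUNT(*) FROM st PARTITION BY CONCAT('tag-', tbname) AS cc INTERVAL(10s);
```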
## Custom TAG
Users can generate custom TAG values for the subtable corresponding to each partition.
```sql
CREATE STREAM streams2 trigger at_once INTO st1 TAGS(cc varchar(100)) as select _wstart, count(*) c1 from st partition by concat("tag-", tbname) as cc interval(10s);
```
In the PARTITION clause, an alias cc is defined for concat("tag-", tbname), corresponding to the name of the custom TAG of super table st1. In the example above, the TAG of each subtable newly created by the stream takes the prefix 'tag-' concatenated with the original table name as its value.
The TAG information is checked as follows:
1. Check whether the tag schemas match; mismatches are resolved by automatic data type conversion. Currently an error is reported only when the data length exceeds 4096 bytes; all other cases can be converted.
2. Check whether the number of tags is the same. If it differs, the correspondence between the super table tags and the subquery must be specified explicitly, otherwise an error is reported; if it is the same, the correspondence may be specified or omitted, in which case tags correspond by position.
## Clean Up Intermediate State
```
DELETE_MARK time
```
DELETE_MARK is used to delete cached window state, i.e. the intermediate results of stream processing; window state that falls before T (defined below) is eligible for cleanup. If not set, the default value is 10 years.
T = latest event time - DELETE_MARK
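For instance, a sketch that shrinks the retained intermediate state to 7 days (illustrative value):

```sql
CREATE STREAM s2 TRIGGER AT_ONCE DELETE_MARK 7d INTO st2 AS
  SELECT _wstart, COUNT(*) FROM st PARTITION BY tbname INTERVAL(10s);
```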
## Functions Supported in Stream Processing
1. All single-row functions can be used in stream processing.
2. The following 19 aggregate/selection functions cannot be used in the SQL statement that creates a stream:
```
leastsquares
percentile
top
bottom
elapsed
interp
derivative
irate
twa
histogram
diff
statecount
stateduration
csum
mavg
sample
tail
unique
mode
```

View File

@ -269,7 +269,7 @@ description: TDengine 保留关键字的详细列表
- SPLIT
- STABLE
- STABLES
- STAR
- START
- STATE
- STATE_WINDOW
- STATEMENT

View File

@ -119,6 +119,14 @@ DROP QNODE ON DNODE dnode_id;
Deletes the QNODE on the DNODE with ID dnode_id; this does not affect the status of that dnode.
## Query Cluster Status
```sql
SHOW CLUSTER ALIVE;
```
Queries whether the current cluster is available. Return values: 0: unavailable, 1: fully available, 2: partially available (some nodes in the cluster are offline, but the other nodes can still be used normally)
## Modify Client Configuration
If the client is also regarded as part of the cluster in the broad sense, client configuration parameters can be modified dynamically with the following command.

View File

@ -69,7 +69,7 @@ TDengine 3.0 版本开始提供一个内置数据库 `performance_schema`,其
| 1 | consumer_id | BIGINT | unique ID of the consumer |
| 2 | consumer_group | BINARY(192) | consumer group |
| 3 | client_id | BINARY(192) | user-defined string, shown by specifying client_id when the consumer is created |
| 4 | status | BINARY(20) | current status of the consumer |
| 4 | status | BINARY(20) | current status of the consumer. Consumer statuses include: ready (available), lost (connection lost), rebalancing (the consumer's vgroups are being assigned), and unknown |
| 5 | topics | BINARY(204) | subscribed topics; if multiple topics are subscribed, they are shown as multiple rows |
| 6 | up_time | TIMESTAMP | time of the first connection to taosd |
| 7 | subscribe_time | TIMESTAMP | time of the most recent subscription |
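As a hedged sketch of inspecting these fields (assuming the table described here is `perf_consumers` in `performance_schema`):

```sql
SELECT consumer_id, consumer_group, status FROM performance_schema.perf_consumers;
```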

View File

@ -253,8 +253,9 @@ Query OK, 24 row(s) in set (0.002444s)
</code></pre>
</details>
The above is a distribution chart of the number of rows contained in each data block; the 0100, 0299, 0498, … entries denote the number of rows per block. It reads as follows: of this table's 5 blocks, 1 block (20% of all blocks) falls in the 3483 ~ 3681 row range, and 4 blocks (80%) fall in the 3881 ~ 4096 (maximum row count) range, while no blocks fall in the other ranges.
Note that only information about data blocks in data files is shown here; information about data in stt files is not shown.
## SHOW TAGS

View File

@ -27,7 +27,7 @@ description: "TDengine 3.0 版本的语法变更说明"
| - | :------- | :-------- | :------- |
| 1 | ALTER ACCOUNT | Removed | An enterprise feature in 2.x, no longer supported in 3.0. The syntax is temporarily retained; executing it reports a "This statement is no longer supported" error.
| 2 | ALTER ALL DNODES | Added | Modifies the parameters of all DNODEs.
| 3 | ALTER DATABASE | Adjusted | Removed<ul><li>QUORUM: the number of replica confirmations required for a write. 3.0 uses STRICT to specify strong or weak consistency; STRICT cannot be modified in 3.0.0.</li><li>BLOCKS: the number of memory blocks used by a VNODE. 3.0 uses BUFFER to denote the size of the VNODE write memory pool.</li><li>UPDATE: the supported mode of update operations. In 3.0 all databases support partial-column updates.</li><li>CACHELAST: the mode for caching the latest row of data. Replaced by CACHEMODEL in 3.0.</li><li>COMP: cannot be modified in 3.0.<br/>Added</li><li>CACHEMODEL: whether to cache recent subtable data in memory.</li><li>CACHESIZE: the memory size for caching recent subtable data.</li><li>WAL_FSYNC_PERIOD: replaces the former FSYNC parameter.</li><li>WAL_LEVEL: replaces the former WAL parameter.<br/>Adjusted</li><li>REPLICA: cannot be modified in 3.0.0.</li><li>KEEP: 3.0 adds support for values with units.</li></ul>
| 3 | ALTER DATABASE | Adjusted | Removed<ul><li>QUORUM: the number of replica confirmations required for a write. The default behavior in 3.0 is strong consistency, and switching to weak consistency is not supported.</li><li>BLOCKS: the number of memory blocks used by a VNODE. 3.0 uses BUFFER to denote the size of the VNODE write memory pool.</li><li>UPDATE: the supported mode of update operations. In 3.0 all databases support partial-column updates.</li><li>CACHELAST: the mode for caching the latest row of data. Replaced by CACHEMODEL in 3.0.</li><li>COMP: cannot be modified in 3.0.</li><br/>Added<li>CACHEMODEL: whether to cache recent subtable data in memory.</li><li>CACHESIZE: the memory size for caching recent subtable data.</li><li>WAL_FSYNC_PERIOD: replaces the former FSYNC parameter.</li><li>WAL_LEVEL: replaces the former WAL parameter</li><li>WAL_RETENTION_PERIOD: added in 3.0.4.0; extra WAL file retention policy for data subscription.</li><li>WAL_RETENTION_SIZE: added in 3.0.4.0; extra WAL file retention policy for data subscription<br/>Adjusted</li><li>REPLICA: cannot be modified in 3.0.0.</li><li>KEEP: 3.0 adds support for values with units.</li></ul>
| 4 | ALTER STABLE | Adjusted | Removed<ul><li>CHANGE TAG: renamed a tag column. Replaced by RENAME TAG in 3.0.<br/>Added</li><li>RENAME TAG: replaces the former CHANGE TAG clause.</li><li>COMMENT: modifies the comment of a super table.</li></ul>
| 5 | ALTER TABLE | Adjusted | Removed<ul><li>CHANGE TAG: renamed a tag column. Replaced by RENAME TAG in 3.0.<br/>Added</li><li>RENAME TAG: replaces the former CHANGE TAG clause.</li><li>COMMENT: modifies the comment of a table.</li><li>TTL: modifies the time-to-live of a table.</li></ul>
| 6 | ALTER USER | Adjusted | Removed<ul><li>PRIVILEGE: modified user privileges. 3.0 uses GRANT and REVOKE to grant and revoke privileges.<br/>Added</li><li>ENABLE: enables or disables the user.</li><li>SYSINFO: modifies whether the user can view system information.</li></ul>

View File

@ -66,15 +66,13 @@ Usage of taosAdapter:
--cors.allowCredentials cors allow credentials. Env "TAOS_ADAPTER_CORS_ALLOW_Credentials"
--cors.allowHeaders stringArray cors allow HEADERS. Env "TAOS_ADAPTER_ALLOW_HEADERS"
--cors.allowOrigins stringArray cors allow origins. Env "TAOS_ADAPTER_ALLOW_ORIGINS"
--cors.allowWebSockets cors allow WebSockets. Env "TAOS_ADAPTER_CORS_ALLOW_WebSockets"
--cors.exposeHeaders stringArray cors expose headers. Env "TAOS_ADAPTER_Expose_Headers"
--cors.allowWebSockets cors allow WebSockets. Env "TAOS_ADAPTER_CORS_ALLOW_WebSockets"
--cors.exposeHeaders stringArray cors expose headers. Env "TAOS_ADAPTER_Expose_Headers"
--debug enable debug mode. Env "TAOS_ADAPTER_DEBUG" (default true)
--help Print this help message and exit
--httpCodeServerError Use a non-200 http status code when taosd returns an error. Env "TAOS_ADAPTER_HTTP_CODE_SERVER_ERROR"
--httpCodeServerError Use a non-200 http status code when server returns an error. Env "TAOS_ADAPTER_HTTP_CODE_SERVER_ERROR"
--influxdb.enable enable influxdb. Env "TAOS_ADAPTER_INFLUXDB_ENABLE" (default true)
--log.enableRecordHttpSql whether to record http sql. Env "TAOS_ADAPTER_LOG_ENABLE_RECORD_HTTP_SQL"
--log.path string log path. Env "TAOS_ADAPTER_LOG_PATH" (default "/var/log/taos")
--log.rotationCount uint log rotation count. Env "TAOS_ADAPTER_LOG_ROTATION_COUNT" (default 30)
--log.path string log path. Env "TAOS_ADAPTER_LOG_PATH" (default "/var/log/taos")
--log.rotationCount uint log rotation count. Env "TAOS_ADAPTER_LOG_ROTATION_COUNT" (default 30)
--log.rotationSize string log rotation size(KB MB GB), must be a positive integer. Env "TAOS_ADAPTER_LOG_ROTATION_SIZE" (default "1GB")
--log.rotationTime duration log rotation time. Env "TAOS_ADAPTER_LOG_ROTATION_TIME" (default 24h0m0s)
--log.sqlRotationCount uint record sql log rotation count. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_COUNT" (default 2)
@ -89,8 +87,7 @@ Usage of taosAdapter:
--monitor.password string TDengine password. Env "TAOS_ADAPTER_MONITOR_PASSWORD" (default "taosdata")
--monitor.pauseAllMemoryThreshold float Memory percentage threshold for pause all. Env "TAOS_ADAPTER_MONITOR_PAUSE_ALL_MEMORY_THRESHOLD" (default 80)
--monitor.pauseQueryMemoryThreshold float Memory percentage threshold for pause query. Env "TAOS_ADAPTER_MONITOR_PAUSE_QUERY_MEMORY_THRESHOLD" (default 70)
--monitor.user string TDengine user. Env "TAOS_ADAPTER_MONITOR_USER" (default "root")
--monitor.writeInterval duration Set write to TDengine interval. Env "TAOS_ADAPTER_MONITOR_WRITE_INTERVAL" (default 30s)
--monitor.user string TDengine user. Env "TAOS_ADAPTER_MONITOR_USER" (default "root")
--monitor.writeInterval duration Set write to TDengine interval. Env "TAOS_ADAPTER_MONITOR_WRITE_INTERVAL" (default 30s)
--monitor.writeToTD Whether write metrics to TDengine. Env "TAOS_ADAPTER_MONITOR_WRITE_TO_TD"
--node_exporter.caCertFile string node_exporter ca cert file path. Env "TAOS_ADAPTER_NODE_EXPORTER_CA_CERT_FILE"
--node_exporter.certFile string node_exporter cert file path. Env "TAOS_ADAPTER_NODE_EXPORTER_CERT_FILE"
@ -119,14 +116,14 @@ Usage of taosAdapter:
--opentsdb_telnet.ttl int opentsdb_telnet data ttl. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TTL"
--opentsdb_telnet.user string opentsdb_telnet user. Env "TAOS_ADAPTER_OPENTSDB_TELNET_USER" (default "root")
--pool.idleTimeout duration Set idle connection timeout. Env "TAOS_ADAPTER_POOL_IDLE_TIMEOUT"
--pool.maxConnect int max connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_CONNECT"
--pool.maxIdle int max idle connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_IDLE"
--pool.maxConnect int max connections to server. Env "TAOS_ADAPTER_POOL_MAX_CONNECT"
--pool.maxIdle int max idle connections to server. Env "TAOS_ADAPTER_POOL_MAX_IDLE"
-P, --port int http port. Env "TAOS_ADAPTER_PORT" (default 6041)
--prometheus.enable enable prometheus. Env "TAOS_ADAPTER_PROMETHEUS_ENABLE" (default true)
--restfulRowLimit int restful returns the maximum number of rows (-1 means no limit). Env "TAOS_ADAPTER_RESTFUL_ROW_LIMIT" (default -1)
--smlAutoCreateDB Whether to automatically create db when writing with schemaless. Env "TAOS_ADAPTER_SML_AUTO_CREATE_DB"
--statsd.allowPendingMessages int statsd allow pending messages. Env "TAOS_ADAPTER_STATSD_ALLOW_PENDING_MESSAGES" (default 50000)
--statsd.db string statsd db name. Env "TAOS_ADAPTER_STATSD_DB" (default "statsd")
--statsd.deleteCounters statsd delete counter cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_COUNTERS" (default true)
--statsd.db string statsd db name. Env "TAOS_ADAPTER_STATSD_DB" (default "statsd")
--statsd.deleteCounters statsd delete counter cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_COUNTERS" (default true)
--statsd.deleteGauges statsd delete gauge cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_GAUGES" (default true)
--statsd.deleteSets statsd delete set cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_SETS" (default true)
--statsd.deleteTimings statsd delete timing cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_TIMINGS" (default true)
@ -136,11 +133,11 @@ Usage of taosAdapter:
--statsd.password string statsd password. Env "TAOS_ADAPTER_STATSD_PASSWORD" (default "taosdata")
--statsd.port int statsd server port. Env "TAOS_ADAPTER_STATSD_PORT" (default 6044)
--statsd.protocol string statsd protocol [tcp or udp]. Env "TAOS_ADAPTER_STATSD_PROTOCOL" (default "udp")
--statsd.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_STATSD_TCP_KEEP_ALIVE"
--statsd.ttl int statsd data ttl. Env "TAOS_ADAPTER_STATSD_TTL"
--statsd.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_STATSD_TCP_KEEP_ALIVE"
--statsd.ttl int statsd data ttl. Env "TAOS_ADAPTER_STATSD_TTL"
--statsd.user string statsd user. Env "TAOS_ADAPTER_STATSD_USER" (default "root")
--statsd.worker int statsd write worker. Env "TAOS_ADAPTER_STATSD_WORKER" (default 10)
--taosConfigDir string load taos client config path. Env "TAOS_ADAPTER_TAOS_CONFIG_FILE"
--tmq.releaseIntervalMultiplierForAutocommit int When set to autocommit, the interval for message release is a multiple of the autocommit interval, with a default value of 2 and a minimum value of 1 and a maximum value of 10. Env "TAOS_ADAPTER_TMQ_RELEASE_INTERVAL_MULTIPLIER_FOR_AUTOCOMMIT" (default 2)
--version Print the version and exit
```
@ -331,6 +328,10 @@ taosAdapter 通过参数 `restfulRowLimit` 来控制结果的返回条数,-1
taosAdapter uses the parameter `httpCodeServerError` to control whether a non-200 HTTP status code is returned when the C interface reports an error. When set to true, different HTTP status codes are returned according to the error code returned by C. See [HTTP response codes](../../connector/rest-api/#http-响应码).
## Configuring Automatic DB Creation for Schemaless Writes
Starting with version 3.0.4.0, taosAdapter provides the parameter `smlAutoCreateDB` to control whether the DB is created automatically when writing via the schemaless protocols. The default value is false: the DB is not created automatically, and users must create the DB manually before performing schemaless writes.
## Troubleshooting
You can check the running status of taosAdapter with the command `systemctl status taosadapter`.

View File

@ -400,11 +400,11 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
#### Configuration Parameters for Executing Specified Queries
The configuration parameters for querying subtables or regular tables are set in `specified_table_query`.
The configuration parameters for querying specified tables (super tables, subtables, or regular tables can be specified) are set in `specified_table_query`.
- **query_interval**: interval between queries, in seconds; the default is 0.
- **threads**: number of threads executing the query SQL; the default is 1.
- **threads/concurrent**: number of threads executing the query SQL; the default is 1.
- **sqls**:
  - **sql**: the SQL command to execute; required.
@ -431,7 +431,7 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
#### Configuration Parameters for Executing Specified Subscriptions
The configuration parameters for subscribing to subtables or regular tables are set in `specified_table_query`.
The configuration parameters for subscribing to specified tables (super tables, subtables, or regular tables can be specified) are set in `specified_table_query`.
- **threads/concurrent**: number of threads executing the SQL; the default is 1.

View File

@ -43,8 +43,6 @@ sudo apt-get update
sudo apt-get install grafana
```
### Install Grafana on CentOS / RHEL
</TabItem>
<TabItem value="redhat" label="基于 CentOS / RHEL 系统">
@ -79,7 +77,37 @@ sudo yum install \
</Tabs>
<Tabs defaultValue="auto" groupId="deploy">
### Install the TDengine Data Source Plugin
<Tabs defaultValue="manual" groupId="deploy">
<TabItem value="manual" label="Set up TDinsight manually">
Install the latest version of the TDengine data source plugin from GitHub.
```bash
get_latest_release() {
curl --silent "https://api.github.com/repos/taosdata/grafanaplugin/releases/latest" |
grep '"tag_name":' |
sed -E 's/.*"v([^"]+)".*/\1/'
}
TDENGINE_PLUGIN_VERSION=$(get_latest_release)
sudo grafana-cli \
--pluginUrl https://github.com/taosdata/grafanaplugin/releases/download/v$TDENGINE_PLUGIN_VERSION/tdengine-datasource-$TDENGINE_PLUGIN_VERSION.zip \
plugins install tdengine-datasource
```
:::note
Plugin versions 3.1.6 and earlier require the following setting in the configuration file `/etc/grafana/grafana.ini` to enable unsigned plugins.
```ini
[plugins]
allow_loading_unsigned_plugins = tdengine-datasource
```
:::
</TabItem>
<TabItem value="auto" label="自动部署 TDinsight">
我们提供了一个自动化安装脚本 [`TDinsight.sh`](https://github.com/taosdata/grafanaplugin/releases/latest/download/TDinsight.sh) 脚本以便用户快速进行安装配置。
@ -175,33 +203,7 @@ sudo ./TDengine.sh -n TDengine-Env1 -a http://another:6041 -u root -p taosdata -
In particular, when you use Grafana Cloud or another organization, `-O` can be used to set the organization ID, `-G` can specify the Grafana plugin installation directory, and the `-e` parameter sets the dashboard to be editable.
</TabItem>
<TabItem value="manual" label="Set up TDinsight manually">
### Install the TDengine Data Source Plugin
Install the latest version of the TDengine data source plugin from GitHub.
```bash
get_latest_release() {
curl --silent "https://api.github.com/repos/taosdata/grafanaplugin/releases/latest" |
grep '"tag_name":' |
sed -E 's/.*"v([^"]+)".*/\1/'
}
TDENGINE_PLUGIN_VERSION=$(get_latest_release)
sudo grafana-cli \
--pluginUrl https://github.com/taosdata/grafanaplugin/releases/download/v$TDENGINE_PLUGIN_VERSION/tdengine-datasource-$TDENGINE_PLUGIN_VERSION.zip \
plugins install tdengine-datasource
```
:::note
Plugin versions 3.1.6 and earlier require the following setting in the configuration file `/etc/grafana/grafana.ini` to enable unsigned plugins.
```ini
[plugins]
allow_loading_unsigned_plugins = tdengine-datasource
```
:::
</Tabs>
### Start the Grafana Service
@ -233,8 +235,7 @@ sudo systemctl enable grafana-server
![TDengine Database TDinsight data source test](./assets/howto-add-datasource-test.webp)
</TabItem>
</Tabs>
### Import the Dashboard

View File

@ -61,12 +61,14 @@ taos> SET MAX_BINARY_DISPLAY_WIDTH <nn>;
- -c CONFIGDIR: specifies the configuration file directory; on Linux the default is `/etc/taos`, and the default name of the configuration file in that directory is `taos.cfg`
- -C: prints the configuration parameters of `taos.cfg` in the directory specified by -c
- -d DATABASE: specifies the database to use when connecting to the server
- -E dsn: uses a WebSocket DSN to connect to cloud services or to servers that provide WebSocket connections
- -f FILE: executes an SQL script file in non-interactive mode; each SQL statement in the file must occupy exactly one line
- -k: tests the running status of the server; 0: unavailable, 1: network ok, 2: service ok, 3: service degraded, 4: exiting
- -l PKTLEN: test packet size used in network tests
- -n NETROLE: scope of the network connection test; the default is `client`, and the options are `client` and `server`
- -N PKTNUM: number of test packets used in network tests
- -r: outputs time as an unsigned 64-bit integer (i.e. uint64_t in C)
- -R: connects to the server in RESTful mode
- -s COMMAND: SQL command to execute in non-interactive mode
- -t: tests the server startup status; the statuses are the same as for -k
- -w DISPLAYWIDTH: client column display width

View File

@ -99,6 +99,9 @@ taos --dump-config
## Monitoring
:::note
Note that the full monitoring feature requires the `taoskeeper` service to be installed and running. taoskeeper receives the monitoring metric data and creates the `log` database.
### monitor
| Attribute | Description |
@ -623,6 +626,15 @@ charset 的有效值是 UTF-8。
| Default | 1 |
| Notes | The directory where core files are generated depends on how taosd was started: 1. started via systemctl start taosd: core files are generated in the root directory <br/> 2. started manually: core files are generated in the directory where taosd was executed. |
### enableScience
| Attribute | Description |
| -------- | ------------------------------------------------------------------------------------------------------------------------------------------ |
| Applicable | Client (TAOS-CLI) only |
| Meaning | Whether to display floating-point numbers in scientific notation |
| Value Range | 0: no, 1: yes |
| Default | 0 |
### udf
| Attribute | Description |

View File

@ -4,23 +4,25 @@ title: taosKeeper
description: Exporter of monitoring metrics for TDengine 3.0
---
import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";
## Introduction
taosKeeper is the exporter of monitoring metrics for TDengine 3.0. With just a few configuration items you can obtain the running status of TDengine. taosKeeper uses the TDengine RESTful interface, so the TDengine client does not need to be installed.
## Installation
<!-- taosKeeper can be installed in two ways: -->
taosKeeper can be installed in two ways:
taosKeeper installation methods:
<!-- - taosKeeper is installed automatically along with the official TDengine package; see [TDengine installation](/operation/pkg-install) for details. -->
- taosKeeper is installed automatically along with the official TDengine package; see [TDengine installation](/operation/pkg-install) for details.
<!-- - Build and install taosKeeper separately; see the [taosKeeper](https://github.com/taosdata/taoskeeper) repository for details. -->
- Build and install taosKeeper separately; see the [taosKeeper](https://github.com/taosdata/taoskeeper) repository for details.
## Running
## Configuration and Running
### Configuration and Running
### Configuration
taosKeeper needs to be run from an operating system terminal. The tool supports three configuration methods: [command-line parameters](#命令行参数启动), [environment variables](#环境变量启动), and a [configuration file](#配置文件启动). The priority is: command-line parameters, then environment variables, then configuration file parameters.
@ -34,29 +36,82 @@ monitorFqdn localhost # taoskeeper 服务的 FQDN
For TDengine monitoring configuration, see: [TDengine monitoring configuration](../config/#监控相关).
### Starting with Command-Line Parameters
### Starting
Run taosKeeper with command-line parameters to control its behavior.
<Tabs>
<TabItem label="Linux" value="linux">
```shell
$ taosKeeper
After installation, use the `systemctl` command to start the taoskeeper service process.
```bash
systemctl start taoskeeper
```
### Starting with Environment Variables
Check whether the service is working properly:
Control the startup parameters by setting environment variables; this is typically used when running in a container.
```shell
$ export TAOS_KEEPER_TDENGINE_HOST=192.168.64.3
$ taoskeeper
```bash
systemctl status taoskeeper
```
For the full list of parameters, refer to the output of `taoskeeper -h`.
If the service process is active, the status command displays information like the following:
### Starting with a Configuration File
```
Active: active (running)
```
Run the following command to quickly try out taosKeeper. When no taosKeeper configuration file is specified, the configuration in `/etc/taos/keeper.toml` is used first; otherwise the default configuration is used.
If the background service process is stopped, the status command displays information like the following:
```
Active: inactive (dead)
```
The following `systemctl` commands can help you manage the taoskeeper service:
- Start the service process: `systemctl start taoskeeper`
- Stop the service process: `systemctl stop taoskeeper`
- Restart the service process: `systemctl restart taoskeeper`
- Check the service status: `systemctl status taoskeeper`
:::info
- The `systemctl` command requires _root_ privileges to run; if you are not the _root_ user, add `sudo` before the command.
- If `systemd` is not supported on the system, the taoskeeper service can also be started by running `/usr/local/taos/bin/taoskeeper` manually.
- Troubleshooting:
  - If the service is abnormal, check the system log for more information.
:::
</TabItem>
<TabItem label="macOS" value="macOS">
After installation, run `sudo launchctl start com.tdengine.taoskeeper` to start the taoskeeper service process.
The following `launchctl` commands are used to manage the taoskeeper service:
- Start the service process: `sudo launchctl start com.tdengine.taoskeeper`
- Stop the service process: `sudo launchctl stop com.tdengine.taoskeeper`
- Check the service status: `sudo launchctl list | grep taoskeeper`
:::info
- Managing `com.tdengine.taoskeeper` with `launchctl` requires administrator privileges; always prefix the commands with `sudo` for security.
- The first column returned by `sudo launchctl list | grep taoskeeper` is the PID of the `taoskeeper` process; if it is `-`, the taoskeeper service is not running.
- Troubleshooting:
  - If the service is abnormal, check the system log for more information.
:::
</TabItem>
</Tabs>
#### Starting with a Configuration File
Run the following command to quickly try out taosKeeper. When no taosKeeper configuration file is specified, the configuration in `/etc/taos/taoskeeper.toml` is used first; otherwise the default configuration is used.
```shell
$ taoskeeper -c <keeper config file>
@ -101,6 +156,10 @@ database = "log"
# specify the regular tables to be monitored
tables = []
# database options for db storing metrics data
[metrics.databaseoptions]
cachemodel = "none"
```
### Obtaining Monitoring Metrics
@ -150,3 +209,46 @@ taos_cluster_info_dnodes_total{cluster_id="5981392874047724755"} 1
# TYPE taos_cluster_info_first_ep gauge
taos_cluster_info_first_ep{cluster_id="5981392874047724755",value="hlb:6030"} 1
```
### check\_health
```
$ curl -i http://127.0.0.1:6043/check_health
```
Response:
```
HTTP/1.1 200 OK
Content-Type: application/json; charset=utf-8
Date: Mon, 03 Apr 2023 07:20:38 GMT
Content-Length: 19
{"version":"1.0.0"}
```
### Integrating with Prometheus
taoskeeper provides the `/metrics` endpoint, which returns monitoring data in Prometheus format. Prometheus can scrape monitoring data from taoskeeper, so that TDengine can be monitored through Prometheus.
#### Scrape Configuration
Prometheus uses `scrape_configs` to configure how monitoring data is scraped from an endpoint; usually you only need to change the targets in `static_configs` to the taoskeeper endpoint address. For more configuration information, see the [Prometheus configuration documentation](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config).
```
# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
- job_name: "taoskeeper"
# metrics_path defaults to '/metrics'
# scheme defaults to 'http'.
static_configs:
- targets: ["localhost:6043"]
```
#### Dashboard
We provide the `TaosKeeper Prometheus Dashboard for 3.x` dashboard, which offers a monitoring dashboard similar to TDinsight.
In the Grafana Dashboard menu, click `import`, enter `18587` as the dashboard ID, and click the `Load` button to import the `TaosKeeper Prometheus Dashboard for 3.x` dashboard.

View File

@ -141,8 +141,20 @@ taos tools is uninstalled successfully!
```
</TabItem>
<TabItem label="Windows 卸载" value="windows">
在 C:\TDengine 目录下,通过运行 unins000.exe 卸载程序来卸载 TDengine。
</TabItem>
<TabItem label="Mac 卸载" value="mac">
卸载 TDengine 命令如下:
```
$ rmtaos
TDengine is removed successfully!
```
</TabItem>
</Tabs>

View File

@ -19,12 +19,8 @@ TDengine 接收到应用的请求数据包时,先将请求的原始数据包
## Disaster Recovery
TDengine clusters provide high availability through multiple replicas, and thus have a certain degree of disaster recovery capability.
TDengine disaster recovery is implemented by setting up two TDengine clusters in two data centers at different sites and using the data replication capability of taosX. Suppose the two clusters are cluster A and cluster B, where cluster A is the source cluster, accepting write requests and serving queries. In the data center of cluster A, taosX can be configured to consume the data newly written to cluster A in real time, using TDengine's data subscription capability, and replicate it to cluster B. If a disaster makes the data center of cluster A unavailable, cluster B can be activated as the primary target for writes and queries, and taosX can be configured in the data center of cluster B to replicate the data back to the recovered cluster A or to a newly built cluster C.
A TDengine cluster is managed by mnodes. To make the mnode highly reliable, three mnode replicas can be configured. To guarantee strong consistency of metadata, the mnode replicas replicate data synchronously.
More complex disaster recovery schemes can also be built on the data replication capability of taosX.
The replica count of time-series data in a TDengine cluster is associated with the database; a cluster can contain multiple databases, each configured with its own replica count, specified by the replica parameter when the database is created. To achieve high reliability, the replica count must be set to 3.
The number of nodes in a TDengine cluster must be greater than or equal to the replica count; otherwise creating tables will fail.
When the nodes of a TDengine cluster are deployed on different physical machines and multiple replicas are configured, the system is highly reliable without any additional software or tools. TDengine Enterprise can additionally deploy replicas in different server rooms, achieving geo-redundant disaster recovery.
taosX is only available in TDengine Enterprise; for details, contact business@taosdata.com.

View File

@ -38,3 +38,303 @@ chmod +x TDinsight.sh
Run the program and restart the Grafana service, then open the dashboard: `http://localhost:3000/d/tdinsight`.
For more use cases and limitations, see the [TDinsight](/reference/tdinsight/) documentation.
## The log Database
The TDinsight dashboard reads its data from the log database (the default database for storing monitoring data; this can be changed in the taoskeeper configuration file, see the [taoskeeper documentation](/reference/taosKeeper)). After taoskeeper starts, it automatically creates the log database and writes monitoring data into it.
### cluster\_info Table
The `cluster_info` table records cluster information.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|first\_ep|VARCHAR||first ep of the cluster|
|first\_ep\_dnode\_id|INT||dnode id of the cluster's first ep|
|version|VARCHAR||tdengine version, e.g. 3.0.4.0|
|master\_uptime|FLOAT||uptime of the current master node, in days|
|monitor\_interval|INT||monitor interval, in seconds|
|dbs\_total|INT||total number of databases|
|tbs\_total|BIGINT||total number of tables in the current cluster|
|stbs\_total|INT||total number of stables in the current cluster|
|dnodes\_total|INT||total number of dnodes in the current cluster|
|dnodes\_alive|INT||number of alive dnodes in the current cluster|
|mnodes\_total|INT||total number of mnodes in the current cluster|
|mnodes\_alive|INT||number of alive mnodes in the current cluster|
|vgroups\_total|INT||total number of vgroups in the current cluster|
|vgroups\_alive|INT||number of alive vgroups in the current cluster|
|vnodes\_total|INT||total number of vnodes in the current cluster|
|vnodes\_alive|INT||number of alive vnodes in the current cluster|
|connections\_total|INT||total number of connections in the current cluster|
|topics\_total|INT||total number of topics in the current cluster|
|streams\_total|INT||total number of streams in the current cluster|
|protocol|INT||protocol version, currently 1|
|cluster\_id|NCHAR|TAG|cluster id|
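As a quick sketch, the latest cluster health snapshot can be read directly from this table (field names as documented above):

```sql
SELECT ts, dnodes_alive, dnodes_total, vnodes_alive, vnodes_total
  FROM log.cluster_info ORDER BY ts DESC LIMIT 1;
```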
### d\_info Table
The `d_info` table records dnode status information.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|status|VARCHAR||dnode status|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
### m\_info Table
The `m_info` table records mnode role information.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|role|VARCHAR||mnode role: leader or follower|
|mnode\_id|INT|TAG|master node id|
|mnode\_ep|NCHAR|TAG|master node endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
### dnodes\_info Table
`dnodes_info` records dnode information.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|uptime|FLOAT||dnode uptime|
|cpu\_engine|FLOAT||taosd CPU usage, read from `/proc/<taosd_pid>/stat`|
|cpu\_system|FLOAT||server CPU usage, read from `/proc/stat`|
|cpu\_cores|FLOAT||number of server CPU cores|
|mem\_engine|INT||taosd memory usage, read from `/proc/<taosd_pid>/status`|
|mem\_system|INT||available server memory|
|mem\_total|INT||total server memory, in KB|
|disk\_engine|INT|||
|disk\_used|BIGINT||used space of the disk mounted at the data dir, in bytes|
|disk\_total|BIGINT||total capacity of the disk mounted at the data dir, in bytes|
|net\_in|FLOAT||network throughput: received bytes read from `/proc/net/dev`, in kb/s|
|net\_out|FLOAT||network throughput: transmit bytes read from `/proc/net/dev`, in kb/s|
|io\_read|FLOAT||IO throughput: the speed computed from the rchar value read from `/proc/<taosd_pid>/io` and the previous value, in kb/s|
|io\_write|FLOAT||IO throughput: the speed computed from the wchar value read from `/proc/<taosd_pid>/io` and the previous value, in kb/s|
|io\_read\_disk|FLOAT||disk IO throughput: read_bytes read from `/proc/<taosd_pid>/io`, in kb/s|
|io\_write\_disk|FLOAT||disk IO throughput: write_bytes read from `/proc/<taosd_pid>/io`, in kb/s|
|req\_select|INT||number of query requests during the interval|
|req\_select\_rate|FLOAT||query request rate during the interval = `req_select / monitorInterval`|
|req\_insert|INT||number of individual records contained in the write requests during the interval|
|req\_insert\_success|INT||number of individual records contained in the successfully processed write requests during the interval|
|req\_insert\_rate|FLOAT||record write rate during the interval = `req_insert / monitorInterval`|
|req\_insert\_batch|INT||number of write requests during the interval|
|req\_insert\_batch\_success|INT||number of successful batch write requests during the interval|
|req\_insert\_batch\_rate|FLOAT||write request rate during the interval = `req_insert_batch / monitorInterval`|
|errors|INT||number of failed write requests during the interval|
|vnodes\_num|INT||number of vnodes on the dnode|
|masters|INT||number of master nodes on the dnode|
|has\_mnode|INT||whether the dnode contains an mnode|
|has\_qnode|INT||whether the dnode contains a qnode|
|has\_snode|INT||whether the dnode contains an snode|
|has\_bnode|INT||whether the dnode contains a bnode|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
### data\_dir Table
The `data_dir` table records information about the data directory.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|name|NCHAR||data directory, usually `/var/lib/taos`|
|level|INT||multi-tier storage level: 0, 1, or 2|
|avail|BIGINT||available space in the data directory|
|used|BIGINT||used space in the data directory|
|total|BIGINT||total space of the data directory|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
### log\_dir Table
The `log_dir` table records information about the log directory.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|name|NCHAR||log directory name, usually `/var/log/taos/`|
|avail|BIGINT||available space in the log directory|
|used|BIGINT||used space in the log directory|
|total|BIGINT||total space of the log directory|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
### temp\_dir Table
The `temp_dir` table records information about the temp directory.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|name|NCHAR||temp directory name, usually `/tmp/`|
|avail|BIGINT||available space in the temp directory|
|used|BIGINT||used space in the temp directory|
|total|BIGINT||total space of the temp directory|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
### vgroups\_info Table
The `vgroups_info` table records virtual node group information.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|vgroup\_id|INT||vgroup id|
|database\_name|VARCHAR||name of the database the vgroup belongs to|
|tables\_num|BIGINT||number of tables in the vgroup|
|status|VARCHAR||vgroup status|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
### vnodes\_role Table
The `vnodes_role` table records virtual node role information.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|vnode\_role|VARCHAR||vnode role: leader or follower|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
### logs Table
The `logs` table records log information.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|level|VARCHAR||log level|
|content|NCHAR||log content, up to 1024 bytes|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
### log\_summary Table
`log_summary` records log statistics.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|error|INT||total number of error logs|
|info|INT||total number of info logs|
|debug|INT||total number of debug logs|
|trace|INT||total number of trace logs|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
### grants\_info Table
`grants_info` records license information.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|expire\_time|BIGINT||license expiration time; meaningful in the enterprise edition, the maximum bigint value in the community edition|
|timeseries\_used|BIGINT||number of time series in use|
|timeseries\_total|BIGINT||total number of time series; the maximum bigint value in the open-source edition|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
### keeper\_monitor Table
`keeper_monitor` records taoskeeper monitoring data.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|cpu|FLOAT||CPU usage|
|mem|FLOAT||memory usage|
|identify|NCHAR|TAG||
### taosadapter\_restful\_http\_request\_total Table
`taosadapter_restful_http_request_total` records taosadapter REST request information. The table is created via schemaless writes, and its timestamp field is named `_ts`.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|\_ts|TIMESTAMP||timestamp|
|guage|DOUBLE||monitoring metric value|
|client\_ip|NCHAR|TAG|client ip|
|endpoint|NCHAR|TAG|taosadapter endpoint|
|request\_method|NCHAR|TAG|request method|
|request\_uri|NCHAR|TAG|request uri|
|status\_code|NCHAR|TAG|status code|
### taosadapter\_restful\_http\_request\_fail Table
`taosadapter_restful_http_request_fail` records taosadapter REST request failure information. The table is created via schemaless writes, and its timestamp field is named `_ts`.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|\_ts|TIMESTAMP||timestamp|
|guage|DOUBLE||monitoring metric value|
|client\_ip|NCHAR|TAG|client ip|
|endpoint|NCHAR|TAG|taosadapter endpoint|
|request\_method|NCHAR|TAG|request method|
|request\_uri|NCHAR|TAG|request uri|
|status\_code|NCHAR|TAG|status code|
### taosadapter\_restful\_http\_request\_in\_flight Table
`taosadapter_restful_http_request_in_flight` records in-flight taosadapter REST request information in real time. The table is created via schemaless writes, and its timestamp field is named `_ts`.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|\_ts|TIMESTAMP||timestamp|
|guage|DOUBLE||monitoring metric value|
|endpoint|NCHAR|TAG|taosadapter endpoint|
### taosadapter\_restful\_http\_request\_summary\_milliseconds Table
`taosadapter_restful_http_request_summary_milliseconds` records summary information of taosadapter REST requests. The table is created via schemaless writes, and its timestamp field is named `_ts`.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|\_ts|TIMESTAMP||timestamp|
|count|DOUBLE|||
|sum|DOUBLE|||
|0.5|DOUBLE|||
|0.9|DOUBLE|||
|0.99|DOUBLE|||
|0.1|DOUBLE|||
|0.2|DOUBLE|||
|endpoint|NCHAR|TAG|taosadapter endpoint|
|request\_method|NCHAR|TAG|request method|
|request\_uri|NCHAR|TAG|request uri|
### taosadapter\_system\_mem\_percent Table
`taosadapter_system_mem_percent` records the memory usage of taosadapter. The table is created via schemaless writes, and its timestamp field is named `_ts`.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|\_ts|TIMESTAMP||timestamp|
|guage|DOUBLE||monitoring metric value|
|endpoint|NCHAR|TAG|taosadapter endpoint|
### taosadapter\_system\_cpu\_percent Table
`taosadapter_system_cpu_percent` records the CPU usage of taosadapter. The table is created via schemaless writes, and its timestamp field is named `_ts`.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|\_ts|TIMESTAMP||timestamp|
|guage|DOUBLE||monitoring metric value|
|endpoint|NCHAR|TAG|taosadapter endpoint|

View File

@ -77,7 +77,7 @@ sudo -u grafana grafana-cli plugins install tdengine-datasource
Alternatively, download the .zip file from [GitHub](https://github.com/taosdata/grafanaplugin/releases/tag/latest) or [Grafana](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation) and extract it into the Grafana plugin directory. An example command-line download:
```bash
GF_VERSION=3.2.9
GF_VERSION=3.3.1
# from GitHub
wget https://github.com/taosdata/grafanaplugin/releases/download/v$GF_VERSION/tdengine-datasource-$GF_VERSION.zip
# from Grafana

View File

@ -10,6 +10,18 @@ TDengine 2.x 各版本安装包请访问[这里](https://www.taosdata.com/all-do
import Release from "/components/ReleaseV3";
## 3.0.4.0
<Release type="tdengine" version="3.0.4.0" />
## 3.0.3.2
<Release type="tdengine" version="3.0.3.2" />
## 3.0.3.1
<Release type="tdengine" version="3.0.3.1" />
## 3.0.3.0
<Release type="tdengine" version="3.0.3.0" />

View File

@ -10,6 +10,18 @@ taosTools 各版本安装包下载链接如下:
import Release from "/components/ReleaseV3";
## 2.4.12
<Release type="tools" version="2.4.12" />
## 2.4.11
<Release type="tools" version="2.4.11" />
## 2.4.10
<Release type="tools" version="2.4.10" />
## 2.4.9
<Release type="tools" version="2.4.9" />

View File

@ -10,7 +10,7 @@
<description>Demo project for TDengine</description>
<properties>
<spring.version>5.3.20</spring.version>
<spring.version>5.3.26</spring.version>
</properties>
<dependencies>

View File

@ -21,6 +21,7 @@
#include "taos.h"
static int running = 1;
const char* topic_name = "topicname";
static int32_t msg_process(TAOS_RES* msg) {
char buf[1024];
@ -243,7 +244,7 @@ _end:
tmq_list_t* build_topic_list() {
tmq_list_t* topicList = tmq_list_new();
int32_t code = tmq_list_append(topicList, "topicname");
int32_t code = tmq_list_append(topicList, topic_name);
if (code) {
tmq_list_destroy(topicList);
return NULL;
@ -269,6 +270,31 @@ void basic_consume_loop(tmq_t* tmq) {
fprintf(stderr, "%d msg consumed, include %d rows\n", msgCnt, totalRows);
}
void consume_repeatly(tmq_t* tmq) {
int32_t numOfAssignment = 0;
tmq_topic_assignment* pAssign = NULL;
int32_t code = tmq_get_topic_assignment(tmq, topic_name, &pAssign, &numOfAssignment);
if (code != 0) {
fprintf(stderr, "failed to get assignment, reason:%s", tmq_err2str(code));
}
// seek to the earliest offset
for(int32_t i = 0; i < numOfAssignment; ++i) {
tmq_topic_assignment* p = &pAssign[i];
code = tmq_offset_seek(tmq, topic_name, p->vgId, p->begin);
if (code != 0) {
fprintf(stderr, "failed to seek to %ld, reason:%s", p->begin, tmq_err2str(code));
}
}
free(pAssign);
// let's do it again
basic_consume_loop(tmq);
}
int main(int argc, char* argv[]) {
int32_t code;
@ -294,10 +320,13 @@ int main(int argc, char* argv[]) {
if ((code = tmq_subscribe(tmq, topic_list))) {
fprintf(stderr, "Failed to tmq_subscribe(): %s\n", tmq_err2str(code));
}
tmq_list_destroy(topic_list);
basic_consume_loop(tmq);
consume_repeatly(tmq);
code = tmq_consumer_close(tmq);
if (code) {
fprintf(stderr, "Failed to close consumer: %s\n", tmq_err2str(code));

View File

@ -1,16 +1,15 @@
local _M = {}
local driver = require "luaconnector51"
local water_mark = 0
local occupied = 0
local connection_pool = {}
td_pool_watermark = 0
td_pool_occupied = 0
td_connection_pool = {}
function _M.new(o,config)
function _M.new(o, config)
o = o or {}
o.connection_pool = connection_pool
o.water_mark = water_mark
o.occupied = occupied
if #connection_pool == 0 then
o.connection_pool = td_connection_pool
o.watermark = td_pool_watermark
o.occupied = td_pool_occupied
if #td_connection_pool == 0 then
for i = 1, config.connection_pool_size do
local res = driver.connect(config)
if res.code ~= 0 then
@ -18,8 +17,8 @@ function _M.new(o,config)
return nil
else
local object = {obj = res.conn, state = 0}
table.insert(o.connection_pool,i, object)
ngx.log(ngx.INFO, "add connection, now pool size:"..#(o.connection_pool))
table.insert(td_connection_pool, i, object)
ngx.log(ngx.INFO, "add connection, now pool size:"..#(td_connection_pool))
end
end
@ -32,13 +31,13 @@ function _M:get_connection()
local connection_obj
for i = 1, #connection_pool do
connection_obj = connection_pool[i]
for i = 1, #td_connection_pool do
connection_obj = td_connection_pool[i]
if connection_obj.state == 0 then
connection_obj.state = 1
occupied = occupied +1
if occupied > water_mark then
water_mark = occupied
td_pool_occupied = td_pool_occupied + 1
if td_pool_occupied > td_pool_watermark then
td_pool_watermark = td_pool_occupied
end
return connection_obj["obj"]
end
@ -49,21 +48,27 @@ function _M:get_connection()
return nil
end
function _M:get_water_mark()
function _M:get_watermark()
return water_mark
return td_pool_watermark
end
function _M:get_current_load()
return td_pool_occupied
end
function _M:release_connection(conn)
local connection_obj
for i = 1, #connection_pool do
connection_obj = connection_pool[i]
for i = 1, #td_connection_pool do
connection_obj = td_connection_pool[i]
if connection_obj["obj"] == conn then
connection_obj["state"] = 0
occupied = occupied -1
td_pool_occupied = td_pool_occupied -1
return
end
end

View File

@ -4,8 +4,21 @@ local Pool = require "tdpool"
local config = require "config"
ngx.say("start time:"..os.time())
local pool = Pool.new(Pool,config)
local conn = pool:get_connection()
local pool = Pool.new(Pool, config)
local another_pool = Pool.new(Pool, config)
local conn, conn1, conn2
conn = pool:get_connection()
conn1 = pool:get_connection()
conn2 = pool:get_connection()
local temp_conn = another_pool:get_connection()
ngx.say("pool size:"..config.connection_pool_size)
ngx.say("pool watermark:"..pool:get_watermark())
ngx.say("pool current load:"..pool:get_current_load())
pool:release_connection(conn1)
pool:release_connection(conn2)
another_pool:release_connection(temp_conn)
ngx.say("pool watermark:"..pool:get_watermark())
ngx.say("pool current load:"..pool:get_current_load())
local res = driver.query(conn,"drop database if exists nginx")
if res.code ~=0 then
@ -31,7 +44,6 @@ end
res = driver.query(conn,"create table m1 (ts timestamp, speed int,owner binary(20))")
if res.code ~=0 then
ngx.say("create table---failed: "..res.error)
else
ngx.say("create table--- pass.")
end
@ -83,8 +95,5 @@ while not flag do
-- ngx.say("i am here once...")
ngx.sleep(0.001) -- time unit is second
end
ngx.say("pool water_mark:"..pool:get_water_mark())
pool:release_connection(conn)
ngx.say("end time:"..os.time())

View File

@ -4,5 +4,5 @@ if [ "$lua_header_installed" = "0" ]; then
sudo apt install -y liblua5.3-dev
fi
gcc -std=c99 lua_connector.c -fPIC -shared -o luaconnector.so -Wall -ltaos -I/usr/include/lua5.3 -I../../include/client
gcc -g -std=c99 lua_connector.c -fPIC -shared -o luaconnector.so -Wall -ltaos -I/usr/include/lua5.3 -I../../include/client

Some files were not shown because too many files have changed in this diff.