Merge branch 'enh/new3.0' into enh/refactorBackend
commit 1a4cfd11a5
@@ -314,9 +314,9 @@ def pre_test_build_win() {
cd %WIN_CONNECTOR_ROOT%
python.exe -m pip install --upgrade pip
python -m pip uninstall taospy -y
python -m pip install taospy==2.7.10
python -m pip install taospy==2.7.12
python -m pip uninstall taos-ws-py -y
python -m pip install taos-ws-py==0.2.8
python -m pip install taos-ws-py==0.2.9
xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taos.dll C:\\Windows\\System32
'''
return 1
@@ -0,0 +1,5 @@
# Security Policy

## Reporting a Vulnerability

Please submit CVEs to https://github.com/taosdata/TDengine/security/advisories.
@@ -83,6 +83,18 @@ ELSE ()
SET(TAOS_LIB taos)
ENDIF ()

# build TSZ by default
IF ("${TSZ_ENABLED}" MATCHES "false")
set(VAR_TSZ "" CACHE INTERNAL "global variant empty" )
ELSE()
# define add
MESSAGE(STATUS "build with TSZ enabled")
ADD_DEFINITIONS(-DTD_TSZ)
set(VAR_TSZ "TSZ" CACHE INTERNAL "global variant tsz" )
ENDIF()

# force set all platform to JEMALLOC_ENABLED = false
SET(JEMALLOC_ENABLED OFF)
IF (TD_WINDOWS)
MESSAGE("${Yellow} set compiler flag for Windows! ${ColourReset}")
SET(COMMON_FLAGS "/w /D_WIN32 /DWIN32 /Zi /MTd")
@@ -106,8 +118,6 @@ IF (TD_WINDOWS)
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COMMON_FLAGS}")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMMON_FLAGS}")

SET(JEMALLOC_ENABLED OFF)

ELSE ()
IF (${TD_DARWIN})
set(CMAKE_MACOSX_RPATH 0)
@@ -195,6 +195,20 @@ if (TD_LINUX)
ELSE()
set(TD_DEPS_DIR "x86")
ENDIF()
elseif (TD_DARWIN)
IF (TD_ARM_64 OR TD_ARM_32)
set(TD_DEPS_DIR "darwin/arm")
ELSE ()
set(TD_DEPS_DIR "darwin/x64")
ENDIF ()
elseif (TD_WINDOWS)
IF (TD_WINDOWS_64)
set(TD_DEPS_DIR "win/x64")
ELSEIF (TD_WINDOWS_32)
set(TD_DEPS_DIR "win/i386")
ENDIF ()
else ()
MESSAGE(FATAL_ERROR "unsupported platform")
endif()
MESSAGE(STATUS "DEPS_DIR: " ${TD_DEPS_DIR})
@@ -2,7 +2,7 @@
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "3.2.0.0.alpha")
SET(TD_VER_NUMBER "3.2.1.0.alpha")
ENDIF ()

IF (DEFINED VERCOMPATIBLE)
@@ -1,6 +1,7 @@
# curl
ExternalProject_Add(curl
URL https://curl.se/download/curl-8.2.1.tar.gz
URL_HASH MD5=b25588a43556068be05e1624e0e74d41
DOWNLOAD_NO_PROGRESS 1
DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
#GIT_REPOSITORY https://github.com/curl/curl.git
@@ -399,7 +399,7 @@ if(${BUILD_WITH_COS})
INCLUDE_DIRECTORIES($ENV{HOME}/.cos-local.1/include)
MESSAGE("$ENV{HOME}/.cos-local.1/include")

set(CMAKE_BUILD_TYPE debug)
set(CMAKE_BUILD_TYPE Release)
set(ORIG_CMAKE_PROJECT_NAME ${CMAKE_PROJECT_NAME})
set(CMAKE_PROJECT_NAME cos_c_sdk)
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -4,11 +4,11 @@ description: This document introduces the major features, competitive advantages
toc_max_heading_level: 2
---

TDengine is an [open source](https://tdengine.com/tdengine/open-source-time-series-database/), [high-performance](https://tdengine.com/tdengine/high-performance-time-series-database/), [cloud native](https://tdengine.com/tdengine/cloud-native-time-series-database/) [time-series database](https://tdengine.com/tsdb/) optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. Its code, including its cluster feature, is open source under GNU AGPL v3.0. Besides the database engine, it provides [caching](../develop/cache), [stream processing](../develop/stream), [data subscription](../develop/tmq) and other functionalities to reduce the system complexity and cost of development and operation.
TDengine is a big data platform designed and optimized for IoT (Internet of Things) and Industrial Internet. It can safely and effectively converge, store, process, and distribute the high volume of data (TB or even PB scale) generated every day by numerous devices and data acquisition units, monitor and alert on business operation status in real time, and provide real-time business insight. The core component of TDengine is TDengine OSS, a high-performance, open-source, cloud-native, and simplified time-series database.

This section introduces the major features, competitive advantages, typical use cases, and benchmarks to help you get a high-level overview of TDengine.

## Major Features
## Major Features of TDengine OSS

The major features are listed below:
@@ -132,3 +132,9 @@ As a high-performance, scalable and SQL supported time-series database, TDengine
- [Introduction to Time-Series Database](https://tdengine.com/tsdb/)
- [Introduction to TDengine competitive advantages](https://tdengine.com/tdengine/)

## Products

TDengine offers two products: TDengine Enterprise and TDengine Cloud. For details, please refer to:
- [TDengine Enterprise](https://www.taosdata.com/tdengine-pro)
- [TDengine Cloud](https://cloud.taosdata.com/?utm_source=menu&utm_medium=webcn)
@@ -221,7 +221,7 @@ curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive
&& tar -xzf php-tdengine.tar.gz -C php-tdengine --strip-components=1
```

> Version number `v1.0.2` is only an example; it can be replaced with any newer version. Please check available versions at [TDengine PHP Connector Releases](https://github.com/Yurunsoft/php-tdengine/releases).
> Version number `v1.0.2` is only an example; it can be replaced with any newer version.

**Non-Swoole Environment:**
@@ -55,7 +55,7 @@ At most 4096 columns are allowed in a STable. If there are more than 4096 of met

## Create Table

A specific table needs to be created for each data collection point. Similar to RDBMS, a table name and schema are required to create a table. Additionally, one or more tags can be created for each table. To create a table, a STable needs to be used as a template and the values need to be specified for the tags. For example, for the meters in [Table 1](/tdinternal/arch#model_table1), the table can be created using the SQL statement below.
A specific table needs to be created for each data collection point. Similar to RDBMS, a table name and schema are required to create a table. Additionally, one or more tags can be created for each table. To create a table, a STable needs to be used as a template and the values need to be specified for the tags. For example, for the smart meters table, the table can be created using the SQL statement below.

```sql
CREATE TABLE d1001 USING meters TAGS ("California.SanFrancisco", 2);
@@ -38,7 +38,10 @@ meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0
- All the data in `tag_set` will be converted to NCHAR type automatically
- Each value in `field_set` must be self-descriptive about its data type. For example, 1.2f32 means a value 1.2 of float type. Without the "f" type suffix, it will be treated as type double
- Multiple kinds of precision can be used for the `timestamp` field. Time precision can be from nanosecond (ns) to hour (h)
- The child table name is created automatically in a rule to guarantee its uniqueness. But you can configure `smlChildTableName` in taos.cfg to specify a tag value as the table name if the tag value is globally unique. For example, if a tag is called `tname` and you set `smlChildTableName=tname` in taos.cfg, when you insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000`, the child table `cpu1` will be created automatically. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored
- The rules of table names (a connector sketch follows this note)
  - The child table name is created automatically in a rule to guarantee its uniqueness.
  - You can configure `smlAutoChildTableNameDelimiter` in taos.cfg to specify a delimiter between tag values as the table name. For example, if you set `smlAutoChildTableNameDelimiter=-` in taos.cfg, when you insert `st,t0=cpu1,t1=4 c1=3 1626006833639000000`, the child table will be `cpu1-4`
  - You can configure `smlChildTableName` in taos.cfg to specify a tag value as the table name if the tag value is globally unique. For example, if a tag is called `tname` and you set `smlChildTableName=tname` in taos.cfg, when you insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000`, the child table `cpu1` will be created automatically. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored
- It is assumed that the order of field_set in a supertable is consistent, meaning that the first record contains all fields and subsequent records store fields in the same order. If the order is not consistent, set smlDataFormat in taos.cfg to false. Otherwise, data will be written out of order and a database error will occur. (smlDataFormat defaults to false since version 3.0.1.3; smlDataFormat is discarded since 3.0.3.0)

:::
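To make the rules above concrete, here is a minimal sketch of writing line protocol rows through the Python connector (taospy). The host, credentials, database name, and the exact `SmlProtocol`/`SmlPrecision` enum spellings are assumptions based on common taospy usage and may differ across connector versions:

```python
import taos

# Assumed local deployment with default credentials; adjust as needed.
conn = taos.connect(host="localhost", user="root", password="taosdata")
conn.execute("CREATE DATABASE IF NOT EXISTS test")
conn.select_db("test")

# 11.8f32 is a float; 221 (no suffix) is a double; the last field is a ns timestamp.
lines = [
    "meters,location=California.LosAngeles,groupid=2 current=11.8f32,voltage=221,phase=0.28f32 1648432611249000000",
]

# The supertable `meters` and its child table are created automatically;
# the child table name follows the naming rules described above.
conn.schemaless_insert(lines, taos.SmlProtocol.LINE_PROTOCOL, taos.SmlPrecision.NANO_SECONDS)
conn.close()
```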
@@ -33,7 +33,10 @@ For example:
meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3
```

- The child table name is created automatically in a rule to guarantee its uniqueness. But you can configure `smlChildTableName` in taos.cfg to specify a tag value as the table name if the tag value is globally unique. For example, if a tag is called `tname` and you set `smlChildTableName=tname` in taos.cfg, when you insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000`, the child table `cpu1` will be created automatically. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
- The rules of table names (a connector sketch follows below)
  - The child table name is created automatically in a rule to guarantee its uniqueness.
  - You can configure `smlAutoChildTableNameDelimiter` in taos.cfg to specify a delimiter between tag values as the table name. For example, if you set `smlAutoChildTableNameDelimiter=-` in taos.cfg, when you insert `st,t0=cpu1,t1=4 c1=3 1626006833639000000`, the child table will be `cpu1-4`
  - You can configure `smlChildTableName` in taos.cfg to specify a tag value as the table name if the tag value is globally unique. For example, if a tag is called `tname` and you set `smlChildTableName=tname` in taos.cfg, when you insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000`, the child table `cpu1` will be created automatically. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored

Please refer to [OpenTSDB Telnet API](http://opentsdb.net/docs/build/html/api_telnet/put.html) for more details.
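Under the same assumptions as the line protocol sketch earlier (taospy, local server, database `test`), the telnet format is written with the same call, switching only the protocol enum:

```python
import taos

conn = taos.connect(host="localhost", user="root", password="taosdata", database="test")

# One metric per line: metric, ms timestamp, value, then tags.
lines = [
    "meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3",
]

conn.schemaless_insert(lines, taos.SmlProtocol.TELNET_PROTOCOL, taos.SmlPrecision.NOT_CONFIGURED)
conn.close()
```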
@@ -48,7 +48,10 @@ Please refer to [OpenTSDB HTTP API](http://opentsdb.net/docs/build/html/api_http
:::note

- In JSON protocol, strings will be converted to NCHAR type and numeric values will be converted to double type.
- The child table name is created automatically in a rule to guarantee its uniqueness. But you can configure `smlChildTableName` in taos.cfg to specify a tag value as the table name if the tag value is globally unique. For example, if a tag is called `tname` and you set `smlChildTableName=tname` in taos.cfg, when you insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000`, the child table `cpu1` will be created automatically. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
- The rules of table names (a connector sketch follows this note)
  - The child table name is created automatically in a rule to guarantee its uniqueness.
  - You can configure `smlAutoChildTableNameDelimiter` in taos.cfg to specify a delimiter between tag values as the table name. For example, if you set `smlAutoChildTableNameDelimiter=-` in taos.cfg, when you insert `st,t0=cpu1,t1=4 c1=3 1626006833639000000`, the child table will be `cpu1-4`
  - You can configure `smlChildTableName` in taos.cfg to specify a tag value as the table name if the tag value is globally unique. For example, if a tag is called `tname` and you set `smlChildTableName=tname` in taos.cfg, when you insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000`, the child table `cpu1` will be created automatically. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored

:::
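A corresponding hedged sketch for the JSON protocol, under the same assumptions as the earlier schemaless examples (the whole JSON document is passed as one string):

```python
import json
import taos

conn = taos.connect(host="localhost", user="root", password="taosdata", database="test")

payload = [
    {
        "metric": "meters.current",
        "timestamp": 1648432611250,
        "value": 10.3,
        "tags": {"location": "California.SanFrancisco", "groupid": 2},
    }
]

# Strings become NCHAR and numeric values become DOUBLE, as noted above.
conn.schemaless_insert([json.dumps(payload)], taos.SmlProtocol.JSON_PROTOCOL, taos.SmlPrecision.NOT_CONFIGURED)
conn.close()
```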
@@ -23,20 +23,30 @@ By subscribing to a topic, a consumer can obtain the latest data in that topic i

To implement these features, TDengine indexes its write-ahead log (WAL) file for fast random access and provides configurable methods for replacing and retaining this file. You can define a retention period and size for this file. For information, see the CREATE DATABASE statement. In this way, the WAL file is transformed into a persistent storage engine that remembers the order in which events occur. However, note that configuring an overly long retention period for your WAL files makes database compression inefficient. TDengine then uses the WAL file instead of the time-series database as its storage engine for queries in the form of topics. TDengine reads the data from the WAL file; uses a unified query engine instance to perform filtering, transformations, and other operations; and finally pushes the data to consumers.
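For example, the WAL retention window is set when the database is created, and a topic is then defined on top of it. The sketch below uses the Python connector; the names `power` and `topic_meters` and the one-column schema are illustrative assumptions, not names from this document:

```python
import taos

conn = taos.connect(host="localhost", user="root", password="taosdata")

# Keep WAL entries for 3600 seconds so subscribers have time to consume them.
conn.execute("CREATE DATABASE IF NOT EXISTS power WAL_RETENTION_PERIOD 3600")
conn.execute("USE power")
conn.execute(
    "CREATE STABLE IF NOT EXISTS meters (ts TIMESTAMP, current FLOAT) TAGS (location BINARY(64))"
)

# The topic is served from the WAL through the unified query engine.
conn.execute("CREATE TOPIC IF NOT EXISTS topic_meters AS SELECT ts, current, location FROM meters")
conn.close()
```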
Tips (using the C interface as an example):
1. A consumer group consumes all data under the same topic, and different consumer groups are independent of each other;
2. A consumer group consumes all vgroups of the same topic. A group can be composed of multiple consumers, but a vgroup is consumed by only one consumer. If the number of consumers exceeds the number of vgroups, the excess consumers do not consume data;
3. On the server side, only one offset is saved for each vgroup. The offsets of each vgroup are monotonically increasing, but not necessarily continuous. There is no correlation between the offsets of different vgroups;
4. Each poll returns a result block that belongs to one vgroup and may contain data from multiple WAL versions. The offset of the first record in the block can be obtained through tmq_get_vgroup_offset;
5. If a consumer group has never committed an offset, when its member consumers restart and pull data again, they start consuming from the value of the parameter auto.offset.reset. Within a consumer's lifecycle, the client locally records the offset of the most recently pulled data and will not pull duplicate data;
6. If a consumer terminates abnormally (without calling tmq_close), a rebalance of its consumer group is triggered after about 12 seconds; the consumer's status on the server changes to LOST, and after about 1 day the consumer is deleted automatically. If a consumer exits normally, it is deleted after exiting. If a new consumer is added, a rebalance is triggered after about 2 seconds, and the consumer's status on the server changes to ready;
7. The consumer group rebalance reassigns vgroups to all consumer members of the group that are in the ready state; consumers can only perform assign/seek/commit/poll operations on the vgroups they are responsible for;
8. Consumers can call tmq_position to obtain the offset of the current consumption, seek to a specified offset, and consume again;
9. Seek points the position to the specified offset without executing a commit. Once the seek succeeds, poll can fetch data at the specified offset and after;
10. Before the seek operation, tmq_get_topic_assignment must be called. The assignment interface obtains the vgroup IDs and offset ranges of the consumer. The seek operation checks whether the vgroup ID and offset are legal, and reports an error if they are not;
11. Because of the WAL expiration/deletion mechanism, even if a seek succeeds, the offset may already have expired when data is polled. If the poll offset is less than the minimum WAL version number, consumption starts from the minimum WAL version number;
12. The tmq_get_vgroup_offset interface obtains the offset of the first record in the result block where a record is located. When seeking to this offset, all the data in that block is consumed. Refer to point 4;
13. Data subscription consumes data from the WAL. If some WAL files are deleted according to the WAL retention policy, the deleted data can no longer be consumed. So you need to set reasonable values for the parameters `WAL_RETENTION_PERIOD` or `WAL_RETENTION_SIZE` when creating the database, and make sure your application consumes the data in a timely way so that no data is lost. This behavior is similar to Kafka and other widely used message queue products.
The following are some explanations about data subscription, which require some understanding of the architecture of TDengine and the interfaces of the various language connectors; a runnable sketch follows this section.
- A consumer group consumes all data under the same topic, and different consumer groups are independent of each other;
- A consumer group consumes all vgroups of the same topic. A group can be composed of multiple consumers, but a vgroup is consumed by only one consumer. If the number of consumers exceeds the number of vgroups, the excess consumers do not consume data;
- On the server side, only one offset is saved for each vgroup. The offsets of each vgroup are monotonically increasing, but not necessarily continuous. There is no correlation between the offsets of different vgroups;
- Each poll returns a result block that belongs to one vgroup and may contain data from multiple WAL versions. The offset of the first record in the block can be obtained through the offset interface;
- If a consumer group has never committed an offset, when its member consumers restart and pull data again, they start consuming from the value of the parameter auto.offset.reset. Within a consumer's lifecycle, the client locally records the offset of the most recently pulled data and will not pull duplicate data;
- If a consumer terminates abnormally (without calling tmq_close), a rebalance of its consumer group is triggered after about 12 seconds; the consumer's status on the server changes to LOST, and after about 1 day the consumer is deleted automatically. If a consumer exits normally, it is deleted after exiting. If a new consumer is added, a rebalance is triggered after about 2 seconds, and the consumer's status on the server changes to ready;
- The consumer group rebalance reassigns vgroups to all consumer members of the group that are in the ready state; consumers can only perform assign/seek/commit/poll operations on the vgroups they are responsible for;
- Consumers can call the position interface to obtain the offset of the current consumption, seek to a specified offset, and consume again;
- Seek points the position to the specified offset without executing a commit. Once the seek succeeds, poll can fetch data at the specified offset and after;
- Position obtains the current consumption position, which is the position to be fetched next time, not the position already consumed;
- Commit submits the consumption position. Without a parameter, it submits the current consumption position (the position to be fetched next time, not the position already consumed); with a parameter, it submits the position given in the parameter (i.e., the position to be fetched after the next exit and restart);
- Seek sets the consumer's consumption position. Wherever the seek goes, position returns that place, since it is the position to be fetched next time;
- Seek does not affect commit, and commit does not affect seek; the two are independent and are different concepts;
- The begin interface returns the offset of the first record in the WAL, and the end interface returns the offset + 1 of the last record in the WAL;
- Before the seek operation, the assignment interface must be called. The assignment interface obtains the vgroup IDs and offset ranges of the consumer. The seek operation checks whether the vgroup ID and offset are legal, and reports an error if they are not;
- Because of the WAL expiration/deletion mechanism, even if a seek succeeds, the offset may already have expired when data is polled. If the poll offset is less than the minimum WAL version number, consumption starts from the minimum WAL version number;
- The offset interface obtains the offset of the first record in the result block where a record is located. When seeking to this offset, all the data in that block is consumed, as described in the poll item above;
- Data subscription consumes data from the WAL. If some WAL files are deleted according to the WAL retention policy, the deleted data can no longer be consumed. So you need to set reasonable values for the parameters `WAL_RETENTION_PERIOD` or `WAL_RETENTION_SIZE` when creating the database, and make sure your application consumes the data in a timely way so that no data is lost. This behavior is similar to Kafka and other widely used message queue products.

This document does not further introduce the concepts of message queues themselves; if you need more background, please look them up separately.

Starting from version 3.2.0.0, data subscription supports vnode migration and splitting.
Because data subscription depends on WAL files, and the WAL is not synchronized during vnode migration and splitting, WAL data that was not consumed before a migration or split cannot be consumed afterwards. Please make sure all data has been consumed before migrating or splitting a vnode; otherwise data may be lost during consumption.
## Data Schema and API
@@ -342,10 +352,11 @@ You configure the following parameters when creating a consumer:
| `td.connect.port` | string | Port of the server side | |
| `group.id` | string | Consumer group ID; consumers with the same ID are in the same group | **Required**. Maximum length: 192. Each topic can create up to 100 consumer groups. |
| `client.id` | string | Client ID | Maximum length: 192. |
| `auto.offset.reset` | enum | Initial offset for the consumer group | `earliest`: subscribe from the earliest data, this is the default behavior; `latest`: subscribe from the latest data; or `none`: can't subscribe without committed offset |
| `auto.offset.reset` | enum | Initial offset for the consumer group | `earliest`: subscribe from the earliest data, this is the default behavior (version < 3.2.0.0); `latest`: subscribe from the latest data, this is the default behavior (version >= 3.2.0.0); or `none`: can't subscribe without committed offset |
| `enable.auto.commit` | boolean | Commit automatically; true: user application doesn't need to explicitly commit; false: user application needs to handle commit by itself | Default value is true |
| `auto.commit.interval.ms` | integer | Interval for automatic commits, in milliseconds | |
| `msg.with.table.name` | boolean | Specify whether to deserialize table names from messages | Default value: false |
| `msg.with.table.name` | boolean | Specify whether to deserialize table names from messages. Not applicable when subscribing to a column (tbname can be written as a column in the subquery statement during column subscription). (This parameter has been deprecated since version 3.2.0.0 and remains true) | Default value: false |
| `enable.replay` | boolean | Specify whether the data replay function is enabled | Default value: false |

The method of specifying these parameters depends on the language used:
@@ -361,7 +372,7 @@ tmq_conf_set(conf, "auto.commit.interval.ms", "1000");
tmq_conf_set(conf, "group.id", "cgrpName");
tmq_conf_set(conf, "td.connect.user", "root");
tmq_conf_set(conf, "td.connect.pass", "taosdata");
tmq_conf_set(conf, "auto.offset.reset", "earliest");
tmq_conf_set(conf, "auto.offset.reset", "latest");
tmq_conf_set(conf, "msg.with.table.name", "true");
tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
@@ -391,7 +402,7 @@ properties.setProperty("group.id", "cgrpName");
properties.setProperty("bootstrap.servers", "127.0.0.1:6030");
properties.setProperty("td.connect.user", "root");
properties.setProperty("td.connect.pass", "taosdata");
properties.setProperty("auto.offset.reset", "earliest");
properties.setProperty("auto.offset.reset", "latest");
properties.setProperty("msg.with.table.name", "true");
properties.setProperty("value.deserializer", "com.taos.example.MetersDeserializer");
@@ -411,7 +422,7 @@ public class MetersDeserializer extends ReferenceDeserializer<Meters> {
```go
conf := &tmq.ConfigMap{
    "group.id": "test",
    "auto.offset.reset": "earliest",
    "auto.offset.reset": "latest",
    "td.connect.ip": "127.0.0.1",
    "td.connect.user": "root",
    "td.connect.pass": "taosdata",
@@ -431,7 +442,7 @@ consumer, err := NewConsumer(conf)
let mut dsn: Dsn = "taos://".parse()?;
dsn.set("group.id", "group1");
dsn.set("client.id", "test");
dsn.set("auto.offset.reset", "earliest");
dsn.set("auto.offset.reset", "latest");

let tmq = TmqBuilder::from_dsn(dsn)?;
@@ -448,7 +459,19 @@ from taos.tmq import Consumer
# Syntax: `consumer = Consumer(configs)`
#
# Example:
consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
consumer = Consumer(
    {
        "group.id": "local",
        "client.id": "1",
        "enable.auto.commit": "true",
        "auto.commit.interval.ms": "1000",
        "td.connect.ip": "127.0.0.1",
        "td.connect.user": "root",
        "td.connect.pass": "taosdata",
        "auto.offset.reset": "latest",
        "msg.with.table.name": "true",
    }
)
```

</TabItem>
@@ -465,7 +488,7 @@ let consumer = taos.consumer({
'group.id': 'tg2',
'td.connect.user': 'root',
'td.connect.pass': 'taosdata',
'auto.offset.reset': 'earliest',
'auto.offset.reset': 'latest',
'msg.with.table.name': 'true',
'td.connect.ip': '127.0.0.1',
'td.connect.port': '6030'
@@ -488,7 +511,7 @@ var cfg = new ConsumerConfig
GourpId = "TDengine-TMQ-C#",
TDConnectUser = "root",
TDConnectPasswd = "taosdata",
AutoOffsetReset = "earliest",
AutoOffsetReset = "latest",
MsgWithTableName = "true",
TDConnectIp = "127.0.0.1",
TDConnectPort = "6030"
@@ -504,6 +527,24 @@ var consumer = new ConsumerBuilder(cfg).Build();

A consumer group is automatically created when multiple consumers are configured with the same consumer group ID.

Data replay function description:
- Subscription adds a replay function, which replays data according to the time at which it was written.
  For example, suppose the following three records were written at these times:
```sql
2023/09/22 00:00:00.000
2023/09/22 00:00:05.000
2023/09/22 00:00:08.000
```
  When replaying, the second record is returned 5 seconds after the first, and the third record is returned 3 seconds after the second.
- Only column subscriptions support data replay.
- Replay needs to ensure an independent timeline
  - For a subtable subscription or a normal table subscription, only one vnode holds the data, ensuring a single timeline.
  - When subscribing to a supertable, the database must have only one vnode; otherwise an error is reported (because the data subscribed on multiple vnodes is not on the same timeline).
- Supertable and database subscriptions do not support replay
- The enable.replay parameter enables the subscription replay function when true; false (the default) leaves it disabled. See the sketch after this list.
- Replay does not support progress saving, so when replay is enabled, auto commit is automatically turned off.
- Due to the processing time required for data replay, the replay timing has an error of a few tens of milliseconds.
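A hedged sketch of enabling replay with the Python connector; the topic name is an assumption and must be a column subscription on a single timeline, as described above:

```python
from taos.tmq import Consumer

# enable.replay turns replay on; auto commit is then disabled automatically,
# since replay does not support progress saving.
consumer = Consumer(
    {
        "group.id": "replay_grp",
        "td.connect.ip": "127.0.0.1",
        "td.connect.user": "root",
        "td.connect.pass": "taosdata",
        "auto.offset.reset": "earliest",
        "enable.replay": "true",
    }
)
consumer.subscribe(["topic_meters"])  # assumed column-subscription topic

# Records arrive with the same inter-arrival gaps as when they were written.
while True:
    res = consumer.poll(10)
    if not res:
        break
    for block in res.value():
        print(block.fetchall())

consumer.close()
```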
## Subscribe to a Topic

A single consumer can subscribe to multiple topics.
@@ -12,7 +12,7 @@ The FQDN of all hosts must be setup properly. For e.g. FQDNs may have to be conf

### Step 1

If any previous version of TDengine has been installed and configured on any host, the installation needs to be removed and the data needs to be cleaned up. For details about uninstalling please refer to [Install and Uninstall](../../operation/pkg-install). To clean up the data, please use `rm -rf /var/lib/taos/\*` assuming the `dataDir` is configured as `/var/lib/taos`.
If any previous version of TDengine has been installed and configured on any host, the installation needs to be removed and the data needs to be cleaned up. To clean up the data, please use `rm -rf /var/lib/taos/\*` assuming the `dataDir` is configured as `/var/lib/taos`.

:::note
FQDN information is written to a file. If you have started TDengine without configuring or changing the FQDN, ensure that data is backed up or no longer needed before running the `rm -rf /var/lib/taos/\*` command.
@@ -56,7 +56,7 @@ database_option: {
- WAL_FSYNC_PERIOD: specifies the interval (in milliseconds) at which data is written from the WAL to disk. This parameter takes effect only when the WAL parameter is set to 2. The default value is 3000. Enter a value between 0 and 180000. The value 0 indicates that incoming data is immediately written to disk.
- MAXROWS: specifies the maximum number of rows recorded in a block. The default value is 4096.
- MINROWS: specifies the minimum number of rows recorded in a block. The default value is 100.
- KEEP: specifies the time for which data is retained. Enter a value between 1 and 365000. The default value is 3650. The value of the KEEP parameter must be greater than or equal to the value of the DURATION parameter. TDengine automatically deletes data that is older than the value of the KEEP parameter. You can use m (minutes), h (hours), and d (days) as the unit, for example KEEP 100h or KEEP 10d. If you do not include a unit, d is used by default. TDengine Enterprise supports [Tiered Storage](https://docs.tdengine.com/tdinternal/arch/#tiered-storage) function, thus multiple KEEP values (comma separated and up to 3 values supported, and meet keep 0 <= keep 1 <= keep 2, e.g. KEEP 100h,100d,3650d) are supported; TDengine OSS does not support Tiered Storage function (although multiple keep values are configured, they do not take effect, only the maximum keep value is used as KEEP).
- KEEP: specifies the time for which data is retained. Enter a value between 1 and 365000. The default value is 3650. The value of the KEEP parameter must be greater than or equal to three times the value of the DURATION parameter. TDengine automatically deletes data that is older than the value of the KEEP parameter. You can use m (minutes), h (hours), and d (days) as the unit, for example KEEP 100h or KEEP 10d. If you do not include a unit, d is used by default. TDengine Enterprise supports [Tiered Storage](https://docs.tdengine.com/tdinternal/arch/#tiered-storage) function, thus multiple KEEP values (comma separated and up to 3 values supported, and meet keep 0 <= keep 1 <= keep 2, e.g. KEEP 100h,100d,3650d) are supported; TDengine OSS does not support Tiered Storage function (although multiple keep values are configured, they do not take effect, only the maximum keep value is used as KEEP). See the sketch after this list excerpt.
- PAGES: specifies the number of pages in the metadata storage engine cache on each vnode. Enter a value greater than or equal to 64. The default value is 256. The space occupied by metadata storage on each vnode is equal to the product of the values of the PAGESIZE and PAGES parameters. The space occupied by default is 1 MB.
- PAGESIZE: specifies the size (in KB) of each page in the metadata storage engine cache on each vnode. The default value is 4. Enter a value between 1 and 16384.
- PRECISION: specifies the precision at which a database records timestamps. Enter ms for milliseconds, us for microseconds, or ns for nanoseconds. The default value is ms.
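As a sketch of how these options combine, the statement below (issued here through the Python connector; all values illustrative) keeps KEEP at least three times DURATION as required above:

```python
import taos

conn = taos.connect(host="localhost", user="root", password="taosdata")

# 10-day data files kept for 365 days (>= 3 x DURATION), ms precision,
# fsync every 3 s with WAL level 2, and the block/page sizes discussed above.
conn.execute(
    "CREATE DATABASE IF NOT EXISTS power "
    "DURATION 10 KEEP 365 PRECISION 'ms' WAL_LEVEL 2 WAL_FSYNC_PERIOD 3000 "
    "MINROWS 100 MAXROWS 4096 PAGES 256 PAGESIZE 4"
)
conn.close()
```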
@@ -24,7 +24,7 @@ SELECT [hints] [DISTINCT] select_list
hints: /*+ [hint([hint_param_list])] [hint([hint_param_list])] */

hint:
BATCH_SCAN | NO_BATCH_SCAN
BATCH_SCAN | NO_BATCH_SCAN | SORT_FOR_GROUP

select_list:
select_expr [, select_expr] ...
@@ -87,15 +87,17 @@ Hints are a means of user control over query optimization for individual stateme

The list of currently supported Hints is as follows:

| **Hint** | **Params** | **Comment** | **Scope** |
| :-----------: | -------------- | -------------------------- | -------------------------- |
| BATCH_SCAN | None | Batch table scan | JOIN statement for stable |
| NO_BATCH_SCAN | None | Sequential table scan | JOIN statement for stable |
| **Hint** | **Params** | **Comment** | **Scope** |
| :-----------: | -------------- | -------------------------- | ----------------------------------- |
| BATCH_SCAN | None | Batch table scan | JOIN statement for stable |
| NO_BATCH_SCAN | None | Sequential table scan | JOIN statement for stable |
| SORT_FOR_GROUP | None | Use sort for partition | With normal column in partition by list |

For example:

```sql
SELECT /*+ BATCH_SCAN() */ a.ts FROM stable1 a, stable2 b where a.tag0 = b.tag0 and a.ts = b.ts;
SELECT /*+ SORT_FOR_GROUP() */ count(*), c1 FROM stable1 PARTITION BY c1;
```

## Lists
@@ -54,6 +54,7 @@ LIKE is used together with wildcards to match strings. Its usage is described as
MATCH and NMATCH are used together with regular expressions to match strings. Their usage is described as follows:

- Use POSIX regular expression syntax. For more information, see Regular Expressions.
- The `MATCH` operator returns true when the regular expression is matched. The `NMATCH` operator returns true when the regular expression is not matched.
- Regular expressions can be used against only table names, i.e. `tbname`, and tags/columns of binary/nchar types.
- The maximum length of the regular expression string is 128 bytes. The configuration parameter `maxRegexStringLen` can be used to set the maximum allowed regular expression length. It is a client-side configuration parameter and takes effect after restarting the client.
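For instance, a short sketch with the Python connector against the usual `meters` example schema (the database and table names are assumptions):

```python
import taos

conn = taos.connect(host="localhost", user="root", password="taosdata", database="test")

# MATCH uses POSIX regular expressions; this selects child tables named d0, d1, ...
print(conn.query("SELECT DISTINCT tbname FROM meters WHERE tbname MATCH '^d[0-9]+$'").fetch_all())

# NMATCH is the negation: tables whose names do not start with 'd'.
print(conn.query("SELECT DISTINCT tbname FROM meters WHERE tbname NMATCH '^d'").fetch_all())
conn.close()
```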
@@ -180,6 +180,7 @@ The following list shows all reserved keywords:
- MAX_DELAY
- BWLIMIT
- MAXROWS
- MAX_SPEED
- MERGE
- META
- MINROWS
@@ -26,75 +26,85 @@ This document introduces the tables of INFORMATION_SCHEMA and their structure.

## INS_DNODES

Provides information about dnodes. Similar to SHOW DNODES.
Provides information about dnodes. Similar to SHOW DNODES. Users whose SYSINFO attribute is 0 can't view this table.

| # | **Column** | **Data Type** | **Description** |
| --- | :------------: | ------------- | --------------- |
| 1 | vnodes | SMALLINT | Current number of vnodes on the dnode. It should be noted that `vnodes` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 2 | support_vnodes | SMALLINT | Maximum number of vnodes on the dnode |
| 3 | status | BINARY(10) | Current status |
| 4 | note | BINARY(256) | Reason for going offline or other information |
| 3 | status | VARCHAR(10) | Current status |
| 4 | note | VARCHAR(256) | Reason for going offline or other information |
| 5 | id | SMALLINT | Dnode ID |
| 6 | endpoint | BINARY(134) | Dnode endpoint |
| 6 | endpoint | VARCHAR(134) | Dnode endpoint |
| 7 | create | TIMESTAMP | Creation time |
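These tables can be queried like ordinary tables; a short sketch with the Python connector (connection details assumed), including the backquote escaping called out in the `vnodes` row:

```python
import taos

conn = taos.connect(host="localhost", user="root", password="taosdata")

# `vnodes` is a keyword, so it is escaped with backquotes when used as a column name.
result = conn.query(
    "SELECT id, endpoint, `vnodes`, support_vnodes, status FROM information_schema.ins_dnodes"
)
for row in result.fetch_all():
    print(row)
conn.close()
```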
## INS_MNODES

Provides information about mnodes. Similar to SHOW MNODES.
Provides information about mnodes. Similar to SHOW MNODES. Users whose SYSINFO attribute is 0 can't view this table.

| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------- | ------------------------------------------ |
| 1 | id | SMALLINT | Mnode ID |
| 2 | endpoint | BINARY(134) | Mnode endpoint |
| 3 | role | BINARY(10) | Current role |
| 2 | endpoint | VARCHAR(134) | Mnode endpoint |
| 3 | role | VARCHAR(10) | Current role |
| 4 | role_time | TIMESTAMP | Time at which the current role was assumed |
| 5 | create_time | TIMESTAMP | Creation time |

## INS_QNODES

Provides information about qnodes. Similar to SHOW QNODES.
Provides information about qnodes. Similar to SHOW QNODES. Users whose SYSINFO attribute is 0 can't view this table.

| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------- | --------------- |
| 1 | id | SMALLINT | Qnode ID |
| 2 | endpoint | BINARY(134) | Qnode endpoint |
| 2 | endpoint | VARCHAR(134) | Qnode endpoint |
| 3 | create_time | TIMESTAMP | Creation time |

## INS_SNODES

Provides information about snodes. Similar to SHOW SNODES. Users whose SYSINFO attribute is 0 can't view this table.

| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------- | --------------- |
| 1 | id | SMALLINT | Snode ID |
| 2 | endpoint | VARCHAR(134) | Snode endpoint |
| 3 | create_time | TIMESTAMP | Creation time |

## INS_CLUSTER

Provides information about the cluster.
Provides information about the cluster. Users whose SYSINFO attribute is 0 can't view this table.

| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------- | --------------- |
| 1 | id | BIGINT | Cluster ID |
| 2 | name | BINARY(134) | Cluster name |
| 2 | name | VARCHAR(134) | Cluster name |
| 3 | create_time | TIMESTAMP | Creation time |

## INS_DATABASES

Provides information about user-created databases. Similar to SHOW DATABASES.

| # | **Column** | **Data Type** | **Description** |
| # | **Column** | **Data Type** | **Description** |
| --- | :------------------: | ---------------- | ------------------------------------------------ |
| 1 | name | BINARY(32) | Database name |
| 1 | name | VARCHAR(64) | Database name |
| 2 | create_time | TIMESTAMP | Creation time |
| 3 | ntables | INT | Number of standard tables and subtables (not including supertables) |
| 4 | vgroups | INT | Number of vgroups. It should be noted that `vgroups` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 6 | replica | INT | Number of replicas. It should be noted that `replica` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 7 | strict | BINARY(4) | Obsoleted |
| 8 | duration | INT | Duration for storage of single files. It should be noted that `duration` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 9 | keep | INT | Data retention period. It should be noted that `keep` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 7 | strict | VARCHAR(4) | Obsoleted |
| 8 | duration | VARCHAR(10) | Duration for storage of single files. It should be noted that `duration` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 9 | keep | VARCHAR(32) | Data retention period. It should be noted that `keep` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 10 | buffer | INT | Write cache size per vnode, in MB. It should be noted that `buffer` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 11 | pagesize | INT | Page size for vnode metadata storage engine, in KB. It should be noted that `pagesize` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 12 | pages | INT | Number of pages per vnode metadata storage engine. It should be noted that `pages` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 13 | minrows | INT | Minimum number of records per file block. It should be noted that `minrows` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 14 | maxrows | INT | Maximum number of records per file block. It should be noted that `maxrows` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 15 | comp | INT | Compression method. It should be noted that `comp` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 16 | precision | BINARY(2) | Time precision. It should be noted that `precision` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 17 | status | BINARY(10) | Current database status |
| 18 | retentions | BINARY(60) | Aggregation interval and retention period. It should be noted that `retentions` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 16 | precision | VARCHAR(2) | Time precision. It should be noted that `precision` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 17 | status | VARCHAR(10) | Current database status |
| 18 | retentions | VARCHAR(60) | Aggregation interval and retention period. It should be noted that `retentions` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 19 | single_stable | BOOL | Whether the database can contain multiple supertables. It should be noted that `single_stable` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 20 | cachemodel | BINARY(60) | Caching method for the newest data. It should be noted that `cachemodel` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 20 | cachemodel | VARCHAR(60) | Caching method for the newest data. It should be noted that `cachemodel` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 21 | cachesize | INT | Memory per vnode used for caching the newest data. It should be noted that `cachesize` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 22 | wal_level | INT | WAL level. It should be noted that `wal_level` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 23 | wal_fsync_period | INT | Interval at which WAL is written to disk. It should be noted that `wal_fsync_period` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
@@ -111,15 +121,15 @@ Provides information about user-defined functions.

| # | **Column** | **Data Type** | **Description** |
| --- | :-----------: | ------------- | --------------- |
| 1 | name | BINARY(64) | Function name |
| 2 | comment | BINARY(255) | Function description. It should be noted that `comment` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 1 | name | VARCHAR(64) | Function name |
| 2 | comment | VARCHAR(255) | Function description. It should be noted that `comment` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 3 | aggregate | INT | Whether the UDF is an aggregate function. It should be noted that `aggregate` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 4 | output_type | BINARY(31) | Output data type |
| 4 | output_type | VARCHAR(31) | Output data type |
| 5 | create_time | TIMESTAMP | Creation time |
| 6 | code_len | INT | Length of the source code |
| 7 | bufsize | INT | Buffer size |
| 8 | func_language | BINARY(31) | UDF programming language |
| 9 | func_body | BINARY(16384) | UDF function body |
| 8 | func_language | VARCHAR(31) | UDF programming language |
| 9 | func_body | VARCHAR(16384) | UDF function body |
| 10 | func_version | INT | UDF function version, starting from 0 and increasing by 1 each time it is updated |

## INS_INDEXES
@@ -128,12 +138,12 @@ Provides information about user-created indices. Similar to SHOW INDEX.

| # | **Column** | **Data Type** | **Description** |
| --- | :--------------: | ------------- | --------------- |
| 1 | db_name | BINARY(32) | Database containing the table with the specified index |
| 2 | table_name | BINARY(192) | Table containing the specified index |
| 3 | index_name | BINARY(192) | Index name |
| 4 | db_name | BINARY(64) | Index column |
| 5 | index_type | BINARY(10) | SMA or tag index |
| 6 | index_extensions | BINARY(256) | Other information. For SMA/tag indices, this shows a list of functions |
| 1 | db_name | VARCHAR(32) | Database containing the table with the specified index |
| 2 | table_name | VARCHAR(192) | Table containing the specified index |
| 3 | index_name | VARCHAR(192) | Index name |
| 4 | db_name | VARCHAR(64) | Index column |
| 5 | index_type | VARCHAR(10) | SMA or tag index |
| 6 | index_extensions | VARCHAR(256) | Other information. For SMA/tag indices, this shows a list of functions |

## INS_STABLES
@@ -141,16 +151,16 @@ Provides information about supertables.

| # | **Column** | **Data Type** | **Description** |
| --- | :-----------: | ------------- | --------------- |
| 1 | stable_name | BINARY(192) | Supertable name |
| 2 | db_name | BINARY(64) | All databases in the supertable |
| 1 | stable_name | VARCHAR(192) | Supertable name |
| 2 | db_name | VARCHAR(64) | All databases in the supertable |
| 3 | create_time | TIMESTAMP | Creation time |
| 4 | columns | INT | Number of columns |
| 5 | tags | INT | Number of tags. It should be noted that `tags` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 6 | last_update | TIMESTAMP | Last updated time |
| 7 | table_comment | BINARY(1024) | Table description |
| 8 | watermark | BINARY(64) | Window closing time. It should be noted that `watermark` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 9 | max_delay | BINARY(64) | Maximum delay for pushing stream processing results. It should be noted that `max_delay` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 10 | rollup | BINARY(128) | Rollup aggregate function. It should be noted that `rollup` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 7 | table_comment | VARCHAR(1024) | Table description |
| 8 | watermark | VARCHAR(64) | Window closing time. It should be noted that `watermark` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 9 | max_delay | VARCHAR(64) | Maximum delay for pushing stream processing results. It should be noted that `max_delay` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 10 | rollup | VARCHAR(128) | Rollup aggregate function. It should be noted that `rollup` is a TDengine keyword and needs to be escaped with ` when used as a column name. |

## INS_TABLES
@@ -158,37 +168,37 @@ Provides information about standard tables and subtables.

| # | **Column** | **Data Type** | **Description** |
| --- | :-----------: | ------------- | --------------- |
| 1 | table_name | BINARY(192) | Table name |
| 2 | db_name | BINARY(64) | Database name |
| 1 | table_name | VARCHAR(192) | Table name |
| 2 | db_name | VARCHAR(64) | Database name |
| 3 | create_time | TIMESTAMP | Creation time |
| 4 | columns | INT | Number of columns |
| 5 | stable_name | BINARY(192) | Supertable name |
| 5 | stable_name | VARCHAR(192) | Supertable name |
| 6 | uid | BIGINT | Table ID |
| 7 | vgroup_id | INT | Vgroup ID |
| 8 | ttl | INT | Table time-to-live. It should be noted that `ttl` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 9 | table_comment | BINARY(1024) | Table description |
| 10 | type | BINARY(20) | Table type |
| 9 | table_comment | VARCHAR(1024) | Table description |
| 10 | type | VARCHAR(20) | Table type |

## INS_TAGS

| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------- | --------------- |
| 1 | table_name | BINARY(192) | Table name |
| 2 | db_name | BINARY(64) | Database name |
| 3 | stable_name | BINARY(192) | Supertable name |
| 4 | tag_name | BINARY(64) | Tag name |
| 5 | tag_type | BINARY(64) | Tag type |
| 6 | tag_value | BINARY(16384) | Tag value |
| 1 | table_name | VARCHAR(192) | Table name |
| 2 | db_name | VARCHAR(64) | Database name |
| 3 | stable_name | VARCHAR(192) | Supertable name |
| 4 | tag_name | VARCHAR(64) | Tag name |
| 5 | tag_type | VARCHAR(64) | Tag type |
| 6 | tag_value | VARCHAR(16384) | Tag value |

## INS_COLUMNS

| # | **Column** | **Data Type** | **Description** |
| --- | :-----------: | ------------- | ---------------- |
| 1 | table_name | BINARY(192) | Table name |
| 2 | db_name | BINARY(64) | Database name |
| 3 | table_type | BINARY(21) | Table type |
| 4 | col_name | BINARY(64) | Column name |
| 5 | col_type | BINARY(32) | Column type |
| 1 | table_name | VARCHAR(192) | Table name |
| 2 | db_name | VARCHAR(64) | Database name |
| 3 | table_type | VARCHAR(21) | Table type |
| 4 | col_name | VARCHAR(64) | Column name |
| 5 | col_type | VARCHAR(32) | Column type |
| 6 | col_length | INT | Column length |
| 7 | col_precision | INT | Column precision |
| 8 | col_scale | INT | Column scale |
@ -196,51 +206,51 @@ Provides information about standard tables and subtables.
|
|||
|
||||
## INS_USERS
|
||||
|
||||
Provides information about TDengine users.
|
||||
Provides information about TDengine users. Users whose SYSINFO attribute is 0 can't view this table.

| #   | **Column**  | **Data Type** | **Description**  |
| --- | :---------: | ------------- | ---------------- |
| 1   | user_name   | BINARY(23)    | User name        |
| 2   | privilege   | BINARY(256)   | User permissions |
| 1   | user_name   | VARCHAR(23)   | User name        |
| 2   | privilege   | VARCHAR(256)  | User permissions |
| 3   | create_time | TIMESTAMP     | Creation time    |

## INS_GRANTS

Provides information about TDengine Enterprise Edition permissions.
Provides information about TDengine Enterprise Edition permissions. Users whose SYSINFO attribute is 0 can't view this table.

| #   | **Column**  | **Data Type** | **Description** |
| --- | :---------: | ------------- | --------------- |
| 1   | version     | BINARY(9)     | Whether the deployment is a licensed or trial version |
| 2   | cpu_cores   | BINARY(9)     | CPU cores included in license |
| 3   | dnodes      | BINARY(10)    | Dnodes included in license. It should be noted that `dnodes` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 4   | streams     | BINARY(10)    | Streams included in license. It should be noted that `streams` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 5   | users       | BINARY(10)    | Users included in license. It should be noted that `users` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 6   | accounts    | BINARY(10)    | Accounts included in license. It should be noted that `accounts` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 7   | storage     | BINARY(21)    | Storage space included in license. It should be noted that `storage` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 8   | connections | BINARY(21)    | Client connections included in license. It should be noted that `connections` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 9   | databases   | BINARY(11)    | Databases included in license. It should be noted that `databases` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 10  | speed       | BINARY(9)     | Write speed specified in license (data points per second) |
| 11  | querytime   | BINARY(9)     | Total query time specified in license |
| 12  | timeseries  | BINARY(21)    | Number of metrics included in license |
| 13  | expired     | BINARY(5)     | Whether the license has expired |
| 14  | expire_time | BINARY(19)    | When the trial period expires |
| 1   | version     | VARCHAR(9)    | Whether the deployment is a licensed or trial version |
| 2   | cpu_cores   | VARCHAR(9)    | CPU cores included in license |
| 3   | dnodes      | VARCHAR(10)   | Dnodes included in license. It should be noted that `dnodes` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 4   | streams     | VARCHAR(10)   | Streams included in license. It should be noted that `streams` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 5   | users       | VARCHAR(10)   | Users included in license. It should be noted that `users` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 6   | accounts    | VARCHAR(10)   | Accounts included in license. It should be noted that `accounts` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 7   | storage     | VARCHAR(21)   | Storage space included in license. It should be noted that `storage` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 8   | connections | VARCHAR(21)   | Client connections included in license. It should be noted that `connections` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 9   | databases   | VARCHAR(11)   | Databases included in license. It should be noted that `databases` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 10  | speed       | VARCHAR(9)    | Write speed specified in license (data points per second) |
| 11  | querytime   | VARCHAR(9)    | Total query time specified in license |
| 12  | timeseries  | VARCHAR(21)   | Number of metrics included in license |
| 13  | expired     | VARCHAR(5)    | Whether the license has expired |
| 14  | expire_time | VARCHAR(19)   | When the trial period expires |
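
Several of these column names are TDengine keywords, so the backquote escaping the notes above call for matters in practice. The following is a minimal sketch using the Python connector; the local host and default credentials are assumptions, not part of this page.

```python
# Minimal sketch, assuming a local TDengine with default credentials.
# Keyword-named columns such as `dnodes` and `storage` must be backquoted.
import taos

conn = taos.connect(host="localhost", user="root", password="taosdata")
result = conn.query(
    "SELECT version, `dnodes`, `storage` FROM information_schema.ins_grants"
)
print(result.fetch_all())
```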

## INS_VGROUPS

Provides information about vgroups.
Provides information about vgroups. Users whose SYSINFO attribute is 0 can't view this table.

| #   | **Column** | **Data Type** | **Description** |
| --- | :--------: | ------------- | --------------- |
| 1   | vgroup_id  | INT           | Vgroup ID |
| 2   | db_name    | BINARY(32)    | Database name |
| 2   | db_name    | VARCHAR(32)   | Database name |
| 3   | tables     | INT           | Tables in vgroup. It should be noted that `tables` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 4   | status     | BINARY(10)    | Vgroup status |
| 4   | status     | VARCHAR(10)   | Vgroup status |
| 5   | v1_dnode   | INT           | Dnode ID of first vgroup member |
| 6   | v1_status  | BINARY(10)    | Status of first vgroup member |
| 6   | v1_status  | VARCHAR(10)   | Status of first vgroup member |
| 7   | v2_dnode   | INT           | Dnode ID of second vgroup member |
| 8   | v2_status  | BINARY(10)    | Status of second vgroup member |
| 8   | v2_status  | VARCHAR(10)   | Status of second vgroup member |
| 9   | v3_dnode   | INT           | Dnode ID of third vgroup member |
| 10  | v3_status  | BINARY(10)    | Status of third vgroup member |
| 10  | v3_status  | VARCHAR(10)   | Status of third vgroup member |
| 11  | nfiles     | INT           | Number of data and metadata files in the vgroup |
| 12  | file_size  | INT           | Size of the data and metadata files in the vgroup |
| 13  | tsma       | TINYINT       | Whether time-range-wise SMA is enabled. 1 means enabled; 0 means disabled. |

@ -251,55 +261,57 @@ Provides system configuration information.

| #   | **Column** | **Data Type** | **Description** |
| --- | :--------: | ------------- | --------------- |
| 1   | name       | BINARY(32)    | Parameter |
| 2   | value      | BINARY(64)    | Value. It should be noted that `value` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 1   | name       | VARCHAR(32)   | Parameter |
| 2   | value      | VARCHAR(64)   | Value. It should be noted that `value` is a TDengine keyword and needs to be escaped with ` when used as a column name. |

## INS_DNODE_VARIABLES

Provides dnode configuration information.
Provides dnode configuration information. Users whose SYSINFO attribute is 0 can't view this table.

| #   | **Column** | **Data Type** | **Description** |
| --- | :--------: | ------------- | --------------- |
| 1   | dnode_id   | INT           | Dnode ID |
| 2   | name       | BINARY(32)    | Parameter |
| 3   | value      | BINARY(64)    | Value. It should be noted that `value` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 2   | name       | VARCHAR(32)   | Parameter |
| 3   | value      | VARCHAR(64)   | Value. It should be noted that `value` is a TDengine keyword and needs to be escaped with ` when used as a column name. |

## INS_TOPICS

| #   | **Column**  | **Data Type** | **Description** |
| --- | :---------: | ------------- | -------------------------------------- |
| 1   | topic_name  | BINARY(192)   | Topic name |
| 2   | db_name     | BINARY(64)    | Database for the topic |
| 1   | topic_name  | VARCHAR(192)  | Topic name |
| 2   | db_name     | VARCHAR(64)   | Database for the topic |
| 3   | create_time | TIMESTAMP     | Creation time |
| 4   | sql         | BINARY(1024)  | SQL statement used to create the topic |
| 4   | sql         | VARCHAR(1024) | SQL statement used to create the topic |

## INS_SUBSCRIPTIONS

| #   | **Column**     | **Data Type** | **Description** |
| --- | :------------: | ------------- | --------------------------- |
| 1   | topic_name     | BINARY(204)   | Subscribed topic |
| 2   | consumer_group | BINARY(193)   | Subscribed consumer group |
| 1   | topic_name     | VARCHAR(204)  | Subscribed topic |
| 2   | consumer_group | VARCHAR(193)  | Subscribed consumer group |
| 3   | vgroup_id      | INT           | Vgroup ID for the consumer |
| 4   | consumer_id    | BIGINT        | Consumer ID |
| 5   | offset         | BINARY(64)    | Consumption progress |
| 5   | offset         | VARCHAR(64)   | Consumption progress |
| 6   | rows           | BIGINT        | Number of consumption items |

## INS_STREAMS

| #   | **Column**   | **Data Type** | **Description** |
| --- | :----------: | ------------- | --------------- |
| 1   | stream_name  | BINARY(64)    | Stream name |
| 1   | stream_name  | VARCHAR(64)   | Stream name |
| 2   | create_time  | TIMESTAMP     | Creation time |
| 3   | sql          | BINARY(1024)  | SQL statement used to create the stream |
| 4   | status       | BINARY(20)    | Current status |
| 5   | source_db    | BINARY(64)    | Source database |
| 6   | target_db    | BINARY(64)    | Target database |
| 7   | target_table | BINARY(192)   | Target table |
| 3   | sql          | VARCHAR(1024) | SQL statement used to create the stream |
| 4   | status       | VARCHAR(20)   | Current status |
| 5   | source_db    | VARCHAR(64)   | Source database |
| 6   | target_db    | VARCHAR(64)   | Target database |
| 7   | target_table | VARCHAR(192)  | Target table |
| 8   | watermark    | BIGINT        | Watermark (see stream processing documentation). It should be noted that `watermark` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 9   | trigger      | INT           | Method of triggering the result push (see stream processing documentation). It should be noted that `trigger` is a TDengine keyword and needs to be escaped with ` when used as a column name. |

## INS_USER_PRIVILEGES

Users whose SYSINFO attribute is 0 can't view this table.

| #   | **Column** | **Data Type** | **Description** |
| --- | :--------: | ------------- | --------------- |
| 1   | user_name  | VARCHAR(24)   | Username |

@ -73,10 +73,10 @@ Shows the SQL statement used to create the specified table. This statement can b

## SHOW DATABASES

```sql
SHOW DATABASES;
SHOW [USER | SYSTEM] DATABASES;
```

Shows all user-created databases.
Shows all databases. The `USER` qualifier specifies only user-created databases. The `SYSTEM` qualifier specifies only system databases.

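As a quick illustration of the qualifiers, the minimal sketch below runs both variants through the Python connector; the host and default credentials are assumptions for a local deployment.

```python
# Minimal sketch, assuming a local TDengine with default credentials.
import taos

conn = taos.connect(host="localhost", user="root", password="taosdata")
# Only user-created databases.
print(conn.query("SHOW USER DATABASES").fetch_all())
# Only system databases, such as information_schema.
print(conn.query("SHOW SYSTEM DATABASES").fetch_all())
```
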
## SHOW DNODES

@ -183,10 +183,10 @@ Shows all subscriptions in the system.

## SHOW TABLES

```sql
SHOW [db_name.]TABLES [LIKE 'pattern'];
SHOW [NORMAL | CHILD] [db_name.]TABLES [LIKE 'pattern'];
```

Shows all standard tables and subtables in the current database. You can use LIKE for fuzzy matching.
Shows all standard tables and subtables in the current database. You can use LIKE for fuzzy matching. The `NORMAL` qualifier specifies standard tables. The `CHILD` qualifier specifies subtables.

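Under the same local-deployment assumptions as above, the sketch below lists only subtables whose names match a pattern; the database name `test` is a hypothetical placeholder.

```python
# Minimal sketch, assuming a local TDengine with default credentials
# and a database named "test" (a hypothetical placeholder).
import taos

conn = taos.connect(host="localhost", user="root", password="taosdata")
conn.select_db("test")
# Only subtables whose names start with "d".
for row in conn.query("SHOW CHILD TABLES LIKE 'd%'").fetch_all():
    print(row)
```
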
## SHOW TABLE DISTRIBUTED

@ -1,178 +0,0 @@
---
title: Install and Uninstall
description: This document describes how to install, upgrade, and uninstall TDengine.
---

import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";

This document gives more information about installing, uninstalling, and upgrading TDengine.

## Install

For details about installing TDengine, please refer to the [Installation Guide](../../get-started/package/).

## Uninstall

<Tabs>
<TabItem label="Uninstall by apt-get" value="aptremove">

A TDengine package installed with apt-get can be uninstalled as below:

```bash
$ sudo apt-get remove tdengine
Reading package lists... Done
Building dependency tree
Reading state information... Done
The following packages will be REMOVED:
  tdengine
0 upgraded, 0 newly installed, 1 to remove and 18 not upgraded.
After this operation, 68.3 MB disk space will be freed.
Do you want to continue? [Y/n] y
(Reading database ... 135625 files and directories currently installed.)
Removing tdengine (3.0.0.0) ...
TDengine is removed successfully!

```

If you have installed taos-tools, please uninstall it before uninstalling TDengine, using the following command:

```
$ sudo apt remove taostools
Reading package lists... Done
Building dependency tree
Reading state information... Done
The following packages will be REMOVED:
  taostools
0 upgraded, 0 newly installed, 1 to remove and 0 not upgraded.
After this operation, 68.3 MB disk space will be freed.
Do you want to continue? [Y/n]
(Reading database ... 147973 files and directories currently installed.)
Removing taostools (2.1.2) ...
```

</TabItem>
<TabItem label="Uninstall Deb" value="debuninst">

The Deb package of TDengine can be uninstalled as below:

```
$ sudo dpkg -r tdengine
(Reading database ... 137504 files and directories currently installed.)
Removing tdengine (3.0.0.0) ...
TDengine is removed successfully!

```

The Deb package of taosTools can be uninstalled as below:

```
$ sudo dpkg -r taostools
(Reading database ... 147973 files and directories currently installed.)
Removing taostools (2.1.2) ...
```

</TabItem>

<TabItem label="Uninstall RPM" value="rpmuninst">

The RPM package of TDengine can be uninstalled as below:

```
$ sudo rpm -e tdengine
TDengine is removed successfully!
```

The RPM package of taosTools can be uninstalled as below:

```
sudo rpm -e taostools
taosToole is removed successfully!
```

</TabItem>

<TabItem label="Uninstall tar.gz" value="taruninst">

The tar.gz package of TDengine can be uninstalled as below:

```
$ rmtaos
TDengine is removed successfully!
```

The tar.gz package of taosTools can be uninstalled as below:

```
$ rmtaostools
Start to uninstall taos tools ...

taos tools is uninstalled successfully!
```

</TabItem>

<TabItem label="Windows uninstall" value="windows">
Run C:\TDengine\unins000.exe to uninstall TDengine on a Windows system.
</TabItem>

<TabItem label="Mac uninstall" value="mac">

TDengine can be uninstalled as below:

```
$ rmtaos
TDengine is removed successfully!
```

</TabItem>
</Tabs>

:::info

- We strongly recommend not using multiple kinds of TDengine installation packages on a single host. The packages may affect each other and cause errors.

- If the installation directory is removed manually after a Deb package is installed, uninstalling or reinstalling will not work. This issue can be resolved with the command below, which cleans up the TDengine package information.

```
$ sudo rm -f /var/lib/dpkg/info/tdengine*
```

You can then reinstall if needed.

- If the installation directory is removed manually after an RPM package is installed, uninstalling or reinstalling will not work. This issue can be resolved with the command below, which cleans up the TDengine package information.

```
$ sudo rpm -e --noscripts tdengine
```

You can then reinstall if needed.

:::

Uninstalling and Modifying Files

- When TDengine is uninstalled, the configuration file /etc/taos/taos.cfg, the data directory /var/lib/taos, and the log directory /var/log/taos are kept. They can be deleted manually, but with caution, because data cannot be recovered. Please follow your data integrity, security, backup, or other relevant SOPs before deleting any data.

- When reinstalling TDengine, if the default configuration file /etc/taos/taos.cfg exists, it is kept, and the configuration file in the installation package is renamed to taos.cfg.orig and stored at /usr/local/taos/cfg to be used as a configuration sample. Otherwise, the configuration file in the installation package is installed to /etc/taos/taos.cfg and used.

## Upgrade
An upgrade has two aspects: upgrading the installation package and upgrading a running server.

To upgrade a package, follow the steps mentioned previously to first uninstall the old version and then install the new version.

Upgrading a running server is much more complex. First, please check the version numbers of the old and new versions. A TDengine version number consists of 4 sections; only if the first 2 sections match can the old version be upgraded to the new version. The steps for upgrading a running server are as below:
- Stop inserting data
- Make sure all data is persisted to disk using the `flush database` command (see the sketch after this list)
- Stop the TDengine cluster
- Uninstall the old version and install the new version
- Start the TDengine cluster
- Execute simple queries, such as the ones executed prior to installing the new package, to make sure there is no data loss
- Run some simple data insertion statements to make sure the cluster works well
- Restore business services
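
The flush and the before/after sanity check can be scripted. The following is a minimal sketch using the Python connector; the names `db_name` and `stb` are hypothetical placeholders for your own database and supertable, and the connection parameters assume a default local deployment.

```python
# Minimal sketch of the pre/post-upgrade check described above, assuming a
# local TDengine with default credentials; "db_name" and "stb" are
# hypothetical placeholders.
import taos

conn = taos.connect(host="localhost", user="root", password="taosdata")

# Before stopping the cluster: persist all data to disk and record a baseline.
conn.execute("FLUSH DATABASE db_name")
before = conn.query("SELECT COUNT(*) FROM db_name.stb").fetch_all()[0][0]

# ... stop the cluster, upgrade, restart the cluster, reconnect ...

# After the upgrade, the same query should return the same count.
after = conn.query("SELECT COUNT(*) FROM db_name.stb").fetch_all()[0][0]
assert before == after, "row count changed across the upgrade"
```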

:::warning
TDengine doesn't guarantee that any lower version is compatible with the data generated by a higher version, so downgrading is never recommended.

:::

@ -41,8 +41,6 @@ An existing Grafana Notification Channel can be specified with parameter `-E`, t

Launch `TDinsight.sh` with the command above and restart Grafana, then open Dashboard `http://localhost:3000/d/tdinsight`.

For more use cases and restrictions please refer to [TDinsight](/reference/tdinsight/).

## log database

The data of the TDinsight dashboard is stored in the `log` database by default. You can change it in taosKeeper's config file; for more information, please refer to the [taosKeeper document](/reference/taosKeeper). taosKeeper creates the `log` database on startup.

@ -106,22 +104,22 @@ The data of tdinsight dashboard is stored in `log` database (default. You can ch
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|uptime|FLOAT||dnode uptime|
|uptime|FLOAT||dnode uptime in `days`|
|cpu\_engine|FLOAT||cpu usage of tdengine. read from `/proc/<taosd_pid>/stat`|
|cpu\_system|FLOAT||cpu usage of server. read from `/proc/stat`|
|cpu\_cores|FLOAT||cpu cores of server|
|mem\_engine|INT||memory usage of tdengine. read from `/proc/<taosd_pid>/status`|
|mem\_system|INT||available memory on the server|
|mem\_system|INT||available memory on the server in `KB`|
|mem\_total|INT||total memory of server in `KB`|
|disk\_engine|INT|||
|disk\_used|BIGINT||usage of data dir in `bytes`|
|disk\_total|BIGINT||the capacity of data dir in `bytes`|
|net\_in|FLOAT||network throughput rate in kb/s. read from `/proc/net/dev`|
|net\_out|FLOAT||network throughput rate in kb/s. read from `/proc/net/dev`|
|io\_read|FLOAT||io throughput rate in kb/s. read from `/proc/<taosd_pid>/io`|
|io\_write|FLOAT||io throughput rate in kb/s. read from `/proc/<taosd_pid>/io`|
|io\_read\_disk|FLOAT||io throughput rate of disk in kb/s. read from `/proc/<taosd_pid>/io`|
|io\_write\_disk|FLOAT||io throughput rate of disk in kb/s. read from `/proc/<taosd_pid>/io`|
|net\_in|FLOAT||network throughput rate in byte/s. read from `/proc/net/dev`|
|net\_out|FLOAT||network throughput rate in byte/s. read from `/proc/net/dev`|
|io\_read|FLOAT||io throughput rate in byte/s. read from `/proc/<taosd_pid>/io`|
|io\_write|FLOAT||io throughput rate in byte/s. read from `/proc/<taosd_pid>/io`|
|io\_read\_disk|FLOAT||io throughput rate of disk in byte/s. read from `/proc/<taosd_pid>/io`|
|io\_write\_disk|FLOAT||io throughput rate of disk in byte/s. read from `/proc/<taosd_pid>/io`|
|req\_select|INT||number of select queries received per dnode|
|req\_select\_rate|FLOAT||number of select queries received per dnode divided by monitor interval.|
|req\_insert|INT||number of insert queries received per dnode|

@ -150,9 +148,9 @@ The data of tdinsight dashboard is stored in `log` database (default. You can ch
|ts|TIMESTAMP||timestamp|
|name|NCHAR||data directory. default is `/var/lib/taos`|
|level|INT||level for multi-level storage|
|avail|BIGINT||available space for data directory|
|used|BIGINT||used space for data directory|
|total|BIGINT||total space for data directory|
|avail|BIGINT||available space for data directory in `bytes`|
|used|BIGINT||used space for data directory in `bytes`|
|total|BIGINT||total space for data directory in `bytes`|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|

@ -165,9 +163,9 @@ The data of tdinsight dashboard is stored in `log` database (default. You can ch
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|name|NCHAR||log directory. default is `/var/log/taos/`|
|avail|BIGINT||available space for log directory|
|used|BIGINT||used space for log directory|
|total|BIGINT||total space for log directory|
|avail|BIGINT||available space for log directory in `bytes`|
|used|BIGINT||used space for log directory in `bytes`|
|total|BIGINT||total space for log directory in `bytes`|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|

@ -180,9 +178,9 @@ The data of tdinsight dashboard is stored in `log` database (default. You can ch
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|name|NCHAR||temp directory. default is `/tmp/`|
|avail|BIGINT||available space for temp directory|
|used|BIGINT||used space for temp directory|
|total|BIGINT||total space for temp directory|
|avail|BIGINT||available space for temp directory in `bytes`|
|used|BIGINT||used space for temp directory in `bytes`|
|total|BIGINT||total space for temp directory in `bytes`|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|

@ -1093,7 +1093,7 @@ TaosConsumer consumer = new TaosConsumer<>(config);
- httpConnectTimeout: WebSocket connection timeout in milliseconds, the default value is 5000 ms. It only takes effect when using WebSocket type.
- messageWaitTimeout: socket timeout in milliseconds, the default value is 10000 ms. It only takes effect when using WebSocket type.
- httpPoolSize: Maximum number of concurrent requests on a connection. It only takes effect when using WebSocket type.
- For more information, see [Consumer Parameters](../../../develop/tmq).
- For more information, see [Consumer Parameters](../../../develop/tmq). Note that the default value of auto.offset.reset in data subscription on the TDengine server has changed since version 3.2.0.0.

#### Subscribe to consume data

@ -1193,7 +1193,7 @@ public abstract class ConsumerLoop {
config.setProperty("bootstrap.servers", "localhost:6030");
config.setProperty("td.connect.user", "root");
config.setProperty("td.connect.pass", "taosdata");
config.setProperty("auto.offset.reset", "earliest");
config.setProperty("auto.offset.reset", "latest");
config.setProperty("msg.with.table.name", "true");
config.setProperty("enable.auto.commit", "true");
config.setProperty("auto.commit.interval.ms", "1000");

|
config.setProperty("bootstrap.servers", "localhost:6041");
config.setProperty("td.connect.user", "root");
config.setProperty("td.connect.pass", "taosdata");
config.setProperty("auto.offset.reset", "earliest");
config.setProperty("auto.offset.reset", "latest");
config.setProperty("msg.with.table.name", "true");
config.setProperty("enable.auto.commit", "true");
config.setProperty("auto.commit.interval.ms", "1000");

@ -794,7 +794,7 @@ The TDengine Go Connector supports subscription functionality with the following
```go
consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{
    "group.id":          "test",
    "auto.offset.reset": "earliest",
    "auto.offset.reset": "latest",
    "td.connect.ip":     "127.0.0.1",
    "td.connect.user":   "root",
    "td.connect.pass":   "taosdata",

@ -870,6 +870,7 @@ package main
import (
    "fmt"
    "os"
    "time"

    "github.com/taosdata/driver-go/v3/af"
    "github.com/taosdata/driver-go/v3/af/tmq"

@ -890,19 +891,16 @@ func main() {
if err != nil {
    panic(err)
}
if err != nil {
    panic(err)
}
consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{
    "group.id":            "test",
    "auto.offset.reset":   "earliest",
    "td.connect.ip":       "127.0.0.1",
    "td.connect.user":     "root",
    "td.connect.pass":     "taosdata",
    "td.connect.port":     "6030",
    "client.id":           "test_tmq_client",
    "enable.auto.commit":  "false",
    "msg.with.table.name": "true",
    "group.id":            "test",
    "auto.offset.reset":   "latest",
    "td.connect.ip":       "127.0.0.1",
    "td.connect.user":     "root",
    "td.connect.pass":     "taosdata",
    "td.connect.port":     "6030",
    "client.id":           "test_tmq_client",
    "enable.auto.commit":  "false",
    "msg.with.table.name": "true",
})
if err != nil {
    panic(err)

@ -915,10 +913,16 @@ func main() {
if err != nil {
    panic(err)
}
_, err = db.Exec("insert into example_tmq.t1 values(now,1)")
if err != nil {
    panic(err)
}
go func() {
    for {
        _, err = db.Exec("insert into example_tmq.t1 values(now,1)")
        if err != nil {
            panic(err)
        }
        time.Sleep(time.Millisecond * 100)
    }
}()

for i := 0; i < 5; i++ {
    ev := consumer.Poll(500)
    if ev != nil {

@ -972,6 +976,7 @@ package main
import (
    "database/sql"
    "fmt"
    "time"

    "github.com/taosdata/driver-go/v3/common"
    tmqcommon "github.com/taosdata/driver-go/v3/common/tmq"

@ -995,7 +1000,7 @@ func main() {
    "td.connect.pass":   "taosdata",
    "group.id":          "example",
    "client.id":         "example_consumer",
    "auto.offset.reset": "earliest",
    "auto.offset.reset": "latest",
})
if err != nil {
    panic(err)

@ -1004,29 +1009,34 @@ func main() {
if err != nil {
    panic(err)
}

_, err = db.Exec("create table example_ws_tmq.t_all(ts timestamp," +
    "c1 bool," +
    "c2 tinyint," +
    "c3 smallint," +
    "c4 int," +
    "c5 bigint," +
    "c6 tinyint unsigned," +
    "c7 smallint unsigned," +
    "c8 int unsigned," +
    "c9 bigint unsigned," +
    "c10 float," +
    "c11 double," +
    "c12 binary(20)," +
    "c13 nchar(20)" +
    ")")
if err != nil {
    panic(err)
}
go func() {
    _, err := db.Exec("create table example_ws_tmq.t_all(ts timestamp," +
        "c1 bool," +
        "c2 tinyint," +
        "c3 smallint," +
        "c4 int," +
        "c5 bigint," +
        "c6 tinyint unsigned," +
        "c7 smallint unsigned," +
        "c8 int unsigned," +
        "c9 bigint unsigned," +
        "c10 float," +
        "c11 double," +
        "c12 binary(20)," +
        "c13 nchar(20)" +
        ")")
    if err != nil {
        panic(err)
    }
    _, err = db.Exec("insert into example_ws_tmq.t_all values(now,true,2,3,4,5,6,7,8,9,10.123,11.123,'binary','nchar')")
    if err != nil {
        panic(err)
    for {
        _, err = db.Exec("insert into example_ws_tmq.t_all values(now,true,2,3,4,5,6,7,8,9,10.123,11.123,'binary','nchar')")
        if err != nil {
            panic(err)
        }
        time.Sleep(time.Millisecond * 100)
    }

}()
for i := 0; i < 5; i++ {
    ev := consumer.Poll(500)

@ -442,7 +442,7 @@ The following parameters can be configured for the TMQ DSN. Only `group.id` is m

- `group.id`: Within a consumer group, load balancing is implemented by consuming messages on an at-least-once basis.
- `client.id`: Subscriber client ID.
- `auto.offset.reset`: Initial point of subscription. *earliest* subscribes from the beginning, and *latest* subscribes from the newest message. The default is earliest. Note: This parameter is set per consumer group.
- `auto.offset.reset`: Initial point of subscription. *earliest* subscribes from the beginning, and *latest* subscribes from the newest message. The default value varies depending on the TDengine version. For details, see [Data Subscription](https://docs.tdengine.com/develop/tmq/). Note: This parameter is set per consumer group.
- `enable.auto.commit`: Automatically commits. This can be enabled when data consistency is not essential.
- `auto.commit.interval.ms`: Interval for automatic commits.

@ -31,11 +31,13 @@ We recommend using the latest version of `taospy`, regardless of the version of

|Python Connector Version|major changes|
|:-------------------:|:----:|
|2.7.12|1. added support for `varbinary` type (STMT does not yet support)<br/> 2. improved query performance (thanks to contributor [hadrianl](https://github.com/taosdata/taos-connector-python/pull/209))|
|2.7.9|support for getting assignment and seek function on subscription|
|2.7.8|add `execute_many` method|

|Python Websocket Connector Version|major changes|
|:----------------------------:|:-----:|
|0.2.9|bug fixes|
|0.2.5|1. support for getting assignment and seek function on subscription <br/> 2. support schemaless <br/> 3. support STMT|
|0.2.4|support `unsubscribe` on subscription|

@ -1023,10 +1025,6 @@ Due to the current imperfection of Python's nanosecond support (see link below),
1. https://stackoverflow.com/questions/10611328/parsing-datetime-strings-containing-nanoseconds
2. https://www.python.org/dev/peps/pep-0564/

## Important Update

[**Release Notes**](https://github.com/taosdata/taos-connector-python/releases)

## API Reference

- [taos](https://docs.taosdata.com/api/taospy/taos/)

@ -52,8 +52,6 @@ curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive
&& tar -xzf php-tdengine.tar.gz -C php-tdengine --strip-components=1
```

> The version number `v1.0.2` is only an example; it can be replaced with any newer version. Please find available versions in [TDengine PHP Connector Releases](https://github.com/Yurunsoft/php-tdengine/releases).

**Non-Swoole Environment:**

```shell

@ -4,7 +4,6 @@ import PkgListV3 from "/components/PkgListV3";

<PkgListV3 type={1} sys="Linux" />

[All Downloads](../../releases/tdengine)

2. Unzip

@ -4,8 +4,6 @@ import PkgListV3 from "/components/PkgListV3";

<PkgListV3 type={8} sys="macOS" />

[All Downloads](../../releases/tdengine)

2. Execute the installer, select the default value as prompted, and complete the installation. If the installation is blocked, you can right-click or ctrl-click on the installation package and select `Open`.
3. Configure taos.cfg

@ -3,8 +3,6 @@ import PkgListV3 from "/components/PkgListV3";
1. Download the client installation package

<PkgListV3 type={4} sys="Windows" />

[All Downloads](../../releases/tdengine)
2. Execute the installer, select the default value as prompted, and complete the installation
3. Installation path

@ -31,7 +31,7 @@ taosAdapter provides the following features.

### Install taosAdapter

If you use the TDengine server, you don't need additional steps to install taosAdapter. You can download taosAdapter from [TDengine 3.0 released versions](../../releases/tdengine) to download the TDengine server installation package. If you need to deploy taosAdapter separately on another server other than the TDengine server, you should install the full TDengine server package on that server to install taosAdapter. If you need to build taosAdapter from source code, you can refer to the [Building taosAdapter](https://github.com/taosdata/taosadapter/blob/3.0/BUILD.md) documentation.
If you use the TDengine server, you don't need additional steps to install taosAdapter. If you need to deploy taosAdapter separately on another server other than the TDengine server, you should install the full TDengine server package on that server to install taosAdapter. If you need to build taosAdapter from source code, you can refer to the [Building taosAdapter](https://github.com/taosdata/taosadapter/blob/3.0/BUILD.md) documentation.

### Start/Stop taosAdapter

@ -180,7 +180,7 @@ See [example/config/taosadapter.toml](https://github.com/taosdata/taosadapter/bl
node_export is an exporter for machine metrics. Please visit [https://github.com/prometheus/node_exporter](https://github.com/prometheus/node_exporter) for more information.
- Support for Prometheus remote_read and remote_write
  remote_read and remote_write are interfaces for Prometheus data read and write from/to other data storage solution. Please visit [https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/#remote-apis](https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/#remote-apis) for more information.
- Get table's VGroup ID. For more information about VGroup, please refer to [primary-logic-unit](/tdinternal/arch/#primary-logic-unit).
- Get table's VGroup ID.

## Interfaces

@ -246,7 +246,7 @@ node_export is an exporter of hardware and OS metrics exposed by the \*NIX kerne

### Get table's VGroup ID

You can call `http://<fqdn>:6041/rest/vgid?db=<db>&table=<table>` to get table's VGroup ID. For more information about VGroup, please refer to [primary-logic-unit](/tdinternal/arch/#primary-logic-unit).
You can call `http://<fqdn>:6041/rest/vgid?db=<db>&table=<table>` to get table's VGroup ID.
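
A quick way to exercise the endpoint is the sketch below. The database and table names are hypothetical, and the use of HTTP Basic authentication with default credentials is an assumption based on taosAdapter's other REST interfaces, not something this page states.

```python
# Minimal sketch: fetch a table's VGroup ID through taosAdapter's REST
# endpoint. "test" and "d0" are hypothetical names; Basic auth with default
# credentials is an assumption, as used by the other REST interfaces.
import requests

resp = requests.get(
    "http://localhost:6041/rest/vgid",
    params={"db": "test", "table": "d0"},
    auth=("root", "taosdata"),
)
print(resp.json())
```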

## Memory usage optimization methods

@ -13,7 +13,7 @@ taosBenchmark (formerly taosdemo ) is a tool for testing the performance of TDen

There are two ways to install taosBenchmark:

- Installing the official TDengine installer will automatically install taosBenchmark. Please refer to [TDengine installation](../../operation/pkg-install) for details.
- Installing the official TDengine installer will automatically install taosBenchmark.

- Compile taos-tools separately and install them. Please refer to the [taos-tools](https://github.com/taosdata/taos-tools) repository for details.

@ -397,6 +397,7 @@ The configuration parameters for specifying super table tag columns and data col
### Query scenario configuration parameters

`filetype` must be set to `query` in the query scenario.
`query_times` is the number of times the queries are run.

To control slow queries in the query scenario, you can set the `kill_slow_query_threshold` and `kill_slow_query_interval` parameters to kill the execution of slow query statements. The threshold controls the execution time (exec_usec), in seconds, after which a query is killed by taosBenchmark; the interval controls the sleep time, in seconds, between checks, so that continuous polling for slow queries does not consume CPU.

@ -103,7 +103,7 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
                                 use letter and number only. Default is NOT.
  -n, --no-escape                No escape char '`'. Default is using it.
  -Q, --dot-replace              Replace dot character with underline character in
                                 the table name.
                                 the table name.(Version 2.5.3)
  -T, --thread-num=THREAD_NUM    Number of thread for dump in file. Default is
                                 8.
  -C, --cloud=CLOUD_DSN          specify a DSN to access TDengine cloud service

@ -113,6 +113,10 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
  -?, --help                     Give this help list
      --usage                    Give a short usage message
  -V, --version                  Print program version
  -W, --rename=RENAME-LIST       Rename database name with new name during
                                 importing data. RENAME-LIST:
                                 "db1=newDB1|db2=newDB2" means rename db1 to newDB1
                                 and rename db2 to newDB2 (Version 2.5.4)

Mandatory or optional arguments to long options are also mandatory or optional
for any corresponding short options.

@ -652,6 +652,15 @@ The charset that takes effect is UTF-8.
| Type          | String |
| Default Value | None   |

### smlAutoChildTableNameDelimiter

| Attribute     | Description                          |
| ------------- | ------------------------------------ |
| Applicable    | Client only                          |
| Meaning       | Delimiter between tags as table name |
| Type          | String                               |
| Default Value | None                                 |

### smlTagName

| Attribute     | Description |

@ -731,16 +740,6 @@ The charset that takes effect is UTF-8.
| Value Range   | 0: not change; 1: change by modification |
| Default Value | 0                                        |

### keepTimeOffset

| Attribute     | Description               |
| ------------- | ------------------------- |
| Applicable    | Server Only               |
| Meaning       | Latency of data migration |
| Unit          | hour                      |
| Value Range   | 0-23                      |
| Default Value | 0                         |

### tmqMaxTopicNum

| Attribute     | Description |

@ -807,4 +806,4 @@ The charset that takes effect is UTF-8.
| 53  | udf              | Yes | Yes                          |     |
| 54  | enableCoreFile   | Yes | Yes                          |     |
| 55  | ttlChangeOnWrite | No  | Yes                          |     |
| 56  | keepTimeOffset   | Yes | Yes                          |     |
| 56  | keepTimeOffset   | Yes | Yes(discarded since 3.2.0.0) |     |

@ -93,6 +93,8 @@ Note that tag_key1, tag_key2 are not the original order of the tags entered by t

The string's MD5 hash value "md5_val" is calculated after the ranking is completed. The calculation result is then combined with the string to generate the table name: "t_md5_val". "t\_" is a fixed prefix that every table generated by this mapping relationship has.
:::

If you do not want to use an automatically generated table name, there are two ways to specify subtable names; the first one has a higher priority.
You can configure smlAutoChildTableNameDelimiter in taos.cfg to specify the delimiter that joins the tag values into the subtable name, for example, `smlAutoChildTableNameDelimiter=-`. You can insert `st,t0=cpu1,t1=4 c1=3 1626006833639000000` and the table name will be cpu1-4.
You can configure smlChildTableName in taos.cfg to specify table names, for example, `smlChildTableName=tname`. You can insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000` and the cpu1 table will be automatically created (see the sketch after this list). Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.

2. If the super table obtained by parsing the line protocol does not exist, this super table is created.

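To make the naming options above concrete, here is a minimal sketch using the Python connector's schemaless interface. It assumes a local TDengine with default credentials and that `smlChildTableName=tname` has already been set in the client-side taos.cfg; the database name `sml_demo` is a hypothetical placeholder.

```python
# Minimal sketch of schemaless line-protocol writing. Assumes a local
# TDengine with default credentials and smlChildTableName=tname set in the
# client-side taos.cfg, so the "tname" tag supplies the subtable name.
import taos

conn = taos.connect(host="localhost", user="root", password="taosdata")
conn.execute("CREATE DATABASE IF NOT EXISTS sml_demo")
conn.select_db("sml_demo")

# With smlChildTableName=tname, this row lands in a subtable named cpu1.
lines = ["st,tname=cpu1,t1=4 c1=3 1626006833639000000"]
conn.schemaless_insert(
    lines,
    taos.SmlProtocol.LINE_PROTOCOL,
    taos.SmlPrecision.NANO_SECONDS,
)
```
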
@ -16,7 +16,7 @@ taosKeeper is a tool for TDengine that exports monitoring metrics. With taosKeep

There are two ways to install taosKeeper:
Methods of installing taosKeeper:

- Installing the official TDengine installer will automatically install taosKeeper. Please refer to [TDengine installation](../../operation/pkg-install) for details.
- Installing the official TDengine installer will automatically install taosKeeper.

- You can compile taosKeeper separately and install it. Please refer to the [taosKeeper](https://github.com/taosdata/taoskeeper) repository for details.
## Configuration and Launch

@ -21,7 +21,7 @@ TDengine Source Connector is used to read data from TDengine in real-time and se
1. Linux operating system
2. Java 8 and Maven installed
3. Git/curl/vi is installed
4. TDengine is installed and started. If not, please refer to [Installation and Uninstallation](../../operation/pkg-install)
4. TDengine is installed and started.

## Install Kafka

@ -10,76 +10,60 @@ description: How to use Seeq and TDengine to perform time series data analysis

Seeq is an advanced analytics software for the manufacturing industry and the Industrial Internet of Things (IIoT). Seeq supports the use of machine learning innovations within process manufacturing organizations. These capabilities enable organizations to deploy their own or third-party machine learning algorithms into advanced analytics applications used by frontline process engineers and subject matter experts, thus extending the efforts of a single data scientist to many frontline workers.

With the TDengine Java connector, Seeq effortlessly supports querying time series data provided by TDengine and offers functionalities such as data visualization, analysis, and forecasting.
TDengine can be added as a data source into Seeq via the JDBC connector. Once the data source is configured, Seeq can read data from TDengine and offers functionalities such as data visualization, analysis, and forecasting.

### Install Seeq
## Prerequisite

Please download the Seeq Server and Seeq Data Lab software installation packages from the [Seeq official website](https://www.seeq.com/customer-download).
1. Install Seeq Server and Seeq Data Lab software
2. Install TDengine or register for a TDengine Cloud service

### Install and start Seeq Server

```
tar xvzf seeq-server-xxx.tar.gz
cd seeq-server-installer
sudo ./install

sudo seeq service enable
sudo seeq start
```

### Install and start Seeq Data Lab Server

Seeq Data Lab needs to be installed on a separate server from Seeq Server and connected to Seeq Server through configuration. For detailed installation and configuration instructions, please refer to [the official documentation](https://support.seeq.com/space/KB/1034059842).

```
tar xvf seeq-data-lab-<version>-64bit-linux.tar.gz
sudo seeq-data-lab-installer/install -f /opt/seeq/seeq-data-lab -g /var/opt/seeq -u seeq
sudo seeq config set Network/DataLab/Hostname localhost
sudo seeq config set Network/DataLab/Port 34231 # the port of the Data Lab server (usually 34231)
sudo seeq config set Network/Hostname <value> # the host IP or URL of the main Seeq Server

# If the main Seeq server is configured to listen over HTTPS
sudo seeq config set Network/Webserver/SecurePort 443 # the secure port of the main Seeq Server (usually 443)

# If the main Seeq server is NOT configured to listen over HTTPS
sudo seeq config set Network/Webserver/Port <value>

# On the main Seeq server, open a Seeq Command Prompt and set the hostname of the Data Lab server:
sudo seeq config set Network/DataLab/Hostname <value> # the host IP (not URL) of the Data Lab server
sudo seeq config set Network/DataLab/Port 34231 # the port of the Data Lab server (usually 34231)
```

### Install TDengine on-premise instance

See [Quick Install from Package](../../get-started).

### Or use TDengine Cloud

Register for a [TDengine Cloud](https://cloud.tdengine.com) account and log in to your account.

## Make Seeq be able to access TDengine

1. Get data location configuration
## Install TDengine JDBC connector

1. Get Seeq data location configuration
```
sudo seeq config get Folders/Data
```

2. Download the TDengine Java connector from maven.org. Please use the latest version (currently 3.2.5, https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/3.2.5/taos-jdbcdriver-3.2.5-dist.jar).

2. Download the latest TDengine Java connector from maven.org (the current version is [3.2.5](https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/3.2.5/taos-jdbcdriver-3.2.5-dist.jar)), and copy the JAR file into the_directory_found_in_step_1/plugins/lib/
3. Restart Seeq server

```
sudo seeq restart
```

4. Input License
## Add TDengine into Seeq's data source
1. Open Seeq, log in as admin, go to Administration, and click "Add Data Source"
2. For the connector, choose SQL connector v2
3. Inside the "Additional Configuration" input box, copy and paste the following

Use a browser to access ip:34216 and input the license according to the guide.
```
{
    "QueryDefinitions": [],
    "Type": "GENERIC",
    "Hostname": null,
    "Port": 0,
    "DatabaseName": null,
    "Username": null,
    "Password": null,
    "InitialSql": null,
    "TimeZone": null,
    "PrintRows": false,
    "UseWindowsAuth": false,
    "SqlFetchBatchSize": 100000,
    "UseSSL": false,
    "JdbcProperties": null,
    "GenericDatabaseConfig": {
        "DatabaseJdbcUrl": "jdbc:TAOS-RS://localhost:6030/?user=root&password=taosdata",
        "SqlDriverClassName": "com.taosdata.jdbc.rs.RestfulDriver",
        "ResolutionInNanoseconds": 1000,
        "ZonedColumnTypes": []
    }
}
```

## How to use Seeq to analyze time-series data that TDengine serves
Note: You need to replace DatabaseJdbcUrl with your own setting. Please log in to TDengine Cloud, or open taosExplorer for the enterprise edition, and click Programming -> Java to find yours. For the "QueryDefinitions", please follow the examples below to write your own.

This chapter demonstrates how to use Seeq software in conjunction with TDengine for time series data analysis.
## Use Seeq to analyze time-series data stored inside TDengine

This chapter demonstrates how to use Seeq with TDengine for time series data analysis.

### Scenario Overview

@ -150,8 +134,8 @@ Please login with Seeq administrator and create a few data sources as following.
    "Hostname": null,
    "Port": 0,
    "DatabaseName": null,
    "Username": "root",
    "Password": "taosdata",
    "Username": null,
    "Password": null,
    "InitialSql": null,
    "TimeZone": null,
    "PrintRows": false,

@ -210,8 +194,8 @@ Please login with Seeq administrator and create a few data sources as following.
    "Hostname": null,
    "Port": 0,
    "DatabaseName": null,
    "Username": "root",
    "Password": "taosdata",
    "Username": null,
    "Password": null,
    "InitialSql": null,
    "TimeZone": null,
    "PrintRows": false,

@ -269,8 +253,8 @@ Please login with Seeq administrator and create a few data sources as following.
    "Hostname": null,
    "Port": 0,
    "DatabaseName": null,
    "Username": "root",
    "Password": "taosdata",
    "Username": null,
    "Password": null,
    "InitialSql": null,
    "TimeZone": null,
    "PrintRows": false,

@ -289,13 +273,13 @@ Please login with Seeq administrator and create a few data sources as following.

#### Launch Seeq Workbench

Please log in to the Seeq server with IP:port and create a new Seeq Workbench, then select data sources and choose the correct tools to do data visualization and analysis. Please refer to [the official documentation](https://support.seeq.com/space/KB/146440193/Seeq+Workbench) for the details.
Please log in to the Seeq server and create a new Seeq Workbench, then select data sources and choose the correct tools to do data visualization and analysis. Please refer to [the official documentation](https://support.seeq.com/space/KB/146440193/Seeq+Workbench) for the details.

#### Use Seeq Data Lab Server for advanced data analysis

Please log in to the Seeq service with IP:port and create a new Seeq Data Lab. Then you can use advanced tools including the Python environment and machine learning add-ons for more complex analysis.
Please log in to the Seeq service and create a new Seeq Data Lab. Then you can use advanced tools including the Python environment and machine learning add-ons for more complex analysis.

```Python
from seeq import spy

@ -370,13 +354,15 @@ Please note that when using TDengine Cloud, you need to specify the database nam

#### The data source of TDengine Cloud example

This data source contains the data from a smart meter in the public database smartmeters.

```
{
    "QueryDefinitions": [
        {
            "Name": "CloudVoltage",
            "Type": "SIGNAL",
            "Sql": "SELECT ts, voltage FROM test.meters",
            "Sql": "SELECT ts, voltage FROM smartmeters.d1000",
            "Enabled": true,
            "TestMode": false,
            "TestQueriesDuringSync": true,

@ -409,8 +395,8 @@ Please note that when using TDengine Cloud, you need to specify the database nam
    "Hostname": null,
    "Port": 0,
    "DatabaseName": null,
    "Username": "root",
    "Password": "taosdata",
    "Username": null,
    "Password": null,
    "InitialSql": null,
    "TimeZone": null,
    "PrintRows": false,

@ -419,7 +405,7 @@ Please note that when using TDengine Cloud, you need to specify the database nam
    "UseSSL": false,
    "JdbcProperties": null,
    "GenericDatabaseConfig": {
        "DatabaseJdbcUrl": "jdbc:TAOS-RS://gw.cloud.taosdata.com?useSSL=true&token=41ac9d61d641b6b334e8b76f45f5a8XXXXXXXXXX",
        "DatabaseJdbcUrl": "jdbc:TAOS-RS://gw.us-west-2.aws.cloud.tdengine.com?useSSL=true&token=42b874395452d36f38dd6bf4317757611b213683",
        "SqlDriverClassName": "com.taosdata.jdbc.rs.RestfulDriver",
        "ResolutionInNanoseconds": 1000,
        "ZonedColumnTypes": []

@ -433,8 +419,8 @@ Please note that when using TDengine Cloud, you need to specify the database nam

## Conclusion

By integrating Seeq and TDengine, it is possible to leverage the efficient storage and querying performance of TDengine while also benefiting from Seeq's powerful data visualization and analysis capabilities provided to users.
By integrating Seeq and TDengine, you can leverage the efficient storage and querying performance of TDengine while also benefiting from Seeq's powerful data visualization and analysis capabilities provided to users.

This integration allows users to take advantage of TDengine's high-performance time-series data storage and retrieval, ensuring efficient handling of large volumes of data. At the same time, Seeq provides advanced analytics features such as data visualization, anomaly detection, correlation analysis, and predictive modeling, enabling users to gain valuable insights and make data-driven decisions.
This integration allows users to take advantage of TDengine's high-performance time-series data storage and query, ensuring efficient handling of large volumes of data. At the same time, Seeq provides advanced analytics features such as data visualization, anomaly detection, correlation analysis, and predictive modeling, enabling users to gain valuable insights and make data-driven decisions.

Together, Seeq and TDengine provide a comprehensive solution for time series data analysis in diverse industries such as manufacturing, IIoT, and power systems. The combination of efficient data storage and advanced analytics empowers users to unlock the full potential of their time series data, driving operational improvements, and enabling predictive and prescriptive analytics applications.

@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://t

import Release from "/components/ReleaseV3";

## 3.2.0.0

<Release type="tdengine" version="3.2.0.0" />

## 3.1.1.0

<Release type="tdengine" version="3.1.1.0" />

@ -3,6 +3,7 @@ package main
import (
    "fmt"
    "os"
    "time"

    "github.com/taosdata/driver-go/v3/af"
    "github.com/taosdata/driver-go/v3/af/tmq"

@ -27,15 +28,15 @@ func main() {
    panic(err)
}
consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{
    "group.id":            "test",
    "auto.offset.reset":   "earliest",
    "td.connect.ip":       "127.0.0.1",
    "td.connect.user":     "root",
    "td.connect.pass":     "taosdata",
    "td.connect.port":     "6030",
    "client.id":           "test_tmq_client",
    "enable.auto.commit":  "false",
    "msg.with.table.name": "true",
    "group.id":            "test",
    "auto.offset.reset":   "latest",
    "td.connect.ip":       "127.0.0.1",
    "td.connect.user":     "root",
    "td.connect.pass":     "taosdata",
    "td.connect.port":     "6030",
    "client.id":           "test_tmq_client",
    "enable.auto.commit":  "false",
    "msg.with.table.name": "true",
})
if err != nil {
    panic(err)

@ -48,12 +49,17 @@ func main() {
if err != nil {
    panic(err)
}
_, err = db.Exec("insert into example_tmq.t1 values(now,1)")
if err != nil {
    panic(err)
}
go func() {
    for {
        _, err = db.Exec("insert into example_tmq.t1 values(now,1)")
        if err != nil {
            panic(err)
        }
        time.Sleep(time.Microsecond * 100)
    }
}()
for i := 0; i < 5; i++ {
    ev := consumer.Poll(0)
    ev := consumer.Poll(500)
    if ev != nil {
        switch e := ev.(type) {
        case *tmqcommon.DataMessage:

@ -66,7 +66,6 @@ public class SubscribeDemo {
properties.setProperty(TMQConstants.VALUE_DESERIALIZER,
        "com.taos.example.MetersDeserializer");
properties.setProperty(TMQConstants.VALUE_DESERIALIZER_ENCODING, "UTF-8");
properties.setProperty(TMQConstants.EXPERIMENTAL_SNAPSHOT_ENABLE, "true");

// poll data
try (TaosConsumer<Meters> consumer = new TaosConsumer<>(properties)) {

@ -66,7 +66,6 @@ public class WebsocketSubscribeDemo {
properties.setProperty(TMQConstants.VALUE_DESERIALIZER,
        "com.taos.example.MetersDeserializer");
properties.setProperty(TMQConstants.VALUE_DESERIALIZER_ENCODING, "UTF-8");
properties.setProperty(TMQConstants.EXPERIMENTAL_SNAPSHOT_ENABLE, "true");

// poll data
try (TaosConsumer<Meters> consumer = new TaosConsumer<>(properties)) {

@ -23,9 +23,6 @@ def taos_get_assignment_and_seek_demo():
consumer = Consumer(
    {
        "group.id": "0",
        # should disable snapshot,
        # otherwise it will cause invalid params error
        "experimental.snapshot.enable": "false",
    }
)
consumer.subscribe(["tmq_assignment_demo_topic"])

@ -21,9 +21,6 @@ def taosws_get_assignment_and_seek_demo():
prepare()
consumer = taosws.Consumer(conf={
    "td.connect.websocket.scheme": "ws",
    # should disable snapshot,
    # otherwise it will cause invalid params error
    "experimental.snapshot.enable": "false",
    "group.id": "0",
})
consumer.subscribe(["tmq_assignment_demo_topic"])

@ -4,20 +4,14 @@ description: 简要介绍 TDengine 的主要功能
toc_max_heading_level: 2
---

TDengine is an open-source, high-performance, cloud-native [time-series database](https://tdengine.com/tsdb/) optimized for IoT, connected vehicles, Industrial IoT, finance, IT operations, and similar scenarios. The TDengine code, including the cluster feature, is open source under GNU AGPL v3.0. Besides the core time-series database functionality, TDengine also provides [caching](../develop/cache/), [data subscription](../develop/tmq), [stream processing](../develop/stream), and other features to reduce system complexity and the costs of development and operation.
TDengine is a big data platform designed and optimized for IoT, Industrial IoT, and similar scenarios. It can securely and efficiently aggregate, store, analyze, and distribute the TB- or even PB-scale data produced every day by huge numbers of devices and data collectors, monitor and alert on business operation status in real time, and deliver real-time business insights. Its core module is TDengine OSS, a high-performance, open-source clustered, cloud-native, and minimalist time-series database.

This chapter introduces the major products and features of TDengine, its competitive advantages, typical use cases, and benchmarks against other databases, to give you an overall understanding of TDengine.

## Main Products

TDengine has three main products: TDengine Enterprise, TDengine Cloud, and TDengine OSS. For their definitions, please refer to
- [TDengine Enterprise](https://www.taosdata.com/tdengine-pro)
- [TDengine Cloud](https://cloud.taosdata.com/?utm_source=menu&utm_medium=webcn)
- [TDengine OSS](https://www.taosdata.com/tdengine-oss)
This section introduces the major features of TDengine OSS, its competitive advantages, typical use cases, and benchmarks against other databases, to give you an overall understanding of TDengine OSS.

## Major Features

The major features of TDengine are as follows:
The major features of TDengine OSS are as follows:

1. Insert data, with support for
   - [SQL writing](../develop/insert-data/sql-writing)

@ -150,3 +144,10 @@ TDengine 的主要功能如下:
- [TDengine vs. InfluxDB: Write Performance Benchmark](https://www.taosdata.com/2021/11/05/3248.html)
- [TDengine and InfluxDB Query Performance Comparison Report](https://www.taosdata.com/2022/02/22/5969.html)
- [Comparison Report of TDengine vs. InfluxDB, OpenTSDB, Cassandra, MySQL, ClickHouse, and Other Databases](https://www.taosdata.com/downloads/TDengine_Testing_Report_cn.pdf)


## Main Products

TDengine has two main products: TDengine Enterprise and TDengine Cloud. For their definitions, please refer to
- [TDengine Enterprise](https://www.taosdata.com/tdengine-pro)
- [TDengine Cloud](https://cloud.taosdata.com/?utm_source=menu&utm_medium=webcn)

@ -4,7 +4,7 @@ description: '快速设置 TDengine 环境并体验其高效写入和查询'
---

import xiaot from './xiaot.webp'
import xiaot_new from './xiaot-03.webp'
import xiaot_new from './xiaot-20231007.png'
import channel from './channel.webp'
import official_account from './official-account.webp'

Binary file not shown.
After Width: | Height: | Size: 112 KiB
@ -38,7 +38,10 @@ meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0
|
|||
- field_set 中的每个数据项都需要对自身的数据类型进行描述, 比如 1.2f32 代表 FLOAT 类型的数值 1.2, 如果不带类型后缀会被当作 DOUBLE 处理
|
||||
- timestamp 支持多种时间精度。写入数据的时候需要用参数指定时间精度,支持从小时到纳秒的 6 种时间精度
|
||||
- 为了提高写入的效率,默认假设同一个超级表中 field_set 的顺序是一样的(第一条数据包含所有的 field,后面的数据按照这个顺序),如果顺序不一样,需要配置参数 smlDataFormat 为 false,否则,数据写入按照相同顺序写入,库中数据会异常。(3.0.1.3 之后的版本 smlDataFormat 默认为 false,从3.0.3.0开始,该配置废弃) [TDengine 无模式写入参考指南](/reference/schemaless/#无模式写入行协议)
|
||||
- 默认产生的子表名是根据规则生成的唯一 ID 值。用户也可以通过在client端的 taos.cfg 里配置 smlChildTableName 参数来指定某个标签值作为子表名。该标签值应该具有全局唯一性。举例如下:假设有个标签名为tname, 配置 smlChildTableName=tname, 插入数据为 st,tname=cpu1,t1=4 c1=3 1626006833639000000 则创建的子表名为 cpu1。注意如果多行数据 tname 相同,但是后面的 tag_set 不同,则使用第一行自动建表时指定的 tag_set,其他的行会忽略)。[TDengine 无模式写入参考指南](/reference/schemaless/#无模式写入行协议)
|
||||
- 子表名生成规则
|
||||
- 默认产生的子表名是根据规则生成的唯一 ID 值。
|
||||
- 用户也可以通过在client端的 taos.cfg 里配置 smlAutoChildTableNameDelimiter 参数来指定连接标签之间的分隔符,连接起来后作为子表名。举例如下:配置 smlAutoChildTableNameDelimiter=-, 插入数据为 st,t0=cpu1,t1=4 c1=3 1626006833639000000 则创建的子表名为 cpu1-4。
|
||||
- 用户也可以通过在client端的 taos.cfg 里配置 smlChildTableName 参数来指定某个标签值作为子表名。该标签值应该具有全局唯一性。举例如下:假设有个标签名为tname, 配置 smlChildTableName=tname, 插入数据为 st,tname=cpu1,t1=4 c1=3 1626006833639000000 则创建的子表名为 cpu1。注意如果多行数据 tname 相同,但是后面的 tag_set 不同,则使用第一行自动建表时指定的 tag_set,其他的行会忽略)。[TDengine 无模式写入参考指南](/reference/schemaless/#无模式写入行协议)
|
||||
|
||||
:::
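下面给出一个最小示意(仅为草图:假设本地已部署 TDengine、已安装 taospy 连接器,且存在名为 test 的数据库,均为假设),演示上文所述的类型后缀与时间精度写法:

```python
import taos
from taos import SmlProtocol, SmlPrecision

conn = taos.connect(database="test")

# 1.2f32 带 FLOAT 类型后缀;voltage、phase 不带后缀,按上文规则按 DOUBLE 处理
lines = [
    "meters,location=California.LosAngeles,groupid=2 "
    "current=1.2f32,voltage=223,phase=0.29 1626006833639000000"
]

# precision 参数须与数据中时间戳的精度一致(此处为纳秒)
conn.schemaless_insert(lines, SmlProtocol.LINE_PROTOCOL, SmlPrecision.NANO_SECONDS)
conn.close()
```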
|
||||
|
||||
|
|
|
@ -31,8 +31,11 @@ OpenTSDB 行协议同样采用一行字符串来表示一行数据。OpenTSDB
|
|||
```txt
|
||||
meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3
|
||||
```
|
||||
- 子表名生成规则
|
||||
- 默认产生的子表名是根据规则生成的唯一 ID 值。
|
||||
- 用户也可以通过在client端的 taos.cfg 里配置 smlAutoChildTableNameDelimiter 参数来指定连接标签之间的分隔符,连接起来后作为子表名。举例如下:配置 smlAutoChildTableNameDelimiter=-, 插入数据为 st,t0=cpu1,t1=4 c1=3 1626006833639000000 则创建的子表名为 cpu1-4。
|
||||
- 用户也可以通过在client端的 taos.cfg 里配置 smlChildTableName 参数来指定某个标签值作为子表名。该标签值应该具有全局唯一性。举例如下:假设有个标签名为tname, 配置 smlChildTableName=tname, 插入数据为 st,tname=cpu1,t1=4 c1=3 1626006833639000000 则创建的子表名为 cpu1。注意如果多行数据 tname 相同,但是后面的 tag_set 不同,则使用第一行自动建表时指定的 tag_set,其他的行会忽略)。[TDengine 无模式写入参考指南](/reference/schemaless/#无模式写入行协议)
|
||||
|
||||
- 默认生产的子表名是根据规则生成的唯一 ID 值。用户也可以通过在client端的 taos.cfg 里配置 smlChildTableName 参数来指定某个标签值作为子表名。该标签值应该具有全局唯一性。举例如下:假设有个标签名为tname, 配置 smlChildTableName=tname, 插入数据为 meters.current 1648432611250 11.3 tname=cpu1 location=California.LosAngeles groupid=3 则创建的表名为 cpu1,注意如果多行数据 tname 相同,但是后面的 tag_set 不同,则使用第一行自动建表时指定的 tag_set,其他的行会忽略)。
|
||||
参考 [OpenTSDB Telnet API 文档](http://opentsdb.net/docs/build/html/api_telnet/put.html)。
|
||||
|
||||
## 示例代码
|
||||
|
|
|
@ -47,7 +47,10 @@ OpenTSDB JSON 格式协议采用一个 JSON 字符串表示一行或多行数据
|
|||
:::note
|
||||
|
||||
- 对于 JSON 格式协议,TDengine 并不会自动把所有标签转成 NCHAR 类型,字符串将转为 NCHAR 类型,数值将转换为 DOUBLE 类型。
|
||||
- 默认生成的子表名是根据规则生成的唯一 ID 值。用户也可以通过在client端的 taos.cfg 里配置 smlChildTableName 参数来指定某个标签值作为子表名。该标签值应该具有全局唯一性。举例如下:假设有个标签名为tname, 配置 smlChildTableName=tname, 插入数据为 `"tags": { "host": "web02","dc": "lga","tname":"cpu1"}` 则创建的子表名为 cpu1。注意如果多行数据 tname 相同,但是后面的 tag_set 不同,则使用第一行自动建表时指定的 tag_set,其他的行会忽略)。
|
||||
- 子表名生成规则
|
||||
- 默认产生的子表名是根据规则生成的唯一 ID 值。
|
||||
- 用户也可以通过在client端的 taos.cfg 里配置 smlAutoChildTableNameDelimiter 参数来指定连接标签之间的分隔符,连接起来后作为子表名。举例如下:配置 smlAutoChildTableNameDelimiter=-, 插入数据为 st,t0=cpu1,t1=4 c1=3 1626006833639000000 则创建的子表名为 cpu1-4。
|
||||
- 用户也可以通过在client端的 taos.cfg 里配置 smlChildTableName 参数来指定某个标签值作为子表名。该标签值应该具有全局唯一性。举例如下:假设有个标签名为tname, 配置 smlChildTableName=tname, 插入数据为 st,tname=cpu1,t1=4 c1=3 1626006833639000000 则创建的子表名为 cpu1。注意如果多行数据 tname 相同,但是后面的 tag_set 不同,则使用第一行自动建表时指定的 tag_set,其他的行会忽略)。[TDengine 无模式写入参考指南](/reference/schemaless/#无模式写入行协议)
|
||||
|
||||
:::
|
||||
|
||||
|
|
|
@ -23,22 +23,30 @@ import CDemo from "./_sub_c.mdx";
|
|||
|
||||
为了实现上述功能,TDengine 会为 WAL (Write-Ahead-Log) 文件自动创建索引以支持快速随机访问,并提供了灵活可配置的文件切换与保留机制:用户可以按需指定 WAL 文件保留的时间以及大小(详见 create database 语句)。通过以上方式将 WAL 改造成了一个保留事件到达顺序的、可持久化的存储引擎(但由于 TSDB 具有远比 WAL 更高的压缩率,我们不推荐保留太长时间,一般来说,不超过几天)。 对于以 topic 形式创建的查询,TDengine 将对接 WAL 而不是 TSDB 作为其存储引擎。在消费时,TDengine 根据当前消费进度从 WAL 直接读取数据,并使用统一的查询引擎实现过滤、变换等操作,将数据推送给消费者。
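下面是上述机制的一个最小示意(数据库名、表名、topic 名均为假设,需已安装 taospy 连接器):先创建带有明确 WAL 保留时长的数据库,再在其上创建以 WAL 为存储引擎的 topic:

```python
import taos

conn = taos.connect()
# WAL 保留一天(86400 秒),给订阅者留出消费时间;如上文所述,不建议保留太久
conn.execute("CREATE DATABASE IF NOT EXISTS power WAL_RETENTION_PERIOD 86400")
conn.execute(
    "CREATE STABLE IF NOT EXISTS power.meters "
    "(ts TIMESTAMP, current FLOAT) TAGS (location BINARY(64))"
)
# 该 topic 的查询对接 WAL 而非 TSDB
conn.execute(
    "CREATE TOPIC IF NOT EXISTS topic_meters AS "
    "SELECT ts, current FROM power.meters"
)
conn.close()
```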
|
||||
|
||||
本文档不对消息队列本身的基础知识做介绍,如果需要了解,请自行搜索。
|
||||
下面为关于数据订阅的一些说明,需要对 TDengine 的架构有一些了解,并结合各个语言连接器的接口使用。
|
||||
- 一个消费组消费同一个topic下的所有数据,不同消费组之间相互独立;
|
||||
- 一个消费组消费同一个topic所有的vgroup,消费组可由多个消费者组成,但一个vgroup仅被一个消费者消费,如果消费者数量超过了vgroup数量,多余的消费者不消费数据;
|
||||
- 在服务端每个vgroup仅保存一个offset,每个vgroup的offset是单调递增的,但不一定连续。各个vgroup的offset之间没有关联;
|
||||
- 每次poll服务端会返回一个结果block,该block属于一个vgroup,可能包含多个wal版本的数据,可以通过 offset 接口获得是该block第一条记录的offset;
|
||||
- 一个消费组如果从未commit过offset,当其成员消费者重启重新拉取数据时,均从参数auto.offset.reset设定值开始消费;在一个消费者生命周期中,客户端本地记录了最近一次拉取数据的offset,不会拉取重复数据;
|
||||
- 消费者如果异常终止(没有调用tmq_close),需等约12秒后触发其所属消费组rebalance,该消费者在服务端状态变为LOST,约1天后该消费者自动被删除;正常退出,退出后就会删除消费者;新增消费者,需等约2秒触发rebalance,该消费者在服务端状态变为ready;
|
||||
- 消费组rebalance会对该组所有ready状态的消费者成员重新进行vgroup分配,消费者仅能对自己负责的vgroup进行assignment/seek/commit/poll操作;
|
||||
- 消费者可利用 position 获得当前消费的offset,并seek到指定offset,重新消费;
|
||||
- seek将position指向指定offset,不执行commit操作,一旦seek成功,可poll拉取指定offset及以后的数据;
|
||||
- seek 操作之前须调用 assignment 接口获取该consumer的vgroup ID和offset范围。seek 操作会检测vgroup ID 和 offset是否合法,如非法将报错;
|
||||
- position是获取当前的消费位置,是下次要取的位置,不是当前消费到的位置
|
||||
- commit是提交消费位置,不带参数的话,是提交当前消费位置(下次要取的位置,不是当前消费到的位置),带参数的话,是提交参数里的位置(也即下次退出重启后要取的位置)
|
||||
- seek是设置consumer消费位置,seek到哪,position就返回哪,都是下次要取的位置
|
||||
- seek不会影响commit,commit不影响seek,相互独立,两个是不同的概念
|
||||
- begin接口为wal 第一条数据的offset,end 接口为wal 最后一条数据的offset + 1
|
||||
- offset接口获取的是记录所在结果block块里的第一条数据的offset,当seek至该offset时,将消费到这个block里的全部数据。参见第四点;
|
||||
- 由于存在 WAL 过期删除机制,即使seek 操作成功,poll数据时有可能offset已失效。如果poll 的offset 小于 WAL 最小版本号,将会从WAL最小版本号消费;
|
||||
- 数据订阅是从 WAL 消费数据,如果一些 WAL 文件被基于 WAL 保留策略删除,则已经删除的 WAL 文件中的数据就无法再消费到。需要根据业务需要在创建数据库时合理设置 `WAL_RETENTION_PERIOD` 或 `WAL_RETENTION_SIZE` ,并确保应用及时消费数据,这样才不会产生数据丢失的现象。数据订阅的行为与 Kafka 等广泛使用的消息队列类产品的行为相似;
|
||||
|
||||
说明(以c接口为例):
|
||||
1. 一个消费组消费同一个topic下的所有数据,不同消费组之间相互独立;
|
||||
2. 一个消费组消费同一个topic所有的vgroup,消费组可由多个消费者组成,但一个vgroup仅被一个消费者消费,如果消费者数量超过了vgroup数量,多余的消费者不消费数据;
|
||||
3. 在服务端每个vgroup仅保存一个offset,每个vgroup的offset是单调递增的,但不一定连续。各个vgroup的offset之间没有关联;
|
||||
4. 每次poll服务端会返回一个结果block,该block属于一个vgroup,可能包含多个wal版本的数据,可以通过 tmq_get_vgroup_offset 接口获得是该block第一条记录的offset;
|
||||
5. 一个消费组如果从未commit过offset,当其成员消费者重启重新拉取数据时,均从参数auto.offset.reset设定值开始消费;在一个消费者生命周期中,客户端本地记录了最近一次拉取数据的offset,不会拉取重复数据;
|
||||
6. 消费者如果异常终止(没有调用tmq_close),需等约12秒后触发其所属消费组rebalance,该消费者在服务端状态变为LOST,约1天后该消费者自动被删除;正常退出,退出后就会删除消费者;新增消费者,需等约2秒触发rebalance,该消费者在服务端状态变为ready;
|
||||
7. 消费组rebalance会对该组所有ready状态的消费者成员重新进行vgroup分配,消费者仅能对自己负责的vgroup进行assignment/seek/commit/poll操作;
|
||||
8. 消费者可利用 tmq_position 获得当前消费的offset,并seek到指定offset,重新消费;
|
||||
9. seek将position指向指定offset,不执行commit操作,一旦seek成功,可poll拉取指定offset及以后的数据;
|
||||
10. seek 操作之前须调用 tmq_get_topic_assignment 接口获取该consumer的vgroup ID和offset范围。seek 操作会检测vgroup ID 和 offset是否合法,如非法将报错;
|
||||
11. tmq_get_vgroup_offset接口获取的是记录所在结果block块里的第一条数据的offset,当seek至该offset时,将消费到这个block里的全部数据。参见第四点;
|
||||
12. 由于存在 WAL 过期删除机制,即使seek 操作成功,poll数据时有可能offset已失效。如果poll 的offset 小于 WAL 最小版本号,将会从WAL最小版本号消费;
|
||||
13. 数据订阅是从 WAL 消费数据,如果一些 WAL 文件被基于 WAL 保留策略删除,则已经删除的 WAL 文件中的数据就无法再消费到。需要根据业务需要在创建数据库时合理设置 `WAL_RETENTION_PERIOD` 或 `WAL_RETENTION_SIZE` ,并确保应用及时消费数据,这样才不会产生数据丢失的现象。数据订阅的行为与 Kafka 等广泛使用的消息队列类产品的行为相似;
|
||||
本文档不对消息队列本身的知识做更多的介绍,如果需要了解,请自行搜索。
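以 Python 连接器为例,上述 assignment/seek/position 语义可用如下代码示意(仅为草图:假设 taospy >= 2.7.9 提供 assignment/seek 接口,topic 名为假设值):

```python
from taos.tmq import Consumer, TopicPartition

consumer = Consumer({"group.id": "g1", "auto.offset.reset": "earliest"})
consumer.subscribe(["tmq_demo_topic"])
consumer.poll(1)  # 先 poll 一次,加入消费组并获得 vgroup 分配

for tp in consumer.assignment():
    # tp.partition 即 vgroup ID;tp.offset 为下次要取的位置
    print(tp.topic, tp.partition, tp.offset)
    # seek 只移动消费位置、不执行 commit;vgroup ID 或 offset 非法将报错
    consumer.seek(TopicPartition(tp.topic, tp.partition, tp.offset))

consumer.close()
```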
|
||||
|
||||
从 3.2.0.0 版本开始,数据订阅支持 vnode 迁移和分裂。
|
||||
由于数据订阅依赖 wal 文件,而在 vnode 迁移和分裂的过程中,wal 并不会同步过去,所以迁移或分裂后,之前没消费完的 wal 数据将无法再消费到。因此请确保将数据全部消费完后,再进行 vnode 迁移或分裂,否则消费时会丢失数据。
|
||||
|
||||
## 主要数据结构和 API
|
||||
|
||||
|
@ -55,17 +63,17 @@ import CDemo from "./_sub_c.mdx";
|
|||
typedef void(tmq_commit_cb(tmq_t *tmq, int32_t code, void *param));
|
||||
|
||||
typedef enum tmq_conf_res_t {
|
||||
TMQ_CONF_UNKNOWN = -2,
|
||||
TMQ_CONF_INVALID = -1,
|
||||
TMQ_CONF_OK = 0,
|
||||
} tmq_conf_res_t;
|
||||
TMQ_CONF_UNKNOWN = -2,
|
||||
TMQ_CONF_INVALID = -1,
|
||||
TMQ_CONF_OK = 0,
|
||||
} tmq_conf_res_t;
|
||||
|
||||
typedef struct tmq_topic_assignment {
|
||||
int32_t vgId;
|
||||
int64_t currentOffset;
|
||||
int64_t begin;
|
||||
int64_t end;
|
||||
} tmq_topic_assignment;
|
||||
int32_t vgId;
|
||||
int64_t currentOffset;
|
||||
int64_t begin;
|
||||
int64_t end;
|
||||
} tmq_topic_assignment;
|
||||
|
||||
DLL_EXPORT tmq_conf_t *tmq_conf_new();
|
||||
DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value);
|
||||
|
@ -98,7 +106,7 @@ import CDemo from "./_sub_c.mdx";
|
|||
DLL_EXPORT const char *tmq_get_db_name(TAOS_RES *res);
|
||||
DLL_EXPORT int32_t tmq_get_vgroup_id(TAOS_RES *res);
|
||||
DLL_EXPORT int64_t tmq_get_vgroup_offset(TAOS_RES* res);
|
||||
DLL_EXPORT const char *tmq_err2str(int32_t code);DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param);
|
||||
DLL_EXPORT const char *tmq_err2str(int32_t code);
|
||||
```
|
||||
|
||||
下面介绍一下它们的具体用法(超级表和子表结构请参考“数据建模”一节),完整的示例代码请见下面 C 语言的示例代码。
|
||||
|
@ -343,10 +351,11 @@ CREATE TOPIC topic_name [with meta] AS DATABASE db_name;
|
|||
| `td.connect.port` | integer | 服务端的端口号 | |
|
||||
| `group.id` | string | 消费组 ID,同一消费组共享消费进度 | <br />**必填项**。最大长度:192。<br />每个topic最多可建立100个 consumer group |
|
||||
| `client.id` | string | 客户端 ID | 最大长度:192。 |
|
||||
| `auto.offset.reset` | enum | 消费组订阅的初始位置 | <br />`earliest`: default;从头开始订阅; <br/>`latest`: 仅从最新数据开始订阅; <br/>`none`: 没有提交的 offset 无法订阅 |
|
||||
| `auto.offset.reset` | enum | 消费组订阅的初始位置 | <br />`earliest`: default(version < 3.2.0.0);从头开始订阅; <br/>`latest`: default(version >= 3.2.0.0);仅从最新数据开始订阅; <br/>`none`: 没有提交的 offset 无法订阅 |
|
||||
| `enable.auto.commit` | boolean | 是否启用消费位点自动提交,true: 自动提交,客户端应用无需commit;false:客户端应用需要自行commit | 默认值为 true |
|
||||
| `auto.commit.interval.ms` | integer | 消费记录自动提交消费位点时间间隔,单位为毫秒 | 默认值为 5000 |
|
||||
| `msg.with.table.name` | boolean | 是否允许从消息中解析表名, 不适用于列订阅(列订阅时可将 tbname 作为列写入 subquery 语句) |默认关闭 |
|
||||
| `msg.with.table.name` | boolean | 是否允许从消息中解析表名, 不适用于列订阅(列订阅时可将 tbname 作为列写入 subquery 语句)(从3.2.0.0版本该参数废弃,恒为true) |默认关闭 |
|
||||
| `enable.replay` | boolean | 是否开启数据回放功能 |默认关闭 |
|
||||
|
||||
对于不同编程语言,其设置方式如下:
|
||||
|
||||
|
@ -362,7 +371,7 @@ tmq_conf_set(conf, "auto.commit.interval.ms", "1000");
|
|||
tmq_conf_set(conf, "group.id", "cgrpName");
|
||||
tmq_conf_set(conf, "td.connect.user", "root");
|
||||
tmq_conf_set(conf, "td.connect.pass", "taosdata");
|
||||
tmq_conf_set(conf, "auto.offset.reset", "earliest");
|
||||
tmq_conf_set(conf, "auto.offset.reset", "latest");
|
||||
tmq_conf_set(conf, "msg.with.table.name", "true");
|
||||
tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
|
||||
|
||||
|
@ -392,7 +401,7 @@ properties.setProperty("group.id", "cgrpName");
|
|||
properties.setProperty("bootstrap.servers", "127.0.0.1:6030");
|
||||
properties.setProperty("td.connect.user", "root");
|
||||
properties.setProperty("td.connect.pass", "taosdata");
|
||||
properties.setProperty("auto.offset.reset", "earliest");
|
||||
properties.setProperty("auto.offset.reset", "latest");
|
||||
properties.setProperty("msg.with.table.name", "true");
|
||||
properties.setProperty("value.deserializer", "com.taos.example.MetersDeserializer");
|
||||
|
||||
|
@ -412,7 +421,7 @@ public class MetersDeserializer extends ReferenceDeserializer<Meters> {
|
|||
```go
|
||||
conf := &tmq.ConfigMap{
|
||||
"group.id": "test",
|
||||
"auto.offset.reset": "earliest",
|
||||
"auto.offset.reset": "latest",
|
||||
"td.connect.ip": "127.0.0.1",
|
||||
"td.connect.user": "root",
|
||||
"td.connect.pass": "taosdata",
|
||||
|
@ -432,7 +441,7 @@ consumer, err := NewConsumer(conf)
|
|||
let mut dsn: Dsn = "taos://".parse()?;
|
||||
dsn.set("group.id", "group1");
|
||||
dsn.set("client.id", "test");
|
||||
dsn.set("auto.offset.reset", "earliest");
|
||||
dsn.set("auto.offset.reset", "latest");
|
||||
|
||||
let tmq = TmqBuilder::from_dsn(dsn)?;
|
||||
|
||||
|
@ -451,7 +460,19 @@ from taos.tmq import Consumer
|
|||
# Syntax: `consumer = Consumer(configs)`
|
||||
#
|
||||
# Example:
|
||||
consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
|
||||
consumer = Consumer(
|
||||
{
|
||||
"group.id": "local",
|
||||
"client.id": "1",
|
||||
"enable.auto.commit": "true",
|
||||
"auto.commit.interval.ms": "1000",
|
||||
"td.connect.ip": "127.0.0.1",
|
||||
"td.connect.user": "root",
|
||||
"td.connect.pass": "taosdata",
|
||||
"auto.offset.reset": "latest",
|
||||
"msg.with.table.name": "true",
|
||||
}
|
||||
)
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
|
@ -468,7 +489,7 @@ let consumer = taos.consumer({
|
|||
'group.id': 'tg2',
|
||||
'td.connect.user': 'root',
|
||||
'td.connect.pass': 'taosdata',
|
||||
'auto.offset.reset','earliest',
|
||||
'auto.offset.reset','latest',
|
||||
'msg.with.table.name': 'true',
|
||||
'td.connect.ip','127.0.0.1',
|
||||
'td.connect.port','6030'
|
||||
|
@ -491,7 +512,7 @@ var cfg = new ConsumerConfig
|
|||
GourpId = "TDengine-TMQ-C#",
|
||||
TDConnectUser = "root",
|
||||
TDConnectPasswd = "taosdata",
|
||||
AutoOffsetReset = "earliest"
|
||||
AutoOffsetReset = "latest"
|
||||
MsgWithTableName = "true",
|
||||
TDConnectIp = "127.0.0.1",
|
||||
TDConnectPort = "6030"
|
||||
|
@ -507,6 +528,24 @@ var consumer = new ConsumerBuilder(cfg).Build();
|
|||
|
||||
上述配置中包括 consumer group ID,如果多个 consumer 指定的 consumer group ID 一样,则自动形成一个 consumer group,共享消费进度。
|
||||
|
||||
数据回放功能说明:
|
||||
- 订阅增加 replay 功能,按照数据写入的时间回放。
|
||||
比如,如下时间写入三条数据
|
||||
```sql
|
||||
2023/09/22 00:00:00.000
|
||||
2023/09/22 00:00:05.000
|
||||
2023/09/22 00:00:08.000
|
||||
```
|
||||
则订阅出第一条数据 5s 后返回第二条数据,获取第二条数据 3s 后返回第三条数据。
|
||||
- 仅列订阅支持数据回放
|
||||
- 回放需要保证独立时间线
|
||||
- 如果是子表订阅或者普通表订阅,只有一个vnode上有数据,保证是一个时间线
|
||||
- 如果超级表订阅,则需保证该 DB 只有一个vnode,否则报错(因为多个vnode上订阅出的数据不在一个时间线上)
|
||||
- 超级表和库订阅不支持回放
|
||||
- 增加 enable.replay 参数,true表示开启订阅回放功能,false表示不开启订阅回放功能,默认不开启。
|
||||
- 回放不支持进度保存,所以回放参数 enable.replay = true 时,auto commit 自动关闭
|
||||
- 因为数据回放本身需要处理时间,所以回放的精度存在几十ms的误差
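下面是开启回放功能的最小示意(topic 名为假设值;如上所述,enable.replay = true 时 auto commit 自动关闭):

```python
from taos.tmq import Consumer

consumer = Consumer({
    "group.id": "replay_group",
    "auto.offset.reset": "earliest",
    "enable.replay": "true",  # 按数据原始写入的时间间隔回放
})
# 须为列订阅且保证独立时间线,见上文规则
consumer.subscribe(["topic_single_table"])
while True:
    msg = consumer.poll(1)  # 消息之间的返回间隔近似原始写入间隔
    if msg is None:
        break
```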
|
||||
|
||||
## 订阅 *topics*
|
||||
|
||||
一个 consumer 支持同时订阅多个 topic。
|
||||
|
|
|
@ -1095,7 +1095,8 @@ TaosConsumer consumer = new TaosConsumer<>(config);
|
|||
- httpConnectTimeout: 创建连接超时参数,单位 ms,默认为 5000 ms。仅在 WebSocket 连接下有效。
|
||||
- messageWaitTimeout: 数据传输超时参数,单位 ms,默认为 10000 ms。仅在 WebSocket 连接下有效。
|
||||
- httpPoolSize: 同一个连接下最大并行请求数。仅在 WebSocket 连接下有效。
|
||||
其他参数请参考:[Consumer 参数列表](../../develop/tmq#创建-consumer-以及consumer-group)
|
||||
其他参数请参考:[Consumer 参数列表](../../develop/tmq#创建-consumer-以及consumer-group)。注意:TDengine 服务端自 3.2.0.0 版本开始,消息订阅中的 auto.offset.reset 默认值发生变化。
|
||||
|
||||
|
||||
#### 订阅消费数据
|
||||
|
||||
|
@ -1193,7 +1194,7 @@ public abstract class ConsumerLoop {
|
|||
config.setProperty("bootstrap.servers", "localhost:6030");
|
||||
config.setProperty("td.connect.user", "root");
|
||||
config.setProperty("td.connect.pass", "taosdata");
|
||||
config.setProperty("auto.offset.reset", "earliest");
|
||||
config.setProperty("auto.offset.reset", "latest");
|
||||
config.setProperty("msg.with.table.name", "true");
|
||||
config.setProperty("enable.auto.commit", "true");
|
||||
config.setProperty("auto.commit.interval.ms", "1000");
|
||||
|
@ -1201,7 +1202,6 @@ public abstract class ConsumerLoop {
|
|||
config.setProperty("client.id", "1");
|
||||
config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer");
|
||||
config.setProperty("value.deserializer.encoding", "UTF-8");
|
||||
config.setProperty("experimental.snapshot.enable", "true");
|
||||
|
||||
this.consumer = new TaosConsumer<>(config);
|
||||
this.topics = Collections.singletonList("topic_speed");
|
||||
|
@ -1279,7 +1279,7 @@ public abstract class ConsumerLoop {
|
|||
config.setProperty("bootstrap.servers", "localhost:6041");
|
||||
config.setProperty("td.connect.user", "root");
|
||||
config.setProperty("td.connect.pass", "taosdata");
|
||||
config.setProperty("auto.offset.reset", "earliest");
|
||||
config.setProperty("auto.offset.reset", "latest");
|
||||
config.setProperty("msg.with.table.name", "true");
|
||||
config.setProperty("enable.auto.commit", "true");
|
||||
config.setProperty("auto.commit.interval.ms", "1000");
|
||||
|
@ -1287,7 +1287,6 @@ public abstract class ConsumerLoop {
|
|||
config.setProperty("client.id", "1");
|
||||
config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer");
|
||||
config.setProperty("value.deserializer.encoding", "UTF-8");
|
||||
config.setProperty("experimental.snapshot.enable", "true");
|
||||
|
||||
this.consumer = new TaosConsumer<>(config);
|
||||
this.topics = Collections.singletonList("topic_speed");
|
||||
|
|
|
@ -797,7 +797,7 @@ TDengine Go 连接器支持订阅功能,应用 API 如下:
|
|||
```go
|
||||
consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{
|
||||
"group.id": "test",
|
||||
"auto.offset.reset": "earliest",
|
||||
"auto.offset.reset": "latest",
|
||||
"td.connect.ip": "127.0.0.1",
|
||||
"td.connect.user": "root",
|
||||
"td.connect.pass": "taosdata",
|
||||
|
@ -873,6 +873,7 @@ package main
|
|||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/taosdata/driver-go/v3/af"
|
||||
"github.com/taosdata/driver-go/v3/af/tmq"
|
||||
|
@ -893,19 +894,16 @@ func main() {
|
|||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{
|
||||
"group.id": "test",
|
||||
"auto.offset.reset": "earliest",
|
||||
"td.connect.ip": "127.0.0.1",
|
||||
"td.connect.user": "root",
|
||||
"td.connect.pass": "taosdata",
|
||||
"td.connect.port": "6030",
|
||||
"client.id": "test_tmq_client",
|
||||
"enable.auto.commit": "false",
|
||||
"msg.with.table.name": "true",
|
||||
"group.id": "test",
|
||||
"auto.offset.reset": "latest",
|
||||
"td.connect.ip": "127.0.0.1",
|
||||
"td.connect.user": "root",
|
||||
"td.connect.pass": "taosdata",
|
||||
"td.connect.port": "6030",
|
||||
"client.id": "test_tmq_client",
|
||||
"enable.auto.commit": "false",
|
||||
"msg.with.table.name": "true",
|
||||
})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
|
@ -918,10 +916,16 @@ func main() {
|
|||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
_, err = db.Exec("insert into example_tmq.t1 values(now,1)")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
go func() {
|
||||
for {
|
||||
_, err = db.Exec("insert into example_tmq.t1 values(now,1)")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
time.Sleep(time.Millisecond * 100)
|
||||
}
|
||||
}()
|
||||
|
||||
for i := 0; i < 5; i++ {
|
||||
ev := consumer.Poll(500)
|
||||
if ev != nil {
|
||||
|
@ -975,6 +979,7 @@ package main
|
|||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/taosdata/driver-go/v3/common"
|
||||
tmqcommon "github.com/taosdata/driver-go/v3/common/tmq"
|
||||
|
@ -998,7 +1003,7 @@ func main() {
|
|||
"td.connect.pass": "taosdata",
|
||||
"group.id": "example",
|
||||
"client.id": "example_consumer",
|
||||
"auto.offset.reset": "earliest",
|
||||
"auto.offset.reset": "latest",
|
||||
})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
|
@ -1007,29 +1012,34 @@ func main() {
|
|||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
_, err = db.Exec("create table example_ws_tmq.t_all(ts timestamp," +
|
||||
"c1 bool," +
|
||||
"c2 tinyint," +
|
||||
"c3 smallint," +
|
||||
"c4 int," +
|
||||
"c5 bigint," +
|
||||
"c6 tinyint unsigned," +
|
||||
"c7 smallint unsigned," +
|
||||
"c8 int unsigned," +
|
||||
"c9 bigint unsigned," +
|
||||
"c10 float," +
|
||||
"c11 double," +
|
||||
"c12 binary(20)," +
|
||||
"c13 nchar(20)" +
|
||||
")")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
go func() {
|
||||
_, err := db.Exec("create table example_ws_tmq.t_all(ts timestamp," +
|
||||
"c1 bool," +
|
||||
"c2 tinyint," +
|
||||
"c3 smallint," +
|
||||
"c4 int," +
|
||||
"c5 bigint," +
|
||||
"c6 tinyint unsigned," +
|
||||
"c7 smallint unsigned," +
|
||||
"c8 int unsigned," +
|
||||
"c9 bigint unsigned," +
|
||||
"c10 float," +
|
||||
"c11 double," +
|
||||
"c12 binary(20)," +
|
||||
"c13 nchar(20)" +
|
||||
")")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
_, err = db.Exec("insert into example_ws_tmq.t_all values(now,true,2,3,4,5,6,7,8,9,10.123,11.123,'binary','nchar')")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
for {
|
||||
_, err = db.Exec("insert into example_ws_tmq.t_all values(now,true,2,3,4,5,6,7,8,9,10.123,11.123,'binary','nchar')")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
time.Sleep(time.Millisecond * 100)
|
||||
}
|
||||
|
||||
}()
|
||||
for i := 0; i < 5; i++ {
|
||||
ev := consumer.Poll(500)
|
||||
|
|
|
@ -447,7 +447,7 @@ consumer.unsubscribe().await;
|
|||
|
||||
- `group.id`: 同一个消费者组,将以至少消费一次的方式进行消息负载均衡。
|
||||
- `client.id`: 可选的订阅客户端识别项。
|
||||
- `auto.offset.reset`: 可选初始化订阅起点, *earliest* 为从头开始订阅, *latest* 为仅从最新数据开始订阅,默认为从头订阅。注意,此选项在同一个 `group.id` 中仅生效一次。
|
||||
- `auto.offset.reset`: 可选初始化订阅起点, *earliest* 为从头开始订阅, *latest* 为仅从最新数据开始订阅,默认值根据 TDengine 版本有所不同,详细参见 [数据订阅](https://docs.taosdata.com/develop/tmq/)。注意,此选项在同一个 `group.id` 中仅生效一次。
|
||||
- `enable.auto.commit`: 当设置为 `true` 时,将启用自动标记模式,当对数据一致性不敏感时,可以启用此方式。
|
||||
- `auto.commit.interval.ms`: 自动标记的时间间隔。
|
||||
|
||||
|
|
|
@ -33,11 +33,13 @@ Python 连接器的源码托管在 [GitHub](https://github.com/taosdata/taos-con
|
|||
|
||||
|Python Connector 版本|主要变化|
|
||||
|:-------------------:|:----:|
|
||||
|2.7.12|1. 新增 varbinary 类型支持(STMT暂不支持 varbinary )<br/> 2. query 性能提升(感谢贡献者[hadrianl](https://github.com/taosdata/taos-connector-python/pull/209))|
|
||||
|2.7.9|数据订阅支持获取消费进度和重置消费进度|
|
||||
|2.7.8|新增 `execute_many`|
|
||||
|
||||
|Python Websocket Connector 版本|主要变化|
|
||||
|:----------------------------:|:-----:|
|
||||
|0.2.9|已知问题修复|
|
||||
|0.2.5|1. 数据订阅支持获取消费进度和重置消费进度 <br/> 2. 支持 schemaless <br/> 3. 支持 STMT|
|
||||
|0.2.4|数据订阅新增取消订阅方法|
|
||||
|
||||
|
|
|
@ -56,7 +56,7 @@ database_option: {
|
|||
- WAL_FSYNC_PERIOD:当 WAL 参数设置为 2 时,落盘的周期。默认为 3000,单位毫秒。最小为 0,表示每次写入立即落盘;最大为 180000,即三分钟。
|
||||
- MAXROWS:文件块中记录的最大条数,默认为 4096 条。
|
||||
- MINROWS:文件块中记录的最小条数,默认为 100 条。
|
||||
- KEEP:表示数据文件保存的天数,缺省值为 3650,取值范围 [1, 365000],且必须大于或等于 DURATION 参数值。数据库会自动删除保存时间超过 KEEP 值的数据。KEEP 可以使用加单位的表示形式,如 KEEP 100h、KEEP 10d 等,支持 m(分钟)、h(小时)和 d(天)三个单位。也可以不写单位,如 KEEP 50,此时默认单位为天。企业版支持[多级存储](https://docs.taosdata.com/tdinternal/arch/#%E5%A4%9A%E7%BA%A7%E5%AD%98%E5%82%A8)功能, 因此, 可以设置多个保存时间(多个以英文逗号分隔,最多 3 个,满足 keep 0 <= keep 1 <= keep 2,如 KEEP 100h,100d,3650d); 社区版不支持多级存储功能(即使配置了多个保存时间, 也不会生效, KEEP 会取最大的保存时间)。
|
||||
- KEEP:表示数据文件保存的天数,缺省值为 3650,取值范围 [1, 365000],且必须大于或等于 3 倍的 DURATION 参数值。数据库会自动删除保存时间超过 KEEP 值的数据。KEEP 可以使用加单位的表示形式,如 KEEP 100h、KEEP 10d 等,支持 m(分钟)、h(小时)和 d(天)三个单位。也可以不写单位,如 KEEP 50,此时默认单位为天。企业版支持[多级存储](https://docs.taosdata.com/tdinternal/arch/#%E5%A4%9A%E7%BA%A7%E5%AD%98%E5%82%A8)功能, 因此, 可以设置多个保存时间(多个以英文逗号分隔,最多 3 个,满足 keep 0 <= keep 1 <= keep 2,如 KEEP 100h,100d,3650d); 社区版不支持多级存储功能(即使配置了多个保存时间, 也不会生效, KEEP 会取最大的保存时间)。
|
||||
- PAGES:一个 VNODE 中元数据存储引擎的缓存页个数,默认为 256,最小 64。一个 VNODE 元数据存储占用 PAGESIZE \* PAGES,默认情况下为 1MB 内存。
|
||||
- PAGESIZE:一个 VNODE 中元数据存储引擎的页大小,单位为 KB,默认为 4 KB。范围为 1 到 16384,即 1 KB 到 16 MB。
|
||||
- PRECISION:数据库的时间戳精度。ms 表示毫秒,us 表示微秒,ns 表示纳秒,默认 ms 毫秒。
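以下为组合使用上述部分参数建库的示意(假设已安装 taospy 连接器,数据库名与取值仅为示例;注意 KEEP 至少为 DURATION 的 3 倍):

```python
import taos

conn = taos.connect()
conn.execute(
    "CREATE DATABASE IF NOT EXISTS sensor "
    "DURATION 10d KEEP 30d "      # KEEP >= 3 * DURATION
    "WAL_FSYNC_PERIOD 3000 "      # WAL 每 3000 毫秒落盘一次
    "PRECISION 'ms'"              # 毫秒级时间戳精度
)
conn.close()
```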
|
||||
|
|
|
@ -24,7 +24,7 @@ SELECT [hints] [DISTINCT] select_list
|
|||
hints: /*+ [hint([hint_param_list])] [hint([hint_param_list])] */
|
||||
|
||||
hint:
|
||||
BATCH_SCAN | NO_BATCH_SCAN
|
||||
BATCH_SCAN | NO_BATCH_SCAN | SORT_FOR_GROUP
|
||||
|
||||
select_list:
|
||||
select_expr [, select_expr] ...
|
||||
|
@ -87,15 +87,17 @@ Hints 是用户控制单个语句查询优化的一种手段,当 Hint 不适
|
|||
|
||||
目前支持的 Hints 列表如下:
|
||||
|
||||
| **Hint** | **参数** | **说明** | **适用范围** |
|
||||
| :-----------: | -------------- | -------------------------- | -------------------------- |
|
||||
| BATCH_SCAN | 无 | 采用批量读表的方式 | 超级表 JOIN 语句 |
|
||||
| NO_BATCH_SCAN | 无 | 采用顺序读表的方式 | 超级表 JOIN 语句 |
|
||||
| **Hint** | **参数** | **说明** | **适用范围** |
|
||||
| :-----------: | -------------- | -------------------------- | -----------------------------|
|
||||
| BATCH_SCAN | 无 | 采用批量读表的方式 | 超级表 JOIN 语句 |
|
||||
| NO_BATCH_SCAN | 无 | 采用顺序读表的方式 | 超级表 JOIN 语句 |
|
||||
| SORT_FOR_GROUP | 无 | 采用 sort 方式进行分组 | partition by 列表有普通列时 |
|
||||
|
||||
举例:
|
||||
|
||||
```sql
|
||||
SELECT /*+ BATCH_SCAN() */ a.ts FROM stable1 a, stable2 b where a.tag0 = b.tag0 and a.ts = b.ts;
|
||||
SELECT /*+ SORT_FOR_GROUP() */ count(*), c1 FROM stable1 PARTITION BY c1;
|
||||
```
|
||||
|
||||
## 列表
|
||||
|
|
|
@ -54,6 +54,7 @@ LIKE 条件使用通配符字符串进行匹配检查,规则如下:
|
|||
MATCH 条件和 NMATCH 条件使用正则表达式进行匹配,规则如下:
|
||||
|
||||
- 支持符合 POSIX 规范的正则表达式,具体规范内容可参见 Regular Expressions。
|
||||
- MATCH 和正则表达式匹配时,返回 TRUE;NMATCH 和正则表达式不匹配时,返回 TRUE。
|
||||
- 只能针对子表名(即 tbname)、字符串类型的标签值进行正则表达式过滤,不支持普通列的过滤。
|
||||
- 正则匹配字符串长度不能超过 128 字节。可以通过参数 maxRegexStringLen 设置和调整最大允许的正则匹配字符串,该参数是客户端配置参数,需要重启客户端才能生效
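例如,下述示意(假设存在超级表 meters,其子表名形如 d0、d1 等,数据库与表名均为假设)按正则表达式过滤子表名;按上述规则,MATCH 只能作用于 tbname 和字符串类型的标签:

```python
import taos

conn = taos.connect(database="power")
# 仅匹配名称形如 d0、d1、d100 的子表
result = conn.query(
    "SELECT DISTINCT tbname FROM meters WHERE tbname MATCH '^d[0-9]+$'"
)
for row in result:
    print(row)
conn.close()
```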
|
||||
|
||||
|
|
|
@ -180,6 +180,7 @@ description: TDengine 保留关键字的详细列表
|
|||
- MAX_DELAY
|
||||
- BWLIMIT
|
||||
- MAXROWS
|
||||
- MAX_SPEED
|
||||
- MERGE
|
||||
- META
|
||||
- MINROWS
|
||||
|
|
|
@ -26,7 +26,7 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
|
||||
## INS_DNODES
|
||||
|
||||
提供 dnode 的相关信息。也可以使用 SHOW DNODES 来查询这些信息。
|
||||
提供 dnode 的相关信息。也可以使用 SHOW DNODES 来查询这些信息。SYSINFO 属性为 0 的用户不能查看此表。
|
||||
|
||||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :------------: | ------------ | ----------------------------------------------------------------------------------------------------- |
|
||||
|
@ -40,7 +40,7 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
|
||||
## INS_MNODES
|
||||
|
||||
提供 mnode 的相关信息。也可以使用 SHOW MNODES 来查询这些信息。
|
||||
提供 mnode 的相关信息。也可以使用 SHOW MNODES 来查询这些信息。SYSINFO 属性为 0 的用户不能查看此表。
|
||||
|
||||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :---------: | ------------ | ------------------ |
|
||||
|
@ -52,22 +52,33 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
|
||||
## INS_QNODES
|
||||
|
||||
当前系统中 QNODE 的信息。也可以使用 SHOW QNODES 来查询这些信息。
|
||||
当前系统中 QNODE 的信息。也可以使用 SHOW QNODES 来查询这些信息。SYSINFO 属性为 0 的用户不能查看此表。
|
||||
|
||||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :---------: | ------------ | ------------ |
|
||||
| 1 | id | SMALLINT | qnode id |
|
||||
| 2 | endpoint | BINARY(134) | qnode 的地址 |
|
||||
| 2 | endpoint | VARCHAR(134) | qnode 的地址 |
|
||||
| 3 | create_time | TIMESTAMP | 创建时间 |
|
||||
|
||||
## INS_SNODES
|
||||
|
||||
当前系统中 SNODE 的信息。也可以使用 SHOW SNODES 来查询这些信息。SYSINFO 属性为 0 的用户不能查看此表。
|
||||
|
||||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :---------: | ------------ | ------------ |
|
||||
| 1 | id | SMALLINT | snode id |
|
||||
| 2 | endpoint | VARCHAR(134) | snode 的地址 |
|
||||
| 3 | create_time | TIMESTAMP | 创建时间 |
|
||||
|
||||
|
||||
## INS_CLUSTER
|
||||
|
||||
存储集群相关信息。
|
||||
存储集群相关信息。 SYSINFO 属性为 0 的用户不能查看此表。
|
||||
|
||||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :---------: | ------------ | ---------- |
|
||||
| 1 | id | BIGINT | cluster id |
|
||||
| 2 | name | BINARY(134) | 集群名称 |
|
||||
| 2 | name | VARCHAR(134) | 集群名称 |
|
||||
| 3 | create_time | TIMESTAMP | 创建时间 |
|
||||
|
||||
## INS_DATABASES
|
||||
|
@ -76,25 +87,25 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
|
||||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :------------------: | ---------------- | ------------------------------------------------ |
|
||||
| 1 | name | BINARY(32) | 数据库名 |
|
||||
| 1 | name | VARCHAR(64) | 数据库名 |
|
||||
| 2 | create_time | TIMESTAMP | 创建时间 |
|
||||
| 3 | ntables | INT | 数据库中表的数量,包含子表和普通表但不包含超级表 |
|
||||
| 4 | vgroups | INT | 数据库中有多少个 vgroup。需要注意,`vgroups` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 6 | replica | INT | 副本数。需要注意,`replica` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 7 | strict | BINARY(4) | 废弃参数 |
|
||||
| 8 | duration | INT | 单文件存储数据的时间跨度。需要注意,`duration` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 9 | keep | INT | 数据保留时长。需要注意,`keep` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 7 | strict | VARCHAR(4) | 废弃参数 |
|
||||
| 8 | duration | VARCHAR(10) | 单文件存储数据的时间跨度。需要注意,`duration` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 9 | keep | VARCHAR(32) | 数据保留时长。需要注意,`keep` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 10 | buffer | INT | 每个 vnode 写缓存的内存块大小,单位 MB。需要注意,`buffer` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 11 | pagesize | INT | 每个 VNODE 中元数据存储引擎的页大小,单位为 KB。需要注意,`pagesize` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 12 | pages | INT | 每个 vnode 元数据存储引擎的缓存页个数。需要注意,`pages` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 13 | minrows | INT | 文件块中记录的最小条数。需要注意,`minrows` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 14 | maxrows | INT | 文件块中记录的最大条数。需要注意,`maxrows` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 15 | comp | INT | 数据压缩方式。需要注意,`comp` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 16 | precision | BINARY(2) | 时间分辨率。需要注意,`precision` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 17 | status | BINARY(10) | 数据库状态 |
|
||||
| 18 | retentions | BINARY (60) | 数据的聚合周期和保存时长。需要注意,`retentions` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 16 | precision | VARCHAR(2) | 时间分辨率。需要注意,`precision` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 17 | status | VARCHAR(10) | 数据库状态 |
|
||||
| 18 | retentions | VARCHAR(60) | 数据的聚合周期和保存时长。需要注意,`retentions` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 19 | single_stable | BOOL | 表示此数据库中是否只可以创建一个超级表。需要注意,`single_stable` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 20 | cachemodel | BINARY(60) | 表示是否在内存中缓存子表的最近数据。需要注意,`cachemodel` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 20 | cachemodel | VARCHAR(60) | 表示是否在内存中缓存子表的最近数据。需要注意,`cachemodel` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 21 | cachesize | INT | 表示每个 vnode 中用于缓存子表最近数据的内存大小。需要注意,`cachesize` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 22 | wal_level | INT | WAL 级别。需要注意,`wal_level` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 23 | wal_fsync_period | INT | 数据落盘周期。需要注意,`wal_fsync_period` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
|
@ -111,15 +122,15 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
|
||||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :-----------: | ------------- | --------------------------------------------------------------------------------------------- |
|
||||
| 1 | name | BINARY(64) | 函数名 |
|
||||
| 2 | comment | BINARY(255) | 补充说明。需要注意,`comment` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 1 | name | VARCHAR(64) | 函数名 |
|
||||
| 2 | comment | VARCHAR(255) | 补充说明。需要注意,`comment` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 3 | aggregate | INT | 是否为聚合函数。需要注意,`aggregate` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 4 | output_type | BINARY(31) | 输出类型 |
|
||||
| 4 | output_type | VARCHAR(31) | 输出类型 |
|
||||
| 5 | create_time | TIMESTAMP | 创建时间 |
|
||||
| 6 | code_len | INT | 代码长度 |
|
||||
| 7 | bufsize | INT | buffer 大小 |
|
||||
| 8 | func_language | BINARY(31) | 自定义函数编程语言 |
|
||||
| 9 | func_body | BINARY(16384) | 函数体定义 |
|
||||
| 8 | func_language | VARCHAR(31) | 自定义函数编程语言 |
|
||||
| 9 | func_body | VARCHAR(16384) | 函数体定义 |
|
||||
| 10 | func_version | INT | 函数版本号。初始版本为0,每次替换更新,版本号加1。 |
|
||||
|
||||
|
||||
|
@ -129,12 +140,12 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
|
||||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :--------------: | ------------ | ------------------------------------------------------- |
|
||||
| 1 | db_name | BINARY(32) | 包含此索引的表所在的数据库名 |
|
||||
| 2 | table_name | BINARY(192) | 包含此索引的表的名称 |
|
||||
| 3 | index_name | BINARY(192) | 索引名 |
|
||||
| 4 | column_name | BINARY(64) | 建索引的列的列名 |
|
||||
| 5 | index_type | BINARY(10) | 目前有 SMA 和 tag |
|
||||
| 6 | index_extensions | BINARY(256) | 索引的额外信息。对 SMA/tag 类型的索引,是函数名的列表。 |
|
||||
| 1 | db_name | VARCHAR(32) | 包含此索引的表所在的数据库名 |
|
||||
| 2 | table_name | VARCHAR(192) | 包含此索引的表的名称 |
|
||||
| 3 | index_name | VARCHAR(192) | 索引名 |
|
||||
| 4 | column_name | VARCHAR(64) | 建索引的列的列名 |
|
||||
| 5 | index_type | VARCHAR(10) | 目前有 SMA 和 tag |
|
||||
| 6 | index_extensions | VARCHAR(256) | 索引的额外信息。对 SMA/tag 类型的索引,是函数名的列表。 |
|
||||
|
||||
## INS_STABLES
|
||||
|
||||
|
@ -142,16 +153,16 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
|
||||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :-----------: | ------------ | ----------------------------------------------------------------------------------------------------- |
|
||||
| 1 | stable_name | BINARY(192) | 超级表表名 |
|
||||
| 2 | db_name | BINARY(64) | 超级表所在的数据库的名称 |
|
||||
| 1 | stable_name | VARCHAR(192) | 超级表表名 |
|
||||
| 2 | db_name | VARCHAR(64) | 超级表所在的数据库的名称 |
|
||||
| 3 | create_time | TIMESTAMP | 创建时间 |
|
||||
| 4 | columns | INT | 列数目 |
|
||||
| 5 | tags | INT | 标签数目。需要注意,`tags` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 6 | last_update | TIMESTAMP | 最后更新时间 |
|
||||
| 7 | table_comment | BINARY(1024) | 表注释 |
|
||||
| 8 | watermark | BINARY(64) | 窗口的关闭时间。需要注意,`watermark` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 9 | max_delay | BINARY(64) | 推送计算结果的最大延迟。需要注意,`max_delay` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 10 | rollup | BINARY(128) | rollup 聚合函数。需要注意,`rollup` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 7 | table_comment | VARCHAR(1024) | 表注释 |
|
||||
| 8 | watermark | VARCHAR(64) | 窗口的关闭时间。需要注意,`watermark` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 9 | max_delay | VARCHAR(64) | 推送计算结果的最大延迟。需要注意,`max_delay` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 10 | rollup | VARCHAR(128) | rollup 聚合函数。需要注意,`rollup` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
|
||||
## INS_TABLES
|
||||
|
||||
|
@ -159,37 +170,37 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
|
||||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :-----------: | ------------ | ------------------------------------------------------------------------------------- |
|
||||
| 1 | table_name | BINARY(192) | 表名 |
|
||||
| 2 | db_name | BINARY(64) | 数据库名 |
|
||||
| 1 | table_name | VARCHAR(192) | 表名 |
|
||||
| 2 | db_name | VARCHAR(64) | 数据库名 |
|
||||
| 3 | create_time | TIMESTAMP | 创建时间 |
|
||||
| 4 | columns | INT | 列数目 |
|
||||
| 5 | stable_name | BINARY(192) | 所属的超级表表名 |
|
||||
| 5 | stable_name | VARCHAR(192) | 所属的超级表表名 |
|
||||
| 6 | uid | BIGINT | 表 id |
|
||||
| 7 | vgroup_id | INT | vgroup id |
|
||||
| 8 | ttl | INT | 表的生命周期。需要注意,`ttl` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 9 | table_comment | BINARY(1024) | 表注释 |
|
||||
| 10 | type | BINARY(21) | 表类型 |
|
||||
| 9 | table_comment | VARCHAR(1024) | 表注释 |
|
||||
| 10 | type | VARCHAR(21) | 表类型 |
|
||||
|
||||
## INS_TAGS
|
||||
|
||||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :---------: | ------------- | ---------------------- |
|
||||
| 1 | table_name | BINARY(192) | 表名 |
|
||||
| 2 | db_name | BINARY(64) | 该表所在的数据库的名称 |
|
||||
| 3 | stable_name | BINARY(192) | 所属的超级表表名 |
|
||||
| 4 | tag_name | BINARY(64) | tag 的名称 |
|
||||
| 5 | tag_type | BINARY(64) | tag 的类型 |
|
||||
| 6 | tag_value | BINARY(16384) | tag 的值 |
|
||||
| 1 | table_name | VARCHAR(192) | 表名 |
|
||||
| 2 | db_name | VARCHAR(64) | 该表所在的数据库的名称 |
|
||||
| 3 | stable_name | VARCHAR(192) | 所属的超级表表名 |
|
||||
| 4 | tag_name | VARCHAR(64) | tag 的名称 |
|
||||
| 5 | tag_type | VARCHAR(64) | tag 的类型 |
|
||||
| 6 | tag_value | VARCHAR(16384) | tag 的值 |
|
||||
|
||||
## INS_COLUMNS
|
||||
|
||||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :-----------: | ------------ | ---------------------- |
|
||||
| 1 | table_name | BINARY(192) | 表名 |
|
||||
| 2 | db_name | BINARY(64) | 该表所在的数据库的名称 |
|
||||
| 3 | table_type | BINARY(21) | 表类型 |
|
||||
| 4 | col_name | BINARY(64) | 列 的名称 |
|
||||
| 5 | col_type | BINARY(32) | 列 的类型 |
|
||||
| 1 | table_name | VARCHAR(192) | 表名 |
|
||||
| 2 | db_name | VARCHAR(64) | 该表所在的数据库的名称 |
|
||||
| 3 | table_type | VARCHAR(21) | 表类型 |
|
||||
| 4 | col_name | VARCHAR(64) | 列 的名称 |
|
||||
| 5 | col_type | VARCHAR(32) | 列 的类型 |
|
||||
| 6 | col_length | INT | 列 的长度 |
|
||||
| 7 | col_precision | INT | 列 的精度 |
|
||||
| 8 | col_scale | INT | 列 的比例 |
|
||||
|
@ -197,51 +208,51 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
|
||||
## INS_USERS
|
||||
|
||||
提供系统中创建的用户的相关信息。
|
||||
提供系统中创建的用户的相关信息。SYSINFO 属性为 0 的用户不能查看此表。
|
||||
|
||||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :---------: | ------------ | -------- |
|
||||
| 1 | user_name | BINARY(23) | 用户名 |
|
||||
| 2 | privilege | BINARY(256) | 权限 |
|
||||
| 1 | user_name | VARCHAR(23) | 用户名 |
|
||||
| 2 | privilege | VARCHAR(256) | 权限 |
|
||||
| 3 | create_time | TIMESTAMP | 创建时间 |
|
||||
|
||||
## INS_GRANTS
|
||||
|
||||
提供企业版授权的相关信息。
|
||||
提供企业版授权的相关信息。SYSINFO 属性为 0 的用户不能查看此表。
|
||||
|
||||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :---------: | ------------ | --------------------------------------------------------------------------------------------------------- |
|
||||
| 1 | version | BINARY(9) | 企业版授权说明:official(官方授权的)/trial(试用的) |
|
||||
| 2 | cpu_cores | BINARY(9) | 授权使用的 CPU 核心数量 |
|
||||
| 3 | dnodes | BINARY(10) | 授权使用的 dnode 节点数量。需要注意,`dnodes` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 4 | streams | BINARY(10) | 授权创建的流数量。需要注意,`streams` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 5 | users | BINARY(10) | 授权创建的用户数量。需要注意,`users` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 6 | accounts | BINARY(10) | 授权创建的帐户数量。需要注意,`accounts` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 7 | storage | BINARY(21) | 授权使用的存储空间大小。需要注意,`storage` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 8 | connections | BINARY(21) | 授权使用的客户端连接数量。需要注意,`connections` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 9 | databases | BINARY(11) | 授权使用的数据库数量。需要注意,`databases` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 10 | speed | BINARY(9) | 授权使用的数据点每秒写入数量 |
|
||||
| 11 | querytime | BINARY(9) | 授权使用的查询总时长 |
|
||||
| 12 | timeseries | BINARY(21) | 授权使用的测点数量 |
|
||||
| 13 | expired | BINARY(5) | 是否到期,true:到期,false:未到期 |
|
||||
| 14 | expire_time | BINARY(19) | 试用期到期时间 |
|
||||
| 1 | version | VARCHAR(9) | 企业版授权说明:official(官方授权的)/trial(试用的) |
|
||||
| 2 | cpu_cores | VARCHAR(9) | 授权使用的 CPU 核心数量 |
|
||||
| 3 | dnodes | VARCHAR(10) | 授权使用的 dnode 节点数量。需要注意,`dnodes` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 4 | streams | VARCHAR(10) | 授权创建的流数量。需要注意,`streams` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 5 | users | VARCHAR(10) | 授权创建的用户数量。需要注意,`users` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 6 | accounts | VARCHAR(10) | 授权创建的帐户数量。需要注意,`accounts` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 7 | storage | VARCHAR(21) | 授权使用的存储空间大小。需要注意,`storage` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 8 | connections | VARCHAR(21) | 授权使用的客户端连接数量。需要注意,`connections` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 9 | databases | VARCHAR(11) | 授权使用的数据库数量。需要注意,`databases` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 10 | speed | VARCHAR(9) | 授权使用的数据点每秒写入数量 |
|
||||
| 11 | querytime | VARCHAR(9) | 授权使用的查询总时长 |
|
||||
| 12 | timeseries | VARCHAR(21) | 授权使用的测点数量 |
|
||||
| 13 | expired | VARCHAR(5) | 是否到期,true:到期,false:未到期 |
|
||||
| 14 | expire_time | VARCHAR(19) | 试用期到期时间 |
|
||||
|
||||
## INS_VGROUPS
|
||||
|
||||
系统中所有 vgroups 的信息。
|
||||
系统中所有 vgroups 的信息。SYSINFO 属性为 0 的用户不能查看此表。
|
||||
|
||||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :-------: | ------------ | ------------------------------------------------------------------------------------------------ |
|
||||
| 1 | vgroup_id | INT | vgroup id |
|
||||
| 2 | db_name | BINARY(32) | 数据库名 |
|
||||
| 2 | db_name | VARCHAR(32) | 数据库名 |
|
||||
| 3 | tables | INT | 此 vgroup 内有多少表。需要注意,`tables` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 4 | status | BINARY(10) | 此 vgroup 的状态 |
|
||||
| 4 | status | VARCHAR(10) | 此 vgroup 的状态 |
|
||||
| 5 | v1_dnode | INT | 第一个成员所在的 dnode 的 id |
|
||||
| 6 | v1_status | BINARY(10) | 第一个成员的状态 |
|
||||
| 6 | v1_status | VARCHAR(10) | 第一个成员的状态 |
|
||||
| 7 | v2_dnode | INT | 第二个成员所在的 dnode 的 id |
|
||||
| 8 | v2_status | BINARY(10) | 第二个成员的状态 |
|
||||
| 8 | v2_status | VARCHAR(10) | 第二个成员的状态 |
|
||||
| 9 | v3_dnode | INT | 第三个成员所在的 dnode 的 id |
|
||||
| 10 | v3_status | BINARY(10) | 第三个成员的状态 |
|
||||
| 10 | v3_status | VARCHAR(10) | 第三个成员的状态 |
|
||||
| 11 | nfiles | INT | 此 vgroup 中数据/元数据文件的数量 |
|
||||
| 12 | file_size | INT | 此 vgroup 中数据/元数据文件的大小 |
|
||||
| 13 | tsma | TINYINT | 此 vgroup 是否专用于 Time-range-wise SMA,1: 是, 0: 否 |
|
||||
|
@ -252,55 +263,57 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
|
||||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :------: | ------------ | --------------------------------------------------------------------------------------- |
|
||||
| 1 | name | BINARY(32) | 配置项名称 |
|
||||
| 2 | value | BINARY(64) | 该配置项的值。需要注意,`value` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 1 | name | VARCHAR(32) | 配置项名称 |
|
||||
| 2 | value | VARCHAR(64) | 该配置项的值。需要注意,`value` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
|
||||
## INS_DNODE_VARIABLES
|
||||
|
||||
系统中每个 dnode 的配置参数。
|
||||
系统中每个 dnode 的配置参数。SYSINFO 属性为 0 的用户不能查看此表。
|
||||
|
||||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :------: | ------------ | --------------------------------------------------------------------------------------- |
|
||||
| 1 | dnode_id | INT | dnode 的 ID |
|
||||
| 2 | name | BINARY(32) | 配置项名称 |
|
||||
| 3 | value | BINARY(64) | 该配置项的值。需要注意,`value` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 2 | name | VARCHAR(32) | 配置项名称 |
|
||||
| 3 | value | VARCHAR(64) | 该配置项的值。需要注意,`value` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
|
||||
## INS_TOPICS
|
||||
|
||||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :---------: | ------------ | ------------------------------ |
|
||||
| 1 | topic_name | BINARY(192) | topic 名称 |
|
||||
| 2 | db_name | BINARY(64) | topic 相关的 DB |
|
||||
| 1 | topic_name | VARCHAR(192) | topic 名称 |
|
||||
| 2 | db_name | VARCHAR(64) | topic 相关的 DB |
|
||||
| 3 | create_time | TIMESTAMP | topic 的 创建时间 |
|
||||
| 4 | sql | BINARY(1024) | 创建该 topic 时所用的 SQL 语句 |
|
||||
| 4 | sql | VARCHAR(1024) | 创建该 topic 时所用的 SQL 语句 |
|
||||
|
||||
## INS_SUBSCRIPTIONS
|
||||
|
||||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :------------: | ------------ | ------------------------ |
|
||||
| 1 | topic_name | BINARY(204) | 被订阅的 topic |
|
||||
| 2 | consumer_group | BINARY(193) | 订阅者的消费者组 |
|
||||
| 1 | topic_name | VARCHAR(204) | 被订阅的 topic |
|
||||
| 2 | consumer_group | VARCHAR(193) | 订阅者的消费者组 |
|
||||
| 3 | vgroup_id | INT | 消费者被分配的 vgroup id |
|
||||
| 4 | consumer_id | BIGINT | 消费者的唯一 id |
|
||||
| 5 | offset | BINARY(64) | 消费者的消费进度 |
|
||||
| 5 | offset | VARCHAR(64) | 消费者的消费进度 |
|
||||
| 6 | rows | BIGINT | 消费者的消费的数据条数 |
|
||||
|
||||
## INS_STREAMS
|
||||
|
||||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :----------: | ------------ | -------------------------------------------------------------------------------------------------------------------- |
|
||||
| 1 | stream_name | BINARY(64) | 流计算名称 |
|
||||
| 1 | stream_name | VARCHAR(64) | 流计算名称 |
|
||||
| 2 | create_time | TIMESTAMP | 创建时间 |
|
||||
| 3 | sql | BINARY(1024) | 创建流计算时提供的 SQL 语句 |
|
||||
| 4 | status | BINARY(20) | 流当前状态 |
|
||||
| 5 | source_db | BINARY(64) | 源数据库 |
|
||||
| 6 | target_db | BINARY(64) | 目的数据库 |
|
||||
| 7 | target_table | BINARY(192) | 流计算写入的目标表 |
|
||||
| 3 | sql | VARCHAR(1024) | 创建流计算时提供的 SQL 语句 |
|
||||
| 4 | status | VARCHAR(20) | 流当前状态 |
|
||||
| 5 | source_db | VARCHAR(64) | 源数据库 |
|
||||
| 6 | target_db | VARCHAR(64) | 目的数据库 |
|
||||
| 7 | target_table | VARCHAR(192) | 流计算写入的目标表 |
|
||||
| 8 | watermark | BIGINT | watermark,详见 SQL 手册流式计算。需要注意,`watermark` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 9 | trigger | INT | 计算结果推送模式,详见 SQL 手册流式计算。需要注意,`trigger` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
|
||||
## INS_USER_PRIVILEGES
|
||||
|
||||
注:SYSINFO 属性为 0 的用户不能查看此表。
|
||||
|
||||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :----------: | ------------ | -------------------------------------------------------------------------------------------------------------------- |
|
||||
| 1 | user_name | VARCHAR(24) | 用户名 |
|
||||
|
|
|
@ -73,10 +73,10 @@ SHOW CREATE TABLE [db_name.]tb_name
|
|||
## SHOW DATABASES
|
||||
|
||||
```sql
|
||||
SHOW DATABASES;
|
||||
SHOW [USER | SYSTEM] DATABASES;
|
||||
```
|
||||
|
||||
显示用户定义的所有数据库。
|
||||
显示定义的所有数据库。SYSTEM 指定只显示系统数据库。USER 指定只显示用户创建的数据库。
|
||||
|
||||
## SHOW DNODES
|
||||
|
||||
|
@ -183,10 +183,10 @@ SHOW SUBSCRIPTIONS;
|
|||
## SHOW TABLES
|
||||
|
||||
```sql
|
||||
SHOW [db_name.]TABLES [LIKE 'pattern'];
|
||||
SHOW [NORMAL | CHILD] [db_name.]TABLES [LIKE 'pattern'];
|
||||
```
|
||||
|
||||
显示当前数据库下的所有普通表和子表的信息。可以使用 LIKE 对表名进行模糊匹配。
|
||||
显示当前数据库下的所有普通表和子表的信息。可以使用 LIKE 对表名进行模糊匹配。NORMAL 指定只显示普通表信息,CHILD 指定只显示子表信息。
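上述两类带筛选的 SHOW 语句用法示意如下(假设已安装 taospy 连接器,且存在 test 数据库):

```python
import taos

conn = taos.connect(database="test")
print(conn.query("SHOW USER DATABASES").fetch_all())          # 仅用户创建的数据库
print(conn.query("SHOW NORMAL TABLES").fetch_all())           # 仅普通表
print(conn.query("SHOW CHILD TABLES LIKE 'd%'").fetch_all())  # 仅子表,带模糊匹配
conn.close()
```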
|
||||
|
||||
## SHOW TABLE DISTRIBUTED
|
||||
|
||||
|
|
|
@ -395,6 +395,7 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
|
|||
### 查询场景配置参数
|
||||
|
||||
查询场景下 `filetype` 必须设置为 `query`。
|
||||
`query_times` 指定运行查询的次数,数值类型
|
||||
|
||||
查询场景可以通过设置 `kill_slow_query_threshold` 和 `kill_slow_query_interval` 参数来控制杀掉慢查询语句的执行,threshold 控制如果 exec_usec 超过指定时间的查询将被 taosBenchmark 杀掉,单位为秒;interval 控制休眠时间,避免持续查询慢查询消耗 CPU ,单位为秒。
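一个查询场景配置的示意如下(为便于说明以 Python 生成配置文件;其中只有 filetype、query_times、kill_slow_query_threshold、kill_slow_query_interval 取自上文,其余字段与 SQL 均为假设):

```python
import json
import subprocess

cfg = {
    "filetype": "query",               # 查询场景必须设置为 query
    "query_times": 100,                # 运行查询的次数
    "kill_slow_query_threshold": 10,   # 执行时间超过 10 秒的查询将被杀掉
    "kill_slow_query_interval": 1,     # 每次检查之间休眠 1 秒
    "specified_table_query": {
        "sqls": [{"sql": "SELECT COUNT(*) FROM test.meters"}]
    },
}
with open("query.json", "w") as f:
    json.dump(cfg, f, indent=2)

subprocess.run(["taosBenchmark", "-f", "query.json"], check=True)
```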
|
||||
|
||||
|
|
|
@ -106,7 +106,7 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
|
|||
use letter and number only. Default is NOT.
|
||||
-n, --no-escape No escape char '`'. Default is using it.
|
||||
-Q, --dot-replace Replace dot character with underline character in
|
||||
the table name.
|
||||
the table name. (Version 2.5.3)
|
||||
-T, --thread-num=THREAD_NUM Number of thread for dump in file. Default is
|
||||
8.
|
||||
-C, --cloud=CLOUD_DSN specify a DSN to access TDengine cloud service
|
||||
|
@ -116,6 +116,10 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
|
|||
-?, --help Give this help list
|
||||
--usage Give a short usage message
|
||||
-V, --version Print program version
|
||||
-W, --rename=RENAME-LIST Rename database name with new name during
|
||||
importing data. RENAME-LIST:
|
||||
"db1=newDB1|db2=newDB2" means rename db1 to newDB1
|
||||
and rename db2 to newDB2 (Version 2.5.4)
|
||||
|
||||
Mandatory or optional arguments to long options are also mandatory or optional
|
||||
for any corresponding short options.
|
||||
|
|
|
@ -648,7 +648,16 @@ charset 的有效值是 UTF-8。
|
|||
| 适用范围 | 仅客户端适用 |
|
||||
| 含义 | schemaless 自定义的子表名的 key |
|
||||
| 类型 | 字符串 |
|
||||
| 缺省值 | 无 |
|
||||
| 缺省值 | 无 |
|
||||
|
||||
### smlAutoChildTableNameDelimiter
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | ------------------------------- |
|
||||
| 适用范围 | 仅客户端适用 |
|
||||
| 含义 | schemaless tag之间的连接符,连起来作为子表名 |
|
||||
| 类型 | 字符串 |
|
||||
| 缺省值 | 无 |
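示例如下(假设客户端 taos.cfg 已配置 smlAutoChildTableNameDelimiter=-,已安装 taospy 连接器,且存在 test 数据库):

```python
import taos
from taos import SmlProtocol, SmlPrecision

conn = taos.connect(database="test")
# 配置分隔符后,标签值 cpu1 与 4 连接为子表名 "cpu1-4",而非自动生成的唯一 ID
conn.schemaless_insert(
    ["st,t0=cpu1,t1=4 c1=3 1626006833639000000"],
    SmlProtocol.LINE_PROTOCOL,
    SmlPrecision.NANO_SECONDS,
)
conn.close()
```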
|
||||
|
||||
### smlTagName
|
||||
|
||||
|
@ -716,16 +725,6 @@ charset 的有效值是 UTF-8。
|
|||
| 取值范围 | 0: 不改变;1:改变 |
|
||||
| 缺省值 | 0 |
|
||||
|
||||
### keepTimeOffset
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | -------------- |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | 迁移操作的延时 |
|
||||
| 单位 | 小时 |
|
||||
| 取值范围 | 0-23 |
|
||||
| 缺省值 | 0 |
|
||||
|
||||
### tmqMaxTopicNum
|
||||
|
||||
| 属性 | 说明 |
|
||||
|
@ -803,7 +802,7 @@ charset 的有效值是 UTF-8。
|
|||
| 53 | udf | 是 | 是 | |
|
||||
| 54 | enableCoreFile | 是 | 是 | |
|
||||
| 55 | ttlChangeOnWrite | 否 | 是 | |
|
||||
| 56 | keepTimeOffset | 是 | 是 | |
|
||||
| 56 | keepTimeOffset | 否 | 是(从3.2.0.0开始,该配置废弃) | |
|
||||
|
||||
## 2.x->3.0 的废弃参数
|
||||
|
||||
|
|
|
@ -94,8 +94,11 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
|
|||
:::tip
|
||||
需要注意的是,这里的 tag_key1, tag_key2 并不是用户输入的标签的原始顺序,而是使用了标签名称按照字符串升序排列后的结果。所以,tag_key1 并不是在行协议中输入的第一个标签。
|
||||
排列完成以后计算该字符串的 MD5 散列值 "md5_val"。然后将计算的结果与字符串组合生成表名:“t_md5_val”。其中的 “t_” 是固定的前缀,每个通过该映射关系自动生成的表都具有该前缀。
|
||||
:::tip
|
||||
为了让用户可以指定生成的表名,可以通过在taos.cfg里配置 smlChildTableName 参数来指定。
|
||||
:::tip
|
||||
如果不想用自动生成的表名,有两种指定子表名的方式,第一种优先级更高:
|
||||
通过在taos.cfg里配置 smlAutoChildTableNameDelimiter 参数来指定。
|
||||
举例如下:配置 smlAutoChildTableNameDelimiter=- 插入数据为 st,t0=cpu1,t1=4 c1=3 1626006833639000000 则创建的表名为 cpu1-4。
|
||||
通过在taos.cfg里配置 smlChildTableName 参数来指定。
|
||||
举例如下:配置 smlChildTableName=tname 插入数据为 st,tname=cpu1,t1=4 c1=3 1626006833639000000 则创建的表名为 cpu1,注意如果多行数据 tname 相同,但是后面的 tag_set 不同,则使用第一行自动建表时指定的 tag_set,其他的行会忽略)。
|
||||
|
||||
2. 如果解析行协议获得的超级表不存在,则会创建这个超级表(不建议手动创建超级表,不然插入数据可能异常)。
|
||||
|
|
|
@ -9,8 +9,6 @@ TDengine 通过 [taosKeeper](/reference/taosKeeper/) 将服务器的 CPU、内
|
|||
|
||||
## TDinsight - 使用监控数据库 + Grafana 对 TDengine 进行监控的解决方案
|
||||
|
||||
监控数据库将提供更多的监控项,您可以从 [TDinsight Grafana Dashboard](/reference/tdinsight/) 了解如何使用 TDinsight 方案对 TDengine 进行监控。
|
||||
|
||||
我们提供了一个自动化脚本 `TDinsight.sh` 对 TDinsight 进行部署。
|
||||
|
||||
下载 `TDinsight.sh`:
|
||||
|
@ -37,8 +35,6 @@ chmod +x TDinsight.sh
|
|||
|
||||
运行程序并重启 Grafana 服务,打开面板:`http://localhost:3000/d/tdinsight`。
|
||||
|
||||
更多使用场景和限制请参考[TDinsight](/reference/tdinsight/) 文档。
|
||||
|
||||
## log 库
|
||||
|
||||
TDinsight dashboard 数据来源于 log 库(存放监控数据的默认db,可以在 taoskeeper 配置文件中修改,具体参考 [taoskeeper 文档](/reference/taosKeeper))。taoskeeper 启动后会自动创建 log 库,并将监控数据写入到该数据库中。
|
||||
|
@ -102,22 +98,22 @@ TDinsight dashboard 数据来源于 log 库(存放监控数据的默认db,
|
|||
|field|type|is\_tag|comment|
|
||||
|:----|:---|:-----|:------|
|
||||
|ts|TIMESTAMP||timestamp|
|
||||
|uptime|FLOAT||dnode uptime|
|
||||
|uptime|FLOAT||dnode uptime,单位:天|
|
||||
|cpu\_engine|FLOAT||taosd cpu 使用率,从 `/proc/<taosd_pid>/stat` 读取|
|
||||
|cpu\_system|FLOAT||服务器 cpu 使用率,从 `/proc/stat` 读取|
|
||||
|cpu\_cores|FLOAT||服务器 cpu 核数|
|
||||
|mem\_engine|INT||taosd 内存使用率,从 `/proc/<taosd_pid>/status` 读取|
|
||||
|mem\_system|INT||服务器可用内存|
|
||||
|mem\_system|INT||服务器可用内存,单位 KB|
|
||||
|mem\_total|INT||服务器内存总量,单位 KB|
|
||||
|disk\_engine|INT|||
|
||||
|disk\_engine|INT||单位 bytes|
|
||||
|disk\_used|BIGINT||data dir 挂载的磁盘使用量,单位 bytes|
|
||||
|disk\_total|BIGINT||data dir 挂载的磁盘总容量,单位 bytes|
|
||||
|net\_in|FLOAT||网络吞吐率,从 `/proc/net/dev` 中读取的 received bytes。单位 kb/s|
|
||||
|net\_out|FLOAT||网络吞吐率,从 `/proc/net/dev` 中读取的 transmit bytes。单位 kb/s|
|
||||
|io\_read|FLOAT||io 吞吐率,从 `/proc/<taosd_pid>/io` 中读取的 rchar 与上次数值计算之后,计算得到速度。单位 kb/s|
|
||||
|io\_write|FLOAT||io 吞吐率,从 `/proc/<taosd_pid>/io` 中读取的 wchar 与上次数值计算之后,计算得到速度。单位 kb/s|
|
||||
|io\_read\_disk|FLOAT||磁盘 io 吞吐率,从 `/proc/<taosd_pid>/io` 中读取的 read_bytes。单位 kb/s|
|
||||
|io\_write\_disk|FLOAT||磁盘 io 吞吐率,从 `/proc/<taosd_pid>/io` 中读取的 write_bytes。单位 kb/s|
|
||||
|net\_in|FLOAT||网络吞吐率,从 `/proc/net/dev` 中读取的 received bytes。单位 byte/s|
|
||||
|net\_out|FLOAT||网络吞吐率,从 `/proc/net/dev` 中读取的 transmit bytes。单位 byte/s|
|
||||
|io\_read|FLOAT||io 吞吐率,从 `/proc/<taosd_pid>/io` 中读取的 rchar 与上次数值计算之后,计算得到速度。单位 byte/s|
|
||||
|io\_write|FLOAT||io 吞吐率,从 `/proc/<taosd_pid>/io` 中读取的 wchar 与上次数值计算之后,计算得到速度。单位 byte/s|
|
||||
|io\_read\_disk|FLOAT||磁盘 io 吞吐率,从 `/proc/<taosd_pid>/io` 中读取的 read_bytes。单位 byte/s|
|
||||
|io\_write\_disk|FLOAT||磁盘 io 吞吐率,从 `/proc/<taosd_pid>/io` 中读取的 write_bytes。单位 byte/s|
|
||||
|req\_select|INT||两个间隔内发生的查询请求数目|
|
||||
|req\_select\_rate|FLOAT||两个间隔内的查询请求速度 = `req_select / monitorInterval`|
|
||||
|req\_insert|INT||两个间隔内发生的写入请求,包含的单条数据数目|
|
||||
|
@ -146,9 +142,9 @@ TDinsight dashboard 数据来源于 log 库(存放监控数据的默认db,
|
|||
|ts|TIMESTAMP||timestamp|
|
||||
|name|NCHAR||data 目录,一般为 `/var/lib/taos`|
|
||||
|level|INT||0、1、2 多级存储级别|
|
||||
|avail|BIGINT||data 目录可用空间|
|
||||
|used|BIGINT||data 目录已使用空间|
|
||||
|total|BIGINT||data 目录空间|
|
||||
|avail|BIGINT||data 目录可用空间。单位 byte|
|
||||
|used|BIGINT||data 目录已使用空间。单位 byte|
|
||||
|total|BIGINT||data 目录空间。单位 byte|
|
||||
|dnode\_id|INT|TAG|dnode id|
|
||||
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|
||||
|cluster\_id|NCHAR|TAG|cluster id|
|
||||
|
@ -161,9 +157,9 @@ TDinsight dashboard 数据来源于 log 库(存放监控数据的默认db,
|
|||
|:----|:---|:-----|:------|
|
||||
|ts|TIMESTAMP||timestamp|
|
||||
|name|NCHAR||log 目录名,一般为 `/var/log/taos/`|
|
||||
|avail|BIGINT||log 目录可用空间|
|
||||
|used|BIGINT||log 目录已使用空间|
|
||||
|total|BIGINT||log 目录空间|
|
||||
|avail|BIGINT||log 目录可用空间。单位 byte|
|
||||
|used|BIGINT||log 目录已使用空间。单位 byte|
|
||||
|total|BIGINT||log 目录空间。单位 byte|
|
||||
|dnode\_id|INT|TAG|dnode id|
|
||||
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|
||||
|cluster\_id|NCHAR|TAG|cluster id|
|
||||
|
@ -176,9 +172,9 @@ TDinsight dashboard 数据来源于 log 库(存放监控数据的默认db,
|
|||
|:----|:---|:-----|:------|
|
||||
|ts|TIMESTAMP||timestamp|
|
||||
|name|NCHAR||temp 目录名,一般为 `/tmp/`|
|
||||
|avail|BIGINT||temp 目录可用空间|
|
||||
|used|BIGINT||temp 目录已使用空间|
|
||||
|total|BIGINT||temp 目录空间|
|
||||
|avail|BIGINT||temp 目录可用空间。单位 byte|
|
||||
|used|BIGINT||temp 目录已使用空间。单位 byte|
|
||||
|total|BIGINT||temp 目录空间。单位 byte|
|
||||
|dnode\_id|INT|TAG|dnode id|
|
||||
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|
||||
|cluster\_id|NCHAR|TAG|cluster id|
|
||||
|
|
|
@ -14,40 +14,7 @@ Seeq 是制造业和工业互联网(IIOT)高级分析软件。Seeq 支持在
|
|||
|
||||
### Seeq 安装方法
|
||||
|
||||
从 [Seeq 官网](https://www.seeq.com/customer-download)下载相关软件,例如 Seeq Server 和 Seeq Data Lab 等。
|
||||
|
||||
### Seeq Server 安装和启动
|
||||
|
||||
```
|
||||
tar xvzf seeq-server-xxx.tar.gz
|
||||
cd seeq-server-installer
|
||||
sudo ./install
|
||||
|
||||
sudo seeq service enable
|
||||
sudo seeq start
|
||||
```
|
||||
|
||||
### Seeq Data Lab Server 安装和启动
|
||||
|
||||
Seeq Data Lab 需要安装在和 Seeq Server 不同的服务器上,并通过配置和 Seeq Server 互联。详细安装配置指令参见[Seeq 官方文档](https://support.seeq.com/space/KB/1034059842)。
|
||||
|
||||
```
|
||||
tar xvf seeq-data-lab-<version>-64bit-linux.tar.gz
|
||||
sudo seeq-data-lab-installer/install -f /opt/seeq/seeq-data-lab -g /var/opt/seeq -u seeq
|
||||
sudo seeq config set Network/DataLab/Hostname localhost
|
||||
sudo seeq config set Network/DataLab/Port 34231 # the port of the Data Lab server (usually 34231)
|
||||
sudo seeq config set Network/Hostname <value> # the host IP or URL of the main Seeq Server
|
||||
|
||||
# If the main Seeq server is configured to listen over HTTPS
|
||||
sudo seeq config set Network/Webserver/SecurePort 443 # the secure port of the main Seeq Server (usually 443)
|
||||
|
||||
# If the main Seeq server is NOT configured to listen over HTTPS
|
||||
sudo seeq config set Network/Webserver/Port <value>
|
||||
|
||||
#On the main Seeq server, open a Seeq Command Prompt and set the hostname of the Data Lab server:
|
||||
sudo seeq config set Network/DataLab/Hostname <value> # the host IP (not URL) of the Data Lab server
|
||||
sudo seeq config set Network/DataLab/Port 34231 # the port of the Data Lab server (usually 34231
|
||||
```
|
||||
从 [Seeq 官网](https://www.seeq.com/customer-download)下载相关软件,例如 Seeq Server 和 Seeq Data Lab 等。Seeq Data Lab 需要安装在和 Seeq Server 不同的服务器上,并通过配置和 Seeq Server 互联。详细安装配置指令参见[Seeq 知识库](https://support.seeq.com/kb/latest/cloud/)。
|
||||
|
||||
## TDengine 本地实例安装方法
|
||||
|
||||
|
|
|
@ -10,6 +10,10 @@ TDengine 2.x 各版本安装包请访问[这里](https://www.taosdata.com/all-do
|
|||
|
||||
import Release from "/components/ReleaseV3";
|
||||
|
||||
## 3.2.0.0
|
||||
|
||||
<Release type="tdengine" version="3.2.0.0" />
|
||||
|
||||
## 3.1.1.0
|
||||
|
||||
<Release type="tdengine" version="3.1.1.0" />
|
||||
|
|
|
@ -44,17 +44,17 @@ OS name: "windows 10", version: "10.0", arch: "amd64", family: "windows"
|
|||
<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0 http://maven.apache.org/xsd/settings-1.0.0.xsd">
|
||||
<!-- 配置本地maven仓库的路径 -->
|
||||
<!-- 配置本地maven仓库的路径 -->
|
||||
<localRepository>D:\apache-maven-localRepository</localRepository>
|
||||
|
||||
<mirrors>
|
||||
<!-- 配置阿里云Maven镜像仓库 -->
|
||||
<mirror>
|
||||
<id>alimaven</id>
|
||||
<name>aliyun maven</name>
|
||||
<url>http://maven.aliyun.com/nexus/content/groups/public/</url>
|
||||
<mirrorOf>central</mirrorOf>
|
||||
</mirror>
|
||||
<mirror>
|
||||
<id>alimaven</id>
|
||||
<name>aliyun maven</name>
|
||||
<url>http://maven.aliyun.com/nexus/content/groups/public/</url>
|
||||
<mirrorOf>central</mirrorOf>
|
||||
</mirror>
|
||||
</mirrors>
|
||||
|
||||
<profiles>
|
||||
|
@ -126,7 +126,7 @@ https://www.taosdata.com/cn/all-downloads/
|
|||
修改client的hosts文件(C:\Windows\System32\drivers\etc\hosts),将server的hostname和ip配置到client的hosts文件中
|
||||
|
||||
```
|
||||
192.168.236.136 td01
|
||||
192.168.236.136 td01
|
||||
```
|
||||
|
||||
配置完成后,在命令行内使用TDengine CLI连接server端
|
||||
|
|
|
@ -0,0 +1,3 @@
|
|||
go mod init demo
|
||||
go mod tidy
|
||||
go build
|
|
@ -125,7 +125,8 @@ typedef enum {

typedef enum {
  TAOS_NOTIFY_PASSVER = 0,
  TAOS_NOTIFY_WHITELIST_VER = 1
  TAOS_NOTIFY_WHITELIST_VER = 1,
  TAOS_NOTIFY_USER_DROPPED = 2,
} TAOS_NOTIFY_TYPE;

#define RET_MSG_LENGTH 1024

@ -240,6 +241,11 @@ DLL_EXPORT int taos_set_notify_cb(TAOS *taos, __taos_notify_fn_t fp, void *param

typedef void (*__taos_async_whitelist_fn_t)(void *param, int code, TAOS *taos, int numOfWhiteLists, uint64_t* pWhiteLists);
DLL_EXPORT void taos_fetch_whitelist_a(TAOS *taos, __taos_async_whitelist_fn_t fp, void *param);

typedef enum {
  TAOS_CONN_MODE_BI = 0,
} TAOS_CONN_MODE;

DLL_EXPORT int taos_set_conn_mode(TAOS* taos, int mode, int value);
/* --------------------------schemaless INTERFACE------------------------------- */

DLL_EXPORT TAOS_RES *taos_schemaless_insert(TAOS *taos, char *lines[], int numLines, int protocol, int precision);
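The whitelist declarations above are enough to sketch how a client might drive the new async API. The following is a minimal illustrative sketch, not part of this commit: it assumes the long-standing `taos_connect()`/`taos_close()` entry points, and since the packing of each `uint64_t` whitelist entry is internal, the values are only printed raw.

```c
#include <inttypes.h>
#include <stdio.h>
#include "taos.h"

/* Callback invoked by taos_fetch_whitelist_a(); prints each raw entry. */
static void on_whitelist(void *param, int code, TAOS *taos,
                         int numOfWhiteLists, uint64_t *pWhiteLists) {
  if (code != 0) {
    printf("fetch whitelist failed, code: %d\n", code);
    return;
  }
  for (int i = 0; i < numOfWhiteLists; ++i) {
    /* each entry packs an IPv4 address and mask; exact layout is internal (assumption) */
    printf("whitelist[%d]: 0x%" PRIx64 "\n", i, pWhiteLists[i]);
  }
}

int main(void) {
  TAOS *conn = taos_connect("localhost", "root", "taosdata", NULL, 6030);
  if (conn == NULL) return 1;
  taos_fetch_whitelist_a(conn, on_whitelist, NULL);
  getchar();  /* crude wait for the async callback; use a semaphore in real code */
  taos_close(conn);
  return 0;
}
```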
@ -313,6 +319,7 @@ DLL_EXPORT int32_t tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t

DLL_EXPORT int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId);  // The current offset is the offset of the last consumed message + 1
DLL_EXPORT int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId);

DLL_EXPORT TAOS *tmq_get_connect(tmq_t *tmq);
DLL_EXPORT const char *tmq_get_table_name(TAOS_RES *res);
DLL_EXPORT tmq_res_t tmq_get_res_type(TAOS_RES *res);
DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res);
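Taken together, `tmq_position()` and `tmq_committed()` let a consumer detect drift between where it is and what it has committed. A hedged sketch follows; `tmq_offset_seek()`'s full parameter list is truncated in the hunk header above, so the `(vgId, offset)` tail used here is an assumption, and negative returns are treated as error codes.

```c
/* Rewind one vgroup of a topic to its last committed offset (illustrative). */
static int32_t rewind_to_committed(tmq_t *tmq, const char *topic, int32_t vgId) {
  int64_t committed = tmq_committed(tmq, topic, vgId);
  int64_t position  = tmq_position(tmq, topic, vgId);
  if (committed < 0 || position < 0) {
    return (int32_t)(committed < 0 ? committed : position);  /* propagate error */
  }
  if (position != committed) {
    return tmq_offset_seek(tmq, topic, vgId, committed);  /* assumed signature */
  }
  return 0;
}
```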
@ -55,8 +55,8 @@ typedef struct SSessionKey {
} SSessionKey;

typedef struct SVersionRange {
  uint64_t minVer;
  uint64_t maxVer;
  int64_t minVer;
  int64_t maxVer;
} SVersionRange;

static inline int winKeyCmprImpl(const void* pKey1, const void* pKey2) {
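The switch from `uint64_t` to `int64_t` is easy to motivate, though the commit itself does not say: version arithmetic and sentinel values only behave sanely when the fields are signed. A small illustrative snippet under that assumption:

```c
SVersionRange r = {.minVer = 0, .maxVer = 100};
int64_t prev = r.minVer - 1;  /* -1 as intended; with uint64_t this underflowed to UINT64_MAX */
r.maxVer = -1;                /* usable as a "not set" sentinel, and comparisons stay ordered */
```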
@ -108,7 +108,7 @@ int32_t tBufferReserve(SBuffer *pBuffer, int64_t nData, void **ppData);

int32_t tRowBuild(SArray *aColVal, const STSchema *pTSchema, SRow **ppRow);
int32_t tRowGet(SRow *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal);
void    tRowDestroy(SRow *pRow);
void    tRowSort(SArray *aRowP);
int32_t tRowSort(SArray *aRowP);
int32_t tRowMerge(SArray *aRowP, STSchema *pTSchema, int8_t flag);
int32_t tRowUpsertColData(SRow *pRow, STSchema *pTSchema, SColData *aColData, int32_t nColData, int32_t flag);
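`tRowSort()` now returns a status code instead of `void`, so call sites can propagate failure. A sketch of the adjusted call pattern (the error constant is assumed to be the usual `TSDB_CODE_SUCCESS`):

```c
int32_t code = tRowSort(aRowP);
if (code != TSDB_CODE_SUCCESS) {
  return code;  /* surface the failure instead of continuing with unsorted rows */
}
code = tRowMerge(aRowP, pTSchema, flag);
```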
@ -145,6 +145,7 @@ extern bool tsUseAdapter;
extern int32_t tsMetaCacheMaxSize;
extern int32_t tsSlowLogThreshold;
extern int32_t tsSlowLogScope;
extern int32_t tsTimeSeriesThreshold;

// client
extern int32_t tsMinSlidingTime;

@ -159,10 +160,11 @@ extern char buildinfo[];

// lossy
extern char tsLossyColumns[];
extern double tsFPrecision;
extern float tsFPrecision;
extern double tsDPrecision;
extern uint32_t tsMaxRange;
extern uint32_t tsCurRange;
extern bool tsIfAdtFse;
extern char tsCompressor[];

// tfs

@ -177,6 +179,7 @@ extern char tsUdfdLdLibPath[];

// schemaless
extern char tsSmlChildTableName[];
extern char tsSmlAutoChildTableNameDelimiter[];
extern char tsSmlTagName[];
extern bool tsSmlDot2Underline;
extern char tsSmlTsDefaultName[];

@ -206,7 +209,6 @@ extern int32_t tsRpcRetryInterval;
extern bool tsDisableStream;
extern int64_t tsStreamBufferSize;
extern bool tsFilterScalarMode;
extern int32_t tsKeepTimeOffset;
extern int32_t tsMaxStreamBackendCache;
extern int32_t tsPQSortMemThreshold;
extern int32_t tsResolveFQDNRetryTime;
@ -30,6 +30,8 @@ extern "C" {
#define GRANTS_COL_MAX_LEN 196
#endif

#define GRANT_HEART_BEAT_MIN 2

typedef enum {
  TSDB_GRANT_ALL,
  TSDB_GRANT_TIME,

@ -49,6 +51,11 @@ typedef enum {
} EGrantType;

int32_t grantCheck(EGrantType grant);
#ifndef TD_GRANT_OPTIMIZE
int32_t grantAlterActiveCode(const char* old, const char* newer, char* out, int8_t type);
#else
int32_t grantAlterActiveCode(int32_t did, const char* old, const char* newer, char* out, int8_t type);
#endif

#ifndef GRANTS_CFG
#ifdef TD_ENTERPRISE

@ -107,4 +114,4 @@ int32_t grantCheck(EGrantType grant);
}
#endif

#endif /*_TD_COMMON_GRANT_H_*/
@ -768,6 +768,8 @@ typedef struct {
  char*   pAst2;
  int64_t deleteMark1;
  int64_t deleteMark2;
  int32_t sqlLen;
  char*   sql;
} SMCreateStbReq;

int32_t tSerializeSMCreateStbReq(void* buf, int32_t bufLen, SMCreateStbReq* pReq);

@ -788,10 +790,13 @@ typedef struct {
  int8_t   source;  // 1-taosX or 0-taosClient
  int8_t   reserved[6];
  tb_uid_t suid;
  int32_t  sqlLen;
  char*    sql;
} SMDropStbReq;

int32_t tSerializeSMDropStbReq(void* buf, int32_t bufLen, SMDropStbReq* pReq);
int32_t tDeserializeSMDropStbReq(void* buf, int32_t bufLen, SMDropStbReq* pReq);
void    tFreeSMDropStbReq(SMDropStbReq *pReq);

typedef struct {
  char name[TSDB_TABLE_FNAME_LEN];

@ -801,6 +806,8 @@ typedef struct {
  int32_t ttl;
  int32_t commentLen;
  char*   comment;
  int32_t sqlLen;
  char*   sql;
} SMAlterStbReq;

int32_t tSerializeSMAlterStbReq(void* buf, int32_t bufLen, SMAlterStbReq* pReq);

@ -871,10 +878,13 @@ int32_t tDeserializeSCreateAcctReq(void* buf, int32_t bufLen, SCreateAcctReq* pR

typedef struct {
  char    user[TSDB_USER_LEN];
  int32_t sqlLen;
  char*   sql;
} SDropUserReq, SDropAcctReq;

int32_t tSerializeSDropUserReq(void* buf, int32_t bufLen, SDropUserReq* pReq);
int32_t tDeserializeSDropUserReq(void* buf, int32_t bufLen, SDropUserReq* pReq);
void    tFreeSDropUserReq(SDropUserReq *pReq);

typedef struct SIpV4Range{
  uint32_t ip;

@ -888,19 +898,21 @@ typedef struct {

SIpWhiteList* cloneIpWhiteList(SIpWhiteList* pIpWhiteList);
typedef struct {
  int8_t      createType;
  int8_t      superUser;  // denote if it is a super user or not
  int8_t      sysInfo;
  int8_t      enable;
  char        user[TSDB_USER_LEN];
  char        pass[TSDB_USET_PASSWORD_LEN];
  int32_t     numIpRanges;
  SIpV4Range* pIpRanges;
  int32_t     sqlLen;
  char*       sql;
} SCreateUserReq;

int32_t tSerializeSCreateUserReq(void* buf, int32_t bufLen, SCreateUserReq* pReq);
int32_t tDeserializeSCreateUserReq(void* buf, int32_t bufLen, SCreateUserReq* pReq);
void    tFreeSCreateUserReq(SCreateUserReq* pReq);
void    tFreeSCreateUserReq(SCreateUserReq *pReq);

typedef struct {
  int64_t ver;

@ -927,18 +939,20 @@ int32_t tSerializeRetrieveIpWhite(void* buf, int32_t bufLen, SRetrieveIpWhiteReq
int32_t tDeserializeRetrieveIpWhite(void* buf, int32_t bufLen, SRetrieveIpWhiteReq* pReq);

typedef struct {
  int8_t      alterType;
  int8_t      superUser;
  int8_t      sysInfo;
  int8_t      enable;
  char        user[TSDB_USER_LEN];
  char        pass[TSDB_USET_PASSWORD_LEN];
  char        objname[TSDB_DB_FNAME_LEN];  // db or topic
  char        tabName[TSDB_TABLE_NAME_LEN];
  char*       tagCond;
  int32_t     tagCondLen;
  int32_t     numIpRanges;
  SIpV4Range* pIpRanges;
  int32_t     sqlLen;
  char*       sql;
} SAlterUserReq;

int32_t tSerializeSAlterUserReq(void* buf, int32_t bufLen, SAlterUserReq* pReq);

@ -959,7 +973,7 @@ typedef struct {
  int8_t    superAuth;
  int8_t    sysInfo;
  int8_t    enable;
  int8_t    reserve;
  int8_t    dropped;
  SHashObj* createdDbs;
  SHashObj* readDbs;
  SHashObj* writeDbs;

@ -1096,6 +1110,7 @@ typedef struct {
  int32_t daysToKeep0;
  int32_t daysToKeep1;
  int32_t daysToKeep2;
  int32_t keepTimeOffset;
  int32_t minRows;
  int32_t maxRows;
  int32_t walFsyncPeriod;

@ -1117,6 +1132,8 @@ typedef struct {
  int16_t hashPrefix;
  int16_t hashSuffix;
  int32_t tsdbPageSize;
  int32_t sqlLen;
  char*   sql;
} SCreateDbReq;

int32_t tSerializeSCreateDbReq(void* buf, int32_t bufLen, SCreateDbReq* pReq);

@ -1133,6 +1150,7 @@ typedef struct {
  int32_t daysToKeep0;
  int32_t daysToKeep1;
  int32_t daysToKeep2;
  int32_t keepTimeOffset;
  int32_t walFsyncPeriod;
  int8_t  walLevel;
  int8_t  strict;

@ -1142,18 +1160,24 @@ typedef struct {
  int32_t minRows;
  int32_t walRetentionPeriod;
  int32_t walRetentionSize;
  int32_t sqlLen;
  char*   sql;
} SAlterDbReq;

int32_t tSerializeSAlterDbReq(void* buf, int32_t bufLen, SAlterDbReq* pReq);
int32_t tDeserializeSAlterDbReq(void* buf, int32_t bufLen, SAlterDbReq* pReq);
void    tFreeSAlterDbReq(SAlterDbReq* pReq);

typedef struct {
  char    db[TSDB_DB_FNAME_LEN];
  int8_t  ignoreNotExists;
  int32_t sqlLen;
  char*   sql;
} SDropDbReq;

int32_t tSerializeSDropDbReq(void* buf, int32_t bufLen, SDropDbReq* pReq);
int32_t tDeserializeSDropDbReq(void* buf, int32_t bufLen, SDropDbReq* pReq);
void    tFreeSDropDbReq(SDropDbReq* pReq);

typedef struct {
  char db[TSDB_DB_FNAME_LEN];

@ -1239,6 +1263,7 @@ typedef struct {
  int32_t daysToKeep0;
  int32_t daysToKeep1;
  int32_t daysToKeep2;
  int32_t keepTimeOffset;
  int32_t minRows;
  int32_t maxRows;
  int32_t walFsyncPeriod;

@ -1347,10 +1372,13 @@ void tFreeSUserAuthBatchRsp(SUserAuthBatchRsp* pRsp);

typedef struct {
  char        db[TSDB_DB_FNAME_LEN];
  STimeWindow timeRange;
  int32_t     sqlLen;
  char*       sql;
} SCompactDbReq;

int32_t tSerializeSCompactDbReq(void* buf, int32_t bufLen, SCompactDbReq* pReq);
int32_t tDeserializeSCompactDbReq(void* buf, int32_t bufLen, SCompactDbReq* pReq);
void    tFreeSCompactDbReq(SCompactDbReq *pReq);

typedef struct {
  char name[TSDB_FUNC_NAME_LEN];

@ -1464,6 +1492,11 @@ typedef struct {
  int32_t learnerProgress;  // use one reservered
} SVnodeLoad;

typedef struct {
  int32_t vgId;
  int64_t nTimeSeries;
} SVnodeLoadLite;

typedef struct {
  int8_t  syncState;
  int64_t syncTerm;

@ -1511,6 +1544,16 @@ int32_t tSerializeSStatusReq(void* buf, int32_t bufLen, SStatusReq* pReq);
int32_t tDeserializeSStatusReq(void* buf, int32_t bufLen, SStatusReq* pReq);
void    tFreeSStatusReq(SStatusReq* pReq);

typedef struct {
  int32_t dnodeId;
  int64_t clusterId;
  SArray* pVloads;
} SNotifyReq;

int32_t tSerializeSNotifyReq(void* buf, int32_t bufLen, SNotifyReq* pReq);
int32_t tDeserializeSNotifyReq(void* buf, int32_t bufLen, SNotifyReq* pReq);
void    tFreeSNotifyReq(SNotifyReq* pReq);
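The new `SNotifyReq` follows the same serialize/deserialize/free triple as its neighbors, so a round trip is mechanical. An illustrative sketch (buffer size arbitrary, error handling elided; `pVloads` presumably carries `SVnodeLoadLite` entries, per the `SMonVloadInfo` comment later in this commit):

```c
SNotifyReq req = {.dnodeId = 1, .clusterId = 42, .pVloads = NULL};
char    buf[256];
int32_t len = tSerializeSNotifyReq(buf, sizeof(buf), &req);

SNotifyReq decoded = {0};
tDeserializeSNotifyReq(buf, len, &decoded);
/* ... use decoded.pVloads ... */
tFreeSNotifyReq(&decoded);
```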
typedef struct {
  int32_t dnodeId;
  int64_t clusterId;

@ -1576,6 +1619,7 @@ typedef struct {
  int32_t daysToKeep0;
  int32_t daysToKeep1;
  int32_t daysToKeep2;
  int32_t keepTimeOffset;
  int32_t minRows;
  int32_t maxRows;
  int32_t walFsyncPeriod;

@ -1655,6 +1699,7 @@ typedef struct {
  int32_t daysToKeep0;
  int32_t daysToKeep1;
  int32_t daysToKeep2;
  int32_t keepTimeOffset;
  int32_t walFsyncPeriod;
  int8_t  walLevel;
  int8_t  strict;

@ -1913,10 +1958,13 @@ void tFreeSExplainRsp(SExplainRsp* pRsp);

typedef struct {
  char    fqdn[TSDB_FQDN_LEN];  // end point, hostname:port
  int32_t port;
  int32_t sqlLen;
  char*   sql;
} SCreateDnodeReq;

int32_t tSerializeSCreateDnodeReq(void* buf, int32_t bufLen, SCreateDnodeReq* pReq);
int32_t tDeserializeSCreateDnodeReq(void* buf, int32_t bufLen, SCreateDnodeReq* pReq);
void    tFreeSCreateDnodeReq(SCreateDnodeReq* pReq);

typedef struct {
  int32_t dnodeId;

@ -1924,10 +1972,13 @@ typedef struct {
  int32_t port;
  int8_t  force;
  int8_t  unsafe;
  int32_t sqlLen;
  char*   sql;
} SDropDnodeReq;

int32_t tSerializeSDropDnodeReq(void* buf, int32_t bufLen, SDropDnodeReq* pReq);
int32_t tDeserializeSDropDnodeReq(void* buf, int32_t bufLen, SDropDnodeReq* pReq);
void    tFreeSDropDnodeReq(SDropDnodeReq* pReq);

enum {
  RESTORE_TYPE__ALL = 1,

@ -1939,19 +1990,25 @@ enum {
typedef struct {
  int32_t dnodeId;
  int8_t  restoreType;
  int32_t sqlLen;
  char*   sql;
} SRestoreDnodeReq;

int32_t tSerializeSRestoreDnodeReq(void* buf, int32_t bufLen, SRestoreDnodeReq* pReq);
int32_t tDeserializeSRestoreDnodeReq(void* buf, int32_t bufLen, SRestoreDnodeReq* pReq);
void    tFreeSRestoreDnodeReq(SRestoreDnodeReq *pReq);

typedef struct {
  int32_t dnodeId;
  char    config[TSDB_DNODE_CONFIG_LEN];
  char    value[TSDB_DNODE_VALUE_LEN];
  int32_t sqlLen;
  char*   sql;
} SMCfgDnodeReq;

int32_t tSerializeSMCfgDnodeReq(void* buf, int32_t bufLen, SMCfgDnodeReq* pReq);
int32_t tDeserializeSMCfgDnodeReq(void* buf, int32_t bufLen, SMCfgDnodeReq* pReq);
void    tFreeSMCfgDnodeReq(SMCfgDnodeReq *pReq);

typedef struct {
  char config[TSDB_DNODE_CONFIG_LEN];

@ -1963,12 +2020,15 @@ int32_t tDeserializeSDCfgDnodeReq(void* buf, int32_t bufLen, SDCfgDnodeReq* pReq

typedef struct {
  int32_t dnodeId;
  int32_t sqlLen;
  char*   sql;
} SMCreateMnodeReq, SMDropMnodeReq, SDDropMnodeReq, SMCreateQnodeReq, SMDropQnodeReq, SDCreateQnodeReq, SDDropQnodeReq,
    SMCreateSnodeReq, SMDropSnodeReq, SDCreateSnodeReq, SDDropSnodeReq;

int32_t tSerializeSCreateDropMQSNodeReq(void* buf, int32_t bufLen, SMCreateQnodeReq* pReq);
int32_t tDeserializeSCreateDropMQSNodeReq(void* buf, int32_t bufLen, SMCreateQnodeReq* pReq);

void tFreeSMCreateQnodeReq(SMCreateQnodeReq *pReq);
void tFreeSDDropQnodeReq(SDDropQnodeReq* pReq);
typedef struct {
  int8_t   replica;
  SReplica replicas[TSDB_MAX_REPLICA];

@ -2003,10 +2063,13 @@ int32_t tDeserializeSKillTransReq(void* buf, int32_t bufLen, SKillTransReq* pReq

typedef struct {
  int32_t useless;  // useless
  int32_t sqlLen;
  char*   sql;
} SBalanceVgroupReq;

int32_t tSerializeSBalanceVgroupReq(void* buf, int32_t bufLen, SBalanceVgroupReq* pReq);
int32_t tDeserializeSBalanceVgroupReq(void* buf, int32_t bufLen, SBalanceVgroupReq* pReq);
void    tFreeSBalanceVgroupReq(SBalanceVgroupReq *pReq);

typedef struct {
  int32_t vgId1;

@ -2021,17 +2084,24 @@ typedef struct {
  int32_t dnodeId1;
  int32_t dnodeId2;
  int32_t dnodeId3;
  int32_t sqlLen;
  char*   sql;
} SRedistributeVgroupReq;

int32_t tSerializeSRedistributeVgroupReq(void* buf, int32_t bufLen, SRedistributeVgroupReq* pReq);
int32_t tDeserializeSRedistributeVgroupReq(void* buf, int32_t bufLen, SRedistributeVgroupReq* pReq);
void    tFreeSRedistributeVgroupReq(SRedistributeVgroupReq *pReq);

typedef struct {
  int32_t useless;
  int32_t vgId;
  int32_t sqlLen;
  char*   sql;
} SBalanceVgroupLeaderReq;

int32_t tSerializeSBalanceVgroupLeaderReq(void* buf, int32_t bufLen, SBalanceVgroupLeaderReq* pReq);
int32_t tDeserializeSBalanceVgroupLeaderReq(void* buf, int32_t bufLen, SBalanceVgroupLeaderReq* pReq);
void    tFreeSBalanceVgroupLeaderReq(SBalanceVgroupLeaderReq *pReq);

typedef struct {
  int32_t vgId;

@ -2295,17 +2365,6 @@ int32_t tSerializeSCMCreateStreamReq(void* buf, int32_t bufLen, const SCMCreateS
int32_t tDeserializeSCMCreateStreamReq(void* buf, int32_t bufLen, SCMCreateStreamReq* pReq);
void    tFreeSCMCreateStreamReq(SCMCreateStreamReq* pReq);

typedef struct {
  char    name[TSDB_STREAM_FNAME_LEN];
  int64_t streamId;
  char*   sql;
  char*   executorMsg;
} SMVCreateStreamReq, SMSCreateStreamReq;

typedef struct {
  int64_t streamId;
} SMVCreateStreamRsp, SMSCreateStreamRsp;

enum {
  TOPIC_SUB_TYPE__DB = 1,
  TOPIC_SUB_TYPE__TABLE,

@ -2327,16 +2386,9 @@ int32_t tSerializeSCMCreateTopicReq(void* buf, int32_t bufLen, const SCMCreateTo
int32_t tDeserializeSCMCreateTopicReq(void* buf, int32_t bufLen, SCMCreateTopicReq* pReq);
void    tFreeSCMCreateTopicReq(SCMCreateTopicReq* pReq);

typedef struct {
  int64_t topicId;
} SCMCreateTopicRsp;

int32_t tSerializeSCMCreateTopicRsp(void* buf, int32_t bufLen, const SCMCreateTopicRsp* pRsp);
int32_t tDeserializeSCMCreateTopicRsp(void* buf, int32_t bufLen, SCMCreateTopicRsp* pRsp);

typedef struct {
  int64_t consumerId;
} SMqConsumerLostMsg, SMqConsumerRecoverMsg, SMqConsumerClearMsg;
} SMqConsumerRecoverMsg, SMqConsumerClearMsg;

typedef struct {
  int64_t consumerId;

@ -2348,6 +2400,7 @@ typedef struct {
  int8_t  autoCommit;
  int32_t autoCommitInterval;
  int8_t  resetOffsetCfg;
  int8_t  enableReplay;
} SCMSubscribeReq;

static FORCE_INLINE int32_t tSerializeSCMSubscribeReq(void** buf, const SCMSubscribeReq* pReq) {

@ -2367,6 +2420,7 @@ static FORCE_INLINE int32_t tSerializeSCMSubscribeReq(void** buf, const SCMSubsc
  tlen += taosEncodeFixedI8(buf, pReq->autoCommit);
  tlen += taosEncodeFixedI32(buf, pReq->autoCommitInterval);
  tlen += taosEncodeFixedI8(buf, pReq->resetOffsetCfg);
  tlen += taosEncodeFixedI8(buf, pReq->enableReplay);

  return tlen;
}

@ -2390,71 +2444,7 @@ static FORCE_INLINE void* tDeserializeSCMSubscribeReq(void* buf, SCMSubscribeReq
  buf = taosDecodeFixedI8(buf, &pReq->autoCommit);
  buf = taosDecodeFixedI32(buf, &pReq->autoCommitInterval);
  buf = taosDecodeFixedI8(buf, &pReq->resetOffsetCfg);
  return buf;
}

typedef struct SMqSubTopic {
  int32_t vgId;
  int64_t topicId;
  SEpSet  epSet;
} SMqSubTopic;

typedef struct {
  int32_t     topicNum;
  SMqSubTopic topics[];
} SCMSubscribeRsp;

static FORCE_INLINE int32_t tSerializeSCMSubscribeRsp(void** buf, const SCMSubscribeRsp* pRsp) {
  int32_t tlen = 0;
  tlen += taosEncodeFixedI32(buf, pRsp->topicNum);
  for (int32_t i = 0; i < pRsp->topicNum; i++) {
    tlen += taosEncodeFixedI32(buf, pRsp->topics[i].vgId);
    tlen += taosEncodeFixedI64(buf, pRsp->topics[i].topicId);
    tlen += taosEncodeSEpSet(buf, &pRsp->topics[i].epSet);
  }
  return tlen;
}

static FORCE_INLINE void* tDeserializeSCMSubscribeRsp(void* buf, SCMSubscribeRsp* pRsp) {
  buf = taosDecodeFixedI32(buf, &pRsp->topicNum);
  for (int32_t i = 0; i < pRsp->topicNum; i++) {
    buf = taosDecodeFixedI32(buf, &pRsp->topics[i].vgId);
    buf = taosDecodeFixedI64(buf, &pRsp->topics[i].topicId);
    buf = taosDecodeSEpSet(buf, &pRsp->topics[i].epSet);
  }
  return buf;
}

typedef struct {
  int64_t topicId;
  int64_t consumerId;
  int64_t consumerGroupId;
  int64_t offset;
  char*   sql;
  char*   logicalPlan;
  char*   physicalPlan;
} SMVSubscribeReq;

static FORCE_INLINE int32_t tSerializeSMVSubscribeReq(void** buf, SMVSubscribeReq* pReq) {
  int32_t tlen = 0;
  tlen += taosEncodeFixedI64(buf, pReq->topicId);
  tlen += taosEncodeFixedI64(buf, pReq->consumerId);
  tlen += taosEncodeFixedI64(buf, pReq->consumerGroupId);
  tlen += taosEncodeFixedI64(buf, pReq->offset);
  tlen += taosEncodeString(buf, pReq->sql);
  tlen += taosEncodeString(buf, pReq->logicalPlan);
  tlen += taosEncodeString(buf, pReq->physicalPlan);
  return tlen;
}

static FORCE_INLINE void* tDeserializeSMVSubscribeReq(void* buf, SMVSubscribeReq* pReq) {
  buf = taosDecodeFixedI64(buf, &pReq->topicId);
  buf = taosDecodeFixedI64(buf, &pReq->consumerId);
  buf = taosDecodeFixedI64(buf, &pReq->consumerGroupId);
  buf = taosDecodeFixedI64(buf, &pReq->offset);
  buf = taosDecodeString(buf, &pReq->sql);
  buf = taosDecodeString(buf, &pReq->logicalPlan);
  buf = taosDecodeString(buf, &pReq->physicalPlan);
  buf = taosDecodeFixedI8(buf, &pReq->enableReplay);
  return buf;
}

@ -2505,10 +2495,13 @@ typedef struct {
typedef struct {
  char    name[TSDB_TOPIC_FNAME_LEN];
  int8_t  igNotExists;
  int32_t sqlLen;
  char*   sql;
} SMDropTopicReq;

int32_t tSerializeSMDropTopicReq(void* buf, int32_t bufLen, SMDropTopicReq* pReq);
int32_t tDeserializeSMDropTopicReq(void* buf, int32_t bufLen, SMDropTopicReq* pReq);
void    tFreeSMDropTopicReq(SMDropTopicReq *pReq);

typedef struct {
  char topic[TSDB_TOPIC_FNAME_LEN];

@ -2604,6 +2597,8 @@ typedef struct SVCreateTbReq {
    SSchemaWrapper schemaRow;
  } ntb;
};
  int32_t sqlLen;
  char*   sql;
} SVCreateTbReq;

int tEncodeSVCreateTbReq(SEncoder* pCoder, const SVCreateTbReq* pReq);

@ -2615,6 +2610,7 @@ static FORCE_INLINE void tdDestroySVCreateTbReq(SVCreateTbReq* req) {
    return;
  }

  taosMemoryFreeClear(req->sql);
  taosMemoryFreeClear(req->name);
  taosMemoryFreeClear(req->comment);
  if (req->type == TSDB_CHILD_TABLE) {

@ -3078,6 +3074,8 @@ typedef struct {
typedef struct {
  char    name[TSDB_STREAM_FNAME_LEN];
  int8_t  igNotExists;
  int32_t sqlLen;
  char*   sql;
} SMDropStreamReq;

typedef struct {

@ -3091,12 +3089,20 @@ typedef struct {
  int32_t taskId;
} SVDropStreamTaskReq;

typedef struct {
  SMsgHead head;
  int64_t  streamId;
  int32_t  taskId;
  int64_t  dataVer;
} SVStreamTaskVerUpdateReq;

typedef struct {
  int8_t reserved;
} SVDropStreamTaskRsp;

int32_t tSerializeSMDropStreamReq(void* buf, int32_t bufLen, const SMDropStreamReq* pReq);
int32_t tDeserializeSMDropStreamReq(void* buf, int32_t bufLen, SMDropStreamReq* pReq);
void    tFreeSMDropStreamReq(SMDropStreamReq* pReq);

typedef struct {
  char name[TSDB_STREAM_FNAME_LEN];

@ -3255,7 +3261,7 @@ typedef struct {
  SMsgHead head;
  int64_t  streamId;
  int32_t  taskId;
} SVPauseStreamTaskReq;
} SVPauseStreamTaskReq, SVResetStreamTaskReq;

typedef struct {
  int8_t reserved;

@ -3534,6 +3540,7 @@ typedef struct {
  int64_t      consumerId;
  int64_t      timeout;
  STqOffsetVal reqOffset;
  int8_t       enableReplay;
} SMqPollReq;

int32_t tSerializeSMqPollReq(void* buf, int32_t bufLen, SMqPollReq* pReq);

@ -3593,6 +3600,7 @@ typedef struct {
  SArray* blockData;
  SArray* blockTbName;
  SArray* blockSchema;
  int64_t sleepTime;
} SMqDataRsp;

int32_t tEncodeMqDataRsp(SEncoder* pEncoder, const SMqDataRsp* pRsp);
@ -179,8 +179,7 @@ enum { // WARN: new msg should be appended to segment tail
  TD_DEF_MSG_TYPE(TDMT_MND_STREAM_HEARTBEAT, "stream-heartbeat", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_RETRIEVE_IP_WHITE, "retrieve-ip-white", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_GET_USER_WHITELIST, "get-user-whitelist", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_MAX_MSG, "mnd-max", NULL, NULL)

  TD_DEF_MSG_TYPE(TDMT_MND_NOTIFY, "notify", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_BALANCE_VGROUP_LEADER, "balance-vgroup-leader", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_RESTORE_DNODE, "restore-dnode", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_PAUSE_STREAM, "pause-stream", NULL, NULL)

@ -189,6 +188,8 @@ enum { // WARN: new msg should be appended to segment tail
  TD_DEF_MSG_TYPE(TDMT_MND_STREAM_BEGIN_CHECKPOINT, "stream-begin-checkpoint", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_STREAM_NODECHANGE_CHECK, "stream-nodechange-check", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_TRIM_DB_TIMER, "trim-db-tmr", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_GRANT_NOTIFY, "grant-notify", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_MAX_MSG, "mnd-max", NULL, NULL)

  TD_NEW_MSG_SEG(TDMT_VND_MSG)
  TD_DEF_MSG_TYPE(TDMT_VND_SUBMIT, "submit", SSubmitReq, SSubmitRsp)

@ -298,17 +299,17 @@ enum { // WARN: new msg should be appended to segment tail
  TD_DEF_MSG_TYPE(TDMT_SYNC_HEARTBEAT, "sync-heartbeat", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_SYNC_HEARTBEAT_REPLY, "sync-heartbeat-reply", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_SYNC_LOCAL_CMD, "sync-local-cmd", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_SYNC_PRE_SNAPSHOT, "sync-pre-snapshot", NULL, NULL)              // no longer used
  TD_DEF_MSG_TYPE(TDMT_SYNC_PRE_SNAPSHOT_REPLY, "sync-pre-snapshot-reply", NULL, NULL)  // no longer used
  TD_DEF_MSG_TYPE(TDMT_SYNC_PREP_SNAPSHOT, "sync-prep-snapshot", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_SYNC_PREP_SNAPSHOT_REPLY, "sync-prep-snapshot-reply", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_SYNC_MAX_MSG, "sync-max", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_SYNC_FORCE_FOLLOWER, "sync-force-become-follower", NULL, NULL)

  TD_NEW_MSG_SEG(TDMT_VND_STREAM_MSG)
  // TD_DEF_MSG_TYPE(TDMT_VND_STREAM_TRIGGER, "vnode-stream-trigger", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_VND_STREAM_SCAN_HISTORY, "vnode-stream-scan-history", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_VND_STREAM_SCAN_HISTORY_FINISH, "vnode-stream-scan-history-finish", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_VND_STREAM_CHECK_POINT_SOURCE, "vnode-stream-checkpoint-source", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_VND_STREAM_TASK_UPDATE, "vnode-stream-update", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_VND_STREAM_TASK_RESET, "vnode-stream-reset", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_VND_STREAM_TASK_CHECK, "vnode-stream-task-check", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_VND_STREAM_MAX_MSG, "vnd-stream-max", NULL, NULL)
@ -115,247 +115,253 @@
#define TK_STT_TRIGGER 96
#define TK_TABLE_PREFIX 97
#define TK_TABLE_SUFFIX 98
#define TK_NK_COLON 99
#define TK_BWLIMIT 100
#define TK_START 101
#define TK_TIMESTAMP 102
#define TK_END 103
#define TK_TABLE 104
#define TK_NK_LP 105
#define TK_NK_RP 106
#define TK_STABLE 107
#define TK_COLUMN 108
#define TK_MODIFY 109
#define TK_RENAME 110
#define TK_TAG 111
#define TK_SET 112
#define TK_NK_EQ 113
#define TK_USING 114
#define TK_TAGS 115
#define TK_BOOL 116
#define TK_TINYINT 117
#define TK_SMALLINT 118
#define TK_INT 119
#define TK_INTEGER 120
#define TK_BIGINT 121
#define TK_FLOAT 122
#define TK_DOUBLE 123
#define TK_BINARY 124
#define TK_NCHAR 125
#define TK_UNSIGNED 126
#define TK_JSON 127
#define TK_VARCHAR 128
#define TK_MEDIUMBLOB 129
#define TK_BLOB 130
#define TK_VARBINARY 131
#define TK_GEOMETRY 132
#define TK_DECIMAL 133
#define TK_COMMENT 134
#define TK_MAX_DELAY 135
#define TK_WATERMARK 136
#define TK_ROLLUP 137
#define TK_TTL 138
#define TK_SMA 139
#define TK_DELETE_MARK 140
#define TK_FIRST 141
#define TK_LAST 142
#define TK_SHOW 143
#define TK_PRIVILEGES 144
#define TK_DATABASES 145
#define TK_TABLES 146
#define TK_STABLES 147
#define TK_MNODES 148
#define TK_QNODES 149
#define TK_FUNCTIONS 150
#define TK_INDEXES 151
#define TK_ACCOUNTS 152
#define TK_APPS 153
#define TK_CONNECTIONS 154
#define TK_LICENCES 155
#define TK_GRANTS 156
#define TK_QUERIES 157
#define TK_SCORES 158
#define TK_TOPICS 159
#define TK_VARIABLES 160
#define TK_CLUSTER 161
#define TK_BNODES 162
#define TK_SNODES 163
#define TK_TRANSACTIONS 164
#define TK_DISTRIBUTED 165
#define TK_CONSUMERS 166
#define TK_SUBSCRIPTIONS 167
#define TK_VNODES 168
#define TK_ALIVE 169
#define TK_LIKE 170
#define TK_TBNAME 171
#define TK_QTAGS 172
#define TK_AS 173
#define TK_INDEX 174
#define TK_FUNCTION 175
#define TK_INTERVAL 176
#define TK_COUNT 177
#define TK_LAST_ROW 178
#define TK_META 179
#define TK_ONLY 180
#define TK_TOPIC 181
#define TK_CONSUMER 182
#define TK_GROUP 183
#define TK_DESC 184
#define TK_DESCRIBE 185
#define TK_RESET 186
#define TK_QUERY 187
#define TK_CACHE 188
#define TK_EXPLAIN 189
#define TK_ANALYZE 190
#define TK_VERBOSE 191
#define TK_NK_BOOL 192
#define TK_RATIO 193
#define TK_NK_FLOAT 194
#define TK_OUTPUTTYPE 195
#define TK_AGGREGATE 196
#define TK_BUFSIZE 197
#define TK_LANGUAGE 198
#define TK_REPLACE 199
#define TK_STREAM 200
#define TK_INTO 201
#define TK_PAUSE 202
#define TK_RESUME 203
#define TK_TRIGGER 204
#define TK_AT_ONCE 205
#define TK_WINDOW_CLOSE 206
#define TK_IGNORE 207
#define TK_EXPIRED 208
#define TK_FILL_HISTORY 209
#define TK_UPDATE 210
#define TK_SUBTABLE 211
#define TK_UNTREATED 212
#define TK_KILL 213
#define TK_CONNECTION 214
#define TK_TRANSACTION 215
#define TK_BALANCE 216
#define TK_VGROUP 217
#define TK_LEADER 218
#define TK_MERGE 219
#define TK_REDISTRIBUTE 220
#define TK_SPLIT 221
#define TK_DELETE 222
#define TK_INSERT 223
#define TK_NULL 224
#define TK_NK_QUESTION 225
#define TK_NK_ARROW 226
#define TK_ROWTS 227
#define TK_QSTART 228
#define TK_QEND 229
#define TK_QDURATION 230
#define TK_WSTART 231
#define TK_WEND 232
#define TK_WDURATION 233
#define TK_IROWTS 234
#define TK_ISFILLED 235
#define TK_CAST 236
#define TK_NOW 237
#define TK_TODAY 238
#define TK_TIMEZONE 239
#define TK_CLIENT_VERSION 240
#define TK_SERVER_VERSION 241
#define TK_SERVER_STATUS 242
#define TK_CURRENT_USER 243
#define TK_CASE 244
#define TK_WHEN 245
#define TK_THEN 246
#define TK_ELSE 247
#define TK_BETWEEN 248
#define TK_IS 249
#define TK_NK_LT 250
#define TK_NK_GT 251
#define TK_NK_LE 252
#define TK_NK_GE 253
#define TK_NK_NE 254
#define TK_MATCH 255
#define TK_NMATCH 256
#define TK_CONTAINS 257
#define TK_IN 258
#define TK_JOIN 259
#define TK_INNER 260
#define TK_SELECT 261
#define TK_NK_HINT 262
#define TK_DISTINCT 263
#define TK_WHERE 264
#define TK_PARTITION 265
#define TK_BY 266
#define TK_SESSION 267
#define TK_STATE_WINDOW 268
#define TK_EVENT_WINDOW 269
#define TK_SLIDING 270
#define TK_FILL 271
#define TK_VALUE 272
#define TK_VALUE_F 273
#define TK_NONE 274
#define TK_PREV 275
#define TK_NULL_F 276
#define TK_LINEAR 277
#define TK_NEXT 278
#define TK_HAVING 279
#define TK_RANGE 280
#define TK_EVERY 281
#define TK_ORDER 282
#define TK_SLIMIT 283
#define TK_SOFFSET 284
#define TK_LIMIT 285
#define TK_OFFSET 286
#define TK_ASC 287
#define TK_NULLS 288
#define TK_ABORT 289
#define TK_AFTER 290
#define TK_ATTACH 291
#define TK_BEFORE 292
#define TK_BEGIN 293
#define TK_BITAND 294
#define TK_BITNOT 295
#define TK_BITOR 296
#define TK_BLOCKS 297
#define TK_CHANGE 298
#define TK_COMMA 299
#define TK_CONCAT 300
#define TK_CONFLICT 301
#define TK_COPY 302
#define TK_DEFERRED 303
#define TK_DELIMITERS 304
#define TK_DETACH 305
#define TK_DIVIDE 306
#define TK_DOT 307
#define TK_EACH 308
#define TK_FAIL 309
#define TK_FILE 310
#define TK_FOR 311
#define TK_GLOB 312
#define TK_ID 313
#define TK_IMMEDIATE 314
#define TK_IMPORT 315
#define TK_INITIALLY 316
#define TK_INSTEAD 317
#define TK_ISNULL 318
#define TK_KEY 319
#define TK_MODULES 320
#define TK_NK_BITNOT 321
#define TK_NK_SEMI 322
#define TK_NOTNULL 323
#define TK_OF 324
#define TK_PLUS 325
#define TK_PRIVILEGE 326
#define TK_RAISE 327
#define TK_RESTRICT 328
#define TK_ROW 329
#define TK_SEMI 330
#define TK_STAR 331
#define TK_STATEMENT 332
#define TK_STRICT 333
#define TK_STRING 334
#define TK_TIMES 335
#define TK_VALUES 336
#define TK_VARIABLE 337
#define TK_VIEW 338
#define TK_WAL 339
#define TK_KEEP_TIME_OFFSET 99
#define TK_NK_COLON 100
#define TK_BWLIMIT 101
#define TK_START 102
#define TK_TIMESTAMP 103
#define TK_END 104
#define TK_TABLE 105
#define TK_NK_LP 106
#define TK_NK_RP 107
#define TK_STABLE 108
#define TK_COLUMN 109
#define TK_MODIFY 110
#define TK_RENAME 111
#define TK_TAG 112
#define TK_SET 113
#define TK_NK_EQ 114
#define TK_USING 115
#define TK_TAGS 116
#define TK_BOOL 117
#define TK_TINYINT 118
#define TK_SMALLINT 119
#define TK_INT 120
#define TK_INTEGER 121
#define TK_BIGINT 122
#define TK_FLOAT 123
#define TK_DOUBLE 124
#define TK_BINARY 125
#define TK_NCHAR 126
#define TK_UNSIGNED 127
#define TK_JSON 128
#define TK_VARCHAR 129
#define TK_MEDIUMBLOB 130
#define TK_BLOB 131
#define TK_VARBINARY 132
#define TK_GEOMETRY 133
#define TK_DECIMAL 134
#define TK_COMMENT 135
#define TK_MAX_DELAY 136
#define TK_WATERMARK 137
#define TK_ROLLUP 138
#define TK_TTL 139
#define TK_SMA 140
#define TK_DELETE_MARK 141
#define TK_FIRST 142
#define TK_LAST 143
#define TK_SHOW 144
#define TK_PRIVILEGES 145
#define TK_DATABASES 146
#define TK_TABLES 147
#define TK_STABLES 148
#define TK_MNODES 149
#define TK_QNODES 150
#define TK_FUNCTIONS 151
#define TK_INDEXES 152
#define TK_ACCOUNTS 153
#define TK_APPS 154
#define TK_CONNECTIONS 155
#define TK_LICENCES 156
#define TK_GRANTS 157
#define TK_QUERIES 158
#define TK_SCORES 159
#define TK_TOPICS 160
#define TK_VARIABLES 161
#define TK_CLUSTER 162
#define TK_BNODES 163
#define TK_SNODES 164
#define TK_TRANSACTIONS 165
#define TK_DISTRIBUTED 166
#define TK_CONSUMERS 167
#define TK_SUBSCRIPTIONS 168
#define TK_VNODES 169
#define TK_ALIVE 170
#define TK_NORMAL 171
#define TK_CHILD 172
#define TK_LIKE 173
#define TK_TBNAME 174
#define TK_QTAGS 175
#define TK_AS 176
#define TK_SYSTEM 177
#define TK_INDEX 178
#define TK_FUNCTION 179
#define TK_INTERVAL 180
#define TK_COUNT 181
#define TK_LAST_ROW 182
#define TK_META 183
#define TK_ONLY 184
#define TK_TOPIC 185
#define TK_CONSUMER 186
#define TK_GROUP 187
#define TK_DESC 188
#define TK_DESCRIBE 189
#define TK_RESET 190
#define TK_QUERY 191
#define TK_CACHE 192
#define TK_EXPLAIN 193
#define TK_ANALYZE 194
#define TK_VERBOSE 195
#define TK_NK_BOOL 196
#define TK_RATIO 197
#define TK_NK_FLOAT 198
#define TK_OUTPUTTYPE 199
#define TK_AGGREGATE 200
#define TK_BUFSIZE 201
#define TK_LANGUAGE 202
#define TK_REPLACE 203
#define TK_STREAM 204
#define TK_INTO 205
#define TK_PAUSE 206
#define TK_RESUME 207
#define TK_TRIGGER 208
#define TK_AT_ONCE 209
#define TK_WINDOW_CLOSE 210
#define TK_IGNORE 211
#define TK_EXPIRED 212
#define TK_FILL_HISTORY 213
#define TK_UPDATE 214
#define TK_SUBTABLE 215
#define TK_UNTREATED 216
#define TK_KILL 217
#define TK_CONNECTION 218
#define TK_TRANSACTION 219
#define TK_BALANCE 220
#define TK_VGROUP 221
#define TK_LEADER 222
#define TK_MERGE 223
#define TK_REDISTRIBUTE 224
#define TK_SPLIT 225
#define TK_DELETE 226
#define TK_INSERT 227
#define TK_NULL 228
#define TK_NK_QUESTION 229
#define TK_NK_ARROW 230
#define TK_ROWTS 231
#define TK_QSTART 232
#define TK_QEND 233
#define TK_QDURATION 234
#define TK_WSTART 235
#define TK_WEND 236
#define TK_WDURATION 237
#define TK_IROWTS 238
#define TK_ISFILLED 239
#define TK_CAST 240
#define TK_NOW 241
#define TK_TODAY 242
#define TK_TIMEZONE 243
#define TK_CLIENT_VERSION 244
#define TK_SERVER_VERSION 245
#define TK_SERVER_STATUS 246
#define TK_CURRENT_USER 247
#define TK_CASE 248
#define TK_WHEN 249
#define TK_THEN 250
#define TK_ELSE 251
#define TK_BETWEEN 252
#define TK_IS 253
#define TK_NK_LT 254
#define TK_NK_GT 255
#define TK_NK_LE 256
#define TK_NK_GE 257
#define TK_NK_NE 258
#define TK_MATCH 259
#define TK_NMATCH 260
#define TK_CONTAINS 261
#define TK_IN 262
#define TK_JOIN 263
#define TK_INNER 264
#define TK_SELECT 265
#define TK_NK_HINT 266
#define TK_DISTINCT 267
#define TK_WHERE 268
#define TK_PARTITION 269
#define TK_BY 270
#define TK_SESSION 271
#define TK_STATE_WINDOW 272
#define TK_EVENT_WINDOW 273
#define TK_SLIDING 274
#define TK_FILL 275
#define TK_VALUE 276
#define TK_VALUE_F 277
#define TK_NONE 278
#define TK_PREV 279
#define TK_NULL_F 280
#define TK_LINEAR 281
#define TK_NEXT 282
#define TK_HAVING 283
#define TK_RANGE 284
#define TK_EVERY 285
#define TK_ORDER 286
#define TK_SLIMIT 287
#define TK_SOFFSET 288
#define TK_LIMIT 289
#define TK_OFFSET 290
#define TK_ASC 291
#define TK_NULLS 292
#define TK_ABORT 293
#define TK_AFTER 294
#define TK_ATTACH 295
#define TK_BEFORE 296
#define TK_BEGIN 297
#define TK_BITAND 298
#define TK_BITNOT 299
#define TK_BITOR 300
#define TK_BLOCKS 301
#define TK_CHANGE 302
#define TK_COMMA 303
#define TK_CONCAT 304
#define TK_CONFLICT 305
#define TK_COPY 306
#define TK_DEFERRED 307
#define TK_DELIMITERS 308
#define TK_DETACH 309
#define TK_DIVIDE 310
#define TK_DOT 311
#define TK_EACH 312
#define TK_FAIL 313
#define TK_FILE 314
#define TK_FOR 315
#define TK_GLOB 316
#define TK_ID 317
#define TK_IMMEDIATE 318
#define TK_IMPORT 319
#define TK_INITIALLY 320
#define TK_INSTEAD 321
#define TK_ISNULL 322
#define TK_KEY 323
#define TK_MODULES 324
#define TK_NK_BITNOT 325
#define TK_NK_SEMI 326
#define TK_NOTNULL 327
#define TK_OF 328
#define TK_PLUS 329
#define TK_PRIVILEGE 330
#define TK_RAISE 331
#define TK_RESTRICT 332
#define TK_ROW 333
#define TK_SEMI 334
#define TK_STAR 335
#define TK_STATEMENT 336
#define TK_STRICT 337
#define TK_STRING 338
#define TK_TIMES 339
#define TK_VALUES 340
#define TK_VARIABLE 341
#define TK_VIEW 342
#define TK_WAL 343


#define TK_NK_SPACE 600
@ -29,7 +29,7 @@
extern "C" {
#endif

#define AUDIT_DETAIL_MAX 16000
#define AUDIT_DETAIL_MAX 65472

typedef struct {
  const char *server;

@ -39,7 +39,8 @@ typedef struct {

int32_t auditInit(const SAuditCfg *pCfg);
void    auditSend(SJson *pJson);
void    auditRecord(SRpcMsg *pReq, int64_t clusterId, char *operation, char *target1, char *target2, char *detail);
void    auditRecord(SRpcMsg *pReq, int64_t clusterId, char *operation, char *target1, char *target2,
                    char *detail, int32_t len);

#ifdef __cplusplus
}
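`auditRecord()` now takes the detail length explicitly, which lets callers pass buffers up to the enlarged `AUDIT_DETAIL_MAX` without relying on NUL termination. A hedged sketch of the adjusted call; the operation and target strings are made up for illustration:

```c
char detail[128] = {0};
int32_t len = snprintf(detail, sizeof(detail), "db:%s, replica:%d", "db1", 3);
auditRecord(pReq, clusterId, "createDb", "db1", "", detail, len);
```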
@ -22,7 +22,7 @@

typedef struct SExplainCtx SExplainCtx;

int32_t qExecCommand(int64_t* pConnId, bool sysInfoUser, SNode *pStmt, SRetrieveTableRsp **pRsp);
int32_t qExecCommand(int64_t* pConnId, bool sysInfoUser, SNode *pStmt, SRetrieveTableRsp **pRsp, int8_t biMode);

int32_t qExecStaticExplain(SQueryPlan *pDag, SRetrieveTableRsp **pRsp);
int32_t qExecExplainBegin(SQueryPlan *pDag, SExplainCtx **pCtx, int64_t startTs);
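The new trailing `biMode` plumbs the connection's BI setting (see `TAOS_CONN_MODE_BI` in taos.h earlier in this commit) down into command execution. An illustrative call, assuming 0 keeps the old non-BI behavior:

```c
int8_t  biMode = 0;  /* assumed default: BI mode off */
int32_t code = qExecCommand(&connId, sysInfoUser, pStmt, &pRsp, biMode);
```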
@ -38,6 +38,9 @@ extern "C" {

#define META_READER_NOLOCK 0x1

#define STREAM_STATE_BUFF_HASH 1
#define STREAM_STATE_BUFF_SORT 2

typedef struct SMeta SMeta;
typedef TSKEY (*GetTsFun)(void*);

@ -115,6 +118,7 @@ typedef struct SRowBuffPos {
  void* pKey;
  bool  beFlushed;
  bool  beUsed;
  bool  needFree;
} SRowBuffPos;

// tq

@ -142,6 +146,7 @@ typedef struct SSnapContext {
typedef struct {
  int64_t uid;
  int64_t ctbNum;
  int32_t colNum;
} SMetaStbStats;

// void tqReaderSetColIdList(STqReader *pReader, SArray *pColIdList);

@ -222,6 +227,7 @@ typedef struct SStoreTqReader {
  bool (*tqReaderNextBlockInWal)();
  bool (*tqNextBlockImpl)();  // todo remove it
  SSDataBlock* (*tqGetResultBlock)();
  int64_t (*tqGetResultBlockTime)();

  void (*tqReaderSetColIdList)();
  int32_t (*tqReaderSetQueryTableList)();

@ -285,8 +291,8 @@ typedef struct SStoreMeta {

  // db name, vgId, numOfTables, numOfSTables
  int32_t (*getNumOfChildTables)(
      void* pVnode, int64_t uid,
      int64_t* numOfTables);  // int32_t metaGetStbStats(SMeta *pMeta, int64_t uid, SMetaStbStats *pInfo);
      void* pVnode, int64_t uid, int64_t* numOfTables,
      int32_t* numOfCols);  // int32_t metaGetStbStats(SMeta *pMeta, int64_t uid, SMetaStbStats *pInfo);
  void (*getBasicInfo)(void* pVnode, const char** dbname, int32_t* vgId, int64_t* numOfTables,
                       int64_t* numOfNormalTables);  // vnodeGetInfo(void *pVnode, const char **dbname, int32_t *vgId) &
                                                     // metaGetTbNum(SMeta *pMeta) & metaGetNtbNum(SMeta *pMeta);

@ -332,6 +338,8 @@ typedef struct {
  void*   db;  // rocksdb_t* db;
  void*   pCur;
  int64_t number;
  void*   pStreamFileState;
  int32_t buffIndex;
} SStreamStateCur;

typedef struct SStateStore {

@ -339,7 +347,8 @@ typedef struct SStateStore {
  int32_t (*streamStateGetParName)(SStreamState* pState, int64_t groupId, void** pVal);

  int32_t (*streamStateAddIfNotExist)(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);
  int32_t (*streamStateReleaseBuf)(SStreamState* pState, const SWinKey* key, void* pVal);
  int32_t (*streamStateReleaseBuf)(SStreamState* pState, void* pVal, bool used);
  int32_t (*streamStateClearBuff)(SStreamState* pState, void* pVal);
  void (*streamStateFreeVal)(void* val);

  int32_t (*streamStatePut)(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen);

@ -370,7 +379,7 @@ typedef struct SStateStore {

  int32_t (*streamStateSessionAddIfNotExist)(SStreamState* pState, SSessionKey* key, TSKEY gap, void** pVal,
                                             int32_t* pVLen);
  int32_t (*streamStateSessionPut)(SStreamState* pState, const SSessionKey* key, const void* value, int32_t vLen);
  int32_t (*streamStateSessionPut)(SStreamState* pState, const SSessionKey* key, void* value, int32_t vLen);
  int32_t (*streamStateSessionGet)(SStreamState* pState, SSessionKey* key, void** pVal, int32_t* pVLen);
  int32_t (*streamStateSessionDel)(SStreamState* pState, const SSessionKey* key);
  int32_t (*streamStateSessionClear)(SStreamState* pState);

@ -399,7 +408,7 @@ typedef struct SStateStore {

  struct SStreamFileState* (*streamFileStateInit)(int64_t memSize, uint32_t keySize, uint32_t rowSize,
                                                  uint32_t selectRowSize, GetTsFun fp, void* pFile, TSKEY delMark,
                                                  const char* id, int64_t ckId);
                                                  const char* id, int64_t ckId, int8_t type);

  void (*streamFileStateDestroy)(struct SStreamFileState* pFileState);
  void (*streamFileStateClear)(struct SStreamFileState* pFileState);
@ -191,7 +191,7 @@ typedef struct {
} SMonBmInfo;

typedef struct {
  SArray *pVloads;  // SVnodeLoad
  SArray *pVloads;  // SVnodeLoad/SVnodeLoadLite
} SMonVloadInfo;

typedef struct {

@ -206,6 +206,11 @@ typedef struct {
  bool comp;
} SMonCfg;

typedef struct {
  int8_t state;
  tsem_t sem;
} SDmNotifyHandle;

int32_t monInit(const SMonCfg *pCfg);
void    monCleanup();
void    monRecordLog(int64_t ts, ELogLevel level, const char *content);
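The new `SDmNotifyHandle` pairs a state flag with a semaphore, which suggests a simple wake-up protocol between a producer and the monitor thread. A hedged sketch using the `tsem_*` wrappers; the exact protocol is an assumption, not spelled out in this commit:

```c
static SDmNotifyHandle handle;

void notify_init(void)   { tsem_init(&handle.sem, 0, 0); }
void notify_signal(void) { handle.state = 1; tsem_post(&handle.sem); }  /* producer side */
void notify_wait(void)   { tsem_wait(&handle.sem); handle.state = 0; }  /* monitor thread side */
```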
@ -66,6 +66,7 @@ typedef struct SDatabaseOptions {
  int32_t    minRowsPerBlock;
  SNodeList* pKeep;
  int64_t    keep[3];
  int32_t    keepTimeOffset;
  int32_t    pages;
  int32_t    pagesize;
  int32_t    tsdbPageSize;

@ -273,6 +274,7 @@ typedef struct SShowStmt {
  SNode*        pDbName;  // SValueNode
  SNode*        pTbName;  // SValueNode
  EOperatorType tableCondType;
  EShowKind     showKind;  // show databases: user/system, show tables: normal/child, others NULL
} SShowStmt;

typedef struct SShowCreateDatabaseStmt {

@ -505,6 +507,7 @@ typedef struct SBalanceVgroupStmt {

typedef struct SBalanceVgroupLeaderStmt {
  ENodeType type;
  int32_t   vgId;
} SBalanceVgroupLeaderStmt;

typedef struct SMergeVgroupStmt {
@ -124,6 +124,7 @@ int32_t nodesListStrictAppendList(SNodeList* pTarget, SNodeList* pSrc);
int32_t nodesListPushFront(SNodeList* pList, SNode* pNode);
SListCell* nodesListErase(SNodeList* pList, SListCell* pCell);
void nodesListInsertList(SNodeList* pTarget, SListCell* pPos, SNodeList* pSrc);
void nodesListInsertListAfterPos(SNodeList* pTarget, SListCell* pPos, SNodeList* pSrc);
SNode* nodesListGetNode(SNodeList* pList, int32_t index);
SListCell* nodesListGetCell(SNodeList* pList, int32_t index);
void nodesDestroyList(SNodeList* pList);
@ -35,6 +35,7 @@ typedef struct SRawExprNode {
  char*    p;
  uint32_t n;
  SNode*   pNode;
  bool     isPseudoColumn;
} SRawExprNode;

typedef struct SDataType {

@ -277,6 +278,14 @@ typedef enum ETimeLineMode {
  TIME_LINE_GLOBAL,
} ETimeLineMode;

typedef enum EShowKind {
  SHOW_KIND_ALL = 1,
  SHOW_KIND_TABLES_NORMAL,
  SHOW_KIND_TABLES_CHILD,
  SHOW_KIND_DATABASES_USER,
  SHOW_KIND_DATABASES_SYSTEM
} EShowKind;
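`EShowKind` backs the new normal/child table and user/system database filters (matching the `TK_NORMAL`, `TK_CHILD`, and `TK_SYSTEM` tokens added in ttokendef.h earlier in this commit, and the `showKind` field on `SShowStmt`). A plausible dispatch sketch, not taken from the commit:

```c
switch (pStmt->showKind) {
  case SHOW_KIND_TABLES_NORMAL:    /* SHOW NORMAL TABLES */    break;
  case SHOW_KIND_TABLES_CHILD:     /* SHOW CHILD TABLES */     break;
  case SHOW_KIND_DATABASES_USER:   /* SHOW USER DATABASES */   break;
  case SHOW_KIND_DATABASES_SYSTEM: /* SHOW SYSTEM DATABASES */ break;
  default:                         /* SHOW_KIND_ALL: no extra filtering */ break;
}
```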
typedef struct SFillNode {
  ENodeType type;  // QUERY_NODE_FILL
  EFillMode mode;

@ -519,6 +528,8 @@ void* nodesGetValueFromNode(SValueNode* pNode);
int32_t nodesSetValueNodeValue(SValueNode* pNode, void* value);
char* nodesGetStrValueFromNode(SValueNode* pNode);
void nodesValueNodeToVariant(const SValueNode* pNode, SVariant* pVal);
SValueNode* nodesMakeValueNodeFromString(char* literal);
SValueNode* nodesMakeValueNodeFromBool(bool b);

char* nodesGetFillModeString(EFillMode mode);
int32_t nodesMergeConds(SNode** pDst, SNodeList** pSrc);

@ -526,6 +537,9 @@ int32_t nodesMergeConds(SNode** pDst, SNodeList** pSrc);
const char* operatorTypeStr(EOperatorType type);
const char* logicConditionTypeStr(ELogicConditionType type);

bool nodesIsStar(SNode* pNode);
bool nodesIsTableStar(SNode* pNode);

#ifdef __cplusplus
}
#endif
@ -64,6 +64,7 @@ typedef struct SParseContext {
  SArray* pTableMetaPos;    // sql table pos => catalog data pos
  SArray* pTableVgroupPos;  // sql table pos => catalog data pos
  int64_t allocatorId;
  int8_t  biMode;
} SParseContext;

int32_t qParseSql(SParseContext* pCxt, SQuery** pQuery);
@ -49,26 +49,30 @@ void streamStateSetNumber(SStreamState* pState, int32_t number);
int32_t streamStateSaveInfo(SStreamState* pState, void* pKey, int32_t keyLen, void* pVal, int32_t vLen);
int32_t streamStateGetInfo(SStreamState* pState, void* pKey, int32_t keyLen, void** pVal, int32_t* pLen);

// session window
int32_t streamStateSessionAddIfNotExist(SStreamState* pState, SSessionKey* key, TSKEY gap, void** pVal, int32_t* pVLen);
int32_t streamStateSessionPut(SStreamState* pState, const SSessionKey* key, const void* value, int32_t vLen);
int32_t streamStateSessionPut(SStreamState* pState, const SSessionKey* key, void* value, int32_t vLen);
int32_t streamStateSessionGet(SStreamState* pState, SSessionKey* key, void** pVal, int32_t* pVLen);
int32_t streamStateSessionDel(SStreamState* pState, const SSessionKey* key);
int32_t streamStateSessionClear(SStreamState* pState);
int32_t streamStateSessionGetKVByCur(SStreamStateCur* pCur, SSessionKey* pKey, void** pVal, int32_t* pVLen);
int32_t streamStateStateAddIfNotExist(SStreamState* pState, SSessionKey* key, char* pKeyData, int32_t keyDataLen,
                                      state_key_cmpr_fn fn, void** pVal, int32_t* pVLen);
int32_t streamStateSessionGetKeyByRange(SStreamState* pState, const SSessionKey* range, SSessionKey* curKey);

SStreamStateCur* streamStateSessionSeekKeyNext(SStreamState* pState, const SSessionKey* key);
SStreamStateCur* streamStateSessionSeekKeyCurrentPrev(SStreamState* pState, const SSessionKey* key);
SStreamStateCur* streamStateSessionSeekKeyCurrentNext(SStreamState* pState, const SSessionKey* key);

// state window
int32_t streamStateStateAddIfNotExist(SStreamState* pState, SSessionKey* key, char* pKeyData, int32_t keyDataLen,
                                      state_key_cmpr_fn fn, void** pVal, int32_t* pVLen);

int32_t streamStateFillPut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen);
int32_t streamStateFillGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);
int32_t streamStateFillDel(SStreamState* pState, const SWinKey* key);

int32_t streamStateAddIfNotExist(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);
int32_t streamStateReleaseBuf(SStreamState* pState, const SWinKey* key, void* pVal);
int32_t streamStateReleaseBuf(SStreamState* pState, void* pVal, bool used);
int32_t streamStateClearBuff(SStreamState* pState, void* pVal);
void streamStateFreeVal(void* val);

SStreamStateCur* streamStateGetAndCheckCur(SStreamState* pState, SWinKey* key);

@ -76,14 +80,11 @@ SStreamStateCur* streamStateSeekKeyNext(SStreamState* pState, const SWinKey* key
SStreamStateCur* streamStateFillSeekKeyNext(SStreamState* pState, const SWinKey* key);
SStreamStateCur* streamStateFillSeekKeyPrev(SStreamState* pState, const SWinKey* key);
void streamStateFreeCur(SStreamStateCur* pCur);
void streamStateResetCur(SStreamStateCur* pCur);

int32_t streamStateGetGroupKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen);
int32_t streamStateGetKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen);

int32_t streamStateGetFirst(SStreamState* pState, SWinKey* key);
int32_t streamStateSeekFirst(SStreamState* pState, SStreamStateCur* pCur);
int32_t streamStateSeekLast(SStreamState* pState, SStreamStateCur* pCur);

int32_t streamStateCurNext(SStreamState* pState, SStreamStateCur* pCur);
int32_t streamStateCurPrev(SStreamState* pState, SStreamStateCur* pCur);

@ -91,6 +92,7 @@ int32_t streamStatePutParName(SStreamState* pState, int64_t groupId, const char*
int32_t streamStateGetParName(SStreamState* pState, int64_t groupId, void** pVal);

void streamStateReloadInfo(SStreamState* pState, TSKEY ts);
SStreamStateCur* createStreamStateCursor();

/***compare func **/
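`streamStateReleaseBuf()` no longer takes the window key; buffers are now released by pointer plus a `used` flag, with a separate `streamStateClearBuff()` covering the reset case. A sketch of the migrated call site (illustrative, assuming 0 means success):

```c
void*   pVal = NULL;
int32_t vLen = 0;
if (streamStateAddIfNotExist(pState, &key, &pVal, &vLen) == 0) {
  /* ... fill or read the row buffer ... */
  streamStateReleaseBuf(pState, pVal, true);  /* old form: streamStateReleaseBuf(pState, &key, pVal) */
}
```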
@ -29,7 +29,22 @@ extern "C" {
#ifndef _STREAM_H_
#define _STREAM_H_

#define ONE_MiB_F (1048576.0)
#define ONE_KiB_F (1024.0)
#define SIZE_IN_MiB(_v) ((_v) / ONE_MiB_F)
#define SIZE_IN_KiB(_v) ((_v) / ONE_KiB_F)

#define TASK_DOWNSTREAM_READY      0x0
#define TASK_DOWNSTREAM_NOT_READY  0x1
#define TASK_DOWNSTREAM_NOT_LEADER 0x2
#define TASK_SELF_NEW_STAGE        0x3

#define NODE_ROLE_UNINIT   0x1
#define NODE_ROLE_LEADER   0x2
#define NODE_ROLE_FOLLOWER 0x3

typedef struct SStreamTask  SStreamTask;
typedef struct SStreamQueue SStreamQueue;

#define SSTREAM_TASK_VER 3
#define SSTREAM_TASK_INCOMPATIBLE_VER 1
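The new byte-size helpers are straightforward unit conversions for logging and accounting; for instance (illustrative):

```c
double mb = SIZE_IN_MiB(3 * 1048576);  /* 3.0 */
double kb = SIZE_IN_KiB(2048);         /* 2.0 */
```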
@@ -67,6 +82,7 @@ enum {
  TASK_INPUT_STATUS__NORMAL = 1,
  TASK_INPUT_STATUS__BLOCKED,
  TASK_INPUT_STATUS__FAILED,
  TASK_INPUT_STATUS__REFUSED,
};

enum {

@@ -109,6 +125,7 @@ typedef struct {
} SStreamQueueItem;

typedef void    FTbSink(SStreamTask* pTask, void* vnode, void* data);
typedef void    FSmaSink(void* vnode, int64_t smaId, const SArray* data);
typedef int32_t FTaskExpand(void* ahandle, SStreamTask* pTask, int64_t ver);

typedef struct {

@@ -157,8 +174,6 @@ typedef struct {
  int64_t size;
} SStreamQueueRes;

void streamFreeQitem(SStreamQueueItem* data);

#if 0
bool    streamQueueResEmpty(const SStreamQueueRes* pRes);
int64_t streamQueueResSize(const SStreamQueueRes* pRes);

@@ -178,22 +193,9 @@ int32_t streamQueuePush(SStreamQueue1* pQueue, SStreamQueueItem* pItem);
SStreamQueueRes streamQueueGetRes(SStreamQueue1* pQueue);
#endif

typedef struct {
  STaosQueue* pQueue;
  STaosQall*  qall;
  void*       qItem;
  int8_t      status;
} SStreamQueue;

int32_t streamInit();
void    streamCleanUp();

SStreamQueue* streamQueueOpen(int64_t cap);
void          streamQueueClose(SStreamQueue* pQueue, int32_t taskId);
void          streamQueueProcessSuccess(SStreamQueue* queue);
void          streamQueueProcessFail(SStreamQueue* queue);
void*         streamQueueNextItem(SStreamQueue* pQueue);

SStreamDataSubmit* streamDataSubmitNew(SPackedData* pData, int32_t type);
void               streamDataSubmitDestroy(SStreamDataSubmit* pDataSubmit);

@@ -207,7 +209,7 @@ typedef struct {
  int32_t taskId;
  int32_t nodeId;
  SEpSet  epSet;
} STaskDispatcherFixedEp;
} STaskDispatcherFixed;

typedef struct {
  char stbFullName[TSDB_TABLE_FNAME_LEN];

@@ -225,8 +227,6 @@ typedef struct {
  SSHashObj* pTblInfo;
} STaskSinkTb;

typedef void FSmaSink(void* vnode, int64_t smaId, const SArray* data);

typedef struct {
  int64_t smaId;
  // following are not applicable to encoder and decoder

@@ -247,10 +247,10 @@ typedef struct SStreamChildEpInfo {
  int64_t stage;  // upstream task stage value, to denote if the upstream node has restart/replica changed/transfer
} SStreamChildEpInfo;

typedef struct SStreamTaskKey {
typedef struct STaskId {
  int64_t streamId;
  int32_t taskId;
} SStreamTaskKey;
  int64_t taskId;
} STaskId;

typedef struct SStreamTaskId {
  int64_t streamId;
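SStreamTaskKey becomes STaskId and taskId widens to int64_t, so the pair is a fixed-size 128-bit composite key. A hedged lookup sketch keyed this way (taosHashGet is the hash API used elsewhere in the tree; its use here is an assumption):

    // Hypothetical lookup keyed by the {streamId, taskId} pair.
    static SStreamTask* findTask(SHashObj* pTasksMap, int64_t streamId, int64_t taskId) {
      STaskId       id = {.streamId = streamId, .taskId = taskId};
      SStreamTask** ppTask = (SStreamTask**)taosHashGet(pTasksMap, &id, sizeof(id));
      return (ppTask != NULL) ? *ppTask : NULL;
    }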
@@ -259,20 +259,23 @@ typedef struct SStreamTaskId {
} SStreamTaskId;

typedef struct SCheckpointInfo {
  int64_t startTs;
  int64_t checkpointId;
  int64_t checkpointVer;   // latest checkpointId version
  int64_t nextProcessVer;  // current offset in WAL, not serialize it
  int64_t failedId;        // record the latest failed checkpoint id
  int64_t msgVer;
} SCheckpointInfo;

typedef struct SStreamStatus {
  int8_t taskStatus;
  int8_t downstreamReady;  // downstream tasks are all ready now, if this flag is set
  int8_t schedStatus;
  int8_t keepTaskStatus;
  bool   appendTranstateBlock;  // has append the transfer state data block already, todo: remove it
  int8_t timerActive;   // timer is active
  int8_t pauseAllowed;  // allowed task status to be set to be paused
  int8_t  taskStatus;
  int8_t  downstreamReady;  // downstream tasks are all ready now, if this flag is set
  int8_t  schedStatus;
  int8_t  keepTaskStatus;
  bool    appendTranstateBlock;  // has append the transfer state data block already, todo: remove it
  int8_t  pauseAllowed;  // allowed task status to be set to be paused
  int32_t timerActive;   // timer is active
  int32_t inScanHistorySentinel;
} SStreamStatus;

typedef struct SDataRange {

@@ -291,18 +294,24 @@ typedef struct SSTaskBasicInfo {
  int64_t triggerParam;  // in msec
} SSTaskBasicInfo;

typedef struct SStreamDispatchReq SStreamDispatchReq;
typedef struct STokenBucket STokenBucket;
typedef struct SMetaHbInfo SMetaHbInfo;

typedef struct SDispatchMsgInfo {
  void*   pData;       // current dispatch data
  int16_t msgType;     // dispatch msg type
  int32_t retryCount;  // retry send data count
  int64_t blockingTs;  // output blocking timestamp
  SStreamDispatchReq* pData;  // current dispatch data
  int8_t  dispatchMsgType;
  int16_t msgType;     // dispatch msg type
  int32_t retryCount;  // retry send data count
  int64_t startTs;     // dispatch start time, record total elapsed time for dispatch
  SArray* pRetryList;  // current dispatch successfully completed node of downstream
  void*   pTimer;      // used to dispatch data after a given time duration
} SDispatchMsgInfo;

typedef struct STaskOutputInfo {
  int8_t type;
typedef struct STaskOutputQueue {
  int8_t        status;
  SStreamQueue* queue;
} STaskOutputInfo;
} STaskOutputQueue;

typedef struct STaskInputInfo {
  int8_t status;

@@ -314,62 +323,76 @@ typedef struct STaskSchedInfo {
  void*   pTimer;
} STaskSchedInfo;

typedef struct SSinkTaskRecorder {
typedef struct SSinkRecorder {
  int64_t numOfSubmit;
  int64_t numOfBlocks;
  int64_t numOfRows;
} SSinkTaskRecorder;
  int64_t dataSize;
} SSinkRecorder;

typedef struct {
  int64_t created;
  int64_t init;
  int64_t step1Start;
  int64_t step2Start;
  int64_t sinkStart;
} STaskTimestamp;
typedef struct STaskExecStatisInfo {
  int64_t created;
  int64_t init;
  int64_t start;
  int64_t step1Start;
  int64_t step2Start;
  int32_t updateCount;
  int64_t latestUpdateTs;
  int32_t processDataBlocks;
  int64_t processDataSize;
  int32_t dispatch;
  int64_t dispatchDataSize;
  int32_t checkpoint;
  SSinkRecorder sink;
} STaskExecStatisInfo;

typedef struct STokenBucket {
  int32_t capacity;       // total capacity
  int64_t fillTimestamp;  // fill timestamp
  int32_t numOfToken;     // total available tokens
  int32_t rate;           // number of token per second
} STokenBucket;
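STokenBucket carries classic rate-limiter state: a capacity, a refill timestamp, the current token count, and a per-second rate. A minimal refill-then-consume sketch over those fields (the function name and the millisecond clock source are assumptions):

    // Hedged sketch of token-bucket admission control.
    static bool tokenBucketTryConsume(STokenBucket* pBucket, int64_t nowMs) {
      int64_t elapsedSec = (nowMs - pBucket->fillTimestamp) / 1000;
      if (elapsedSec > 0) {  // top up one rate's worth per elapsed second
        int64_t refilled = pBucket->numOfToken + elapsedSec * pBucket->rate;
        pBucket->numOfToken = (refilled > pBucket->capacity) ? pBucket->capacity : (int32_t)refilled;
        pBucket->fillTimestamp = nowMs;
      }
      if (pBucket->numOfToken > 0) {
        pBucket->numOfToken -= 1;  // admit one unit of output
        return true;
      }
      return false;  // caller retries after the next refill window
    }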
typedef struct SHistoryTaskInfo {
  STaskId id;
  void*   pTimer;
  int32_t tickCount;
  int32_t retryTimes;
  int32_t waitInterval;
} SHistoryTaskInfo;

struct SStreamTask {
  int64_t          ver;
  SStreamTaskId    id;
  SSTaskBasicInfo  info;
  STaskOutputInfo  outputInfo;
  STaskInputInfo   inputInfo;
  STaskSchedInfo   schedInfo;
  SDispatchMsgInfo msgInfo;
  SStreamStatus    status;
  SCheckpointInfo  chkInfo;
  STaskExec        exec;
  SDataRange       dataRange;
  SStreamTaskId    historyTaskId;
  SStreamTaskId    streamTaskId;
  STaskTimestamp   tsInfo;
  SArray*          pReadyMsgList;  // SArray<SStreamChkptReadyInfo*>
  TdThreadMutex    lock;           // secure the operation of set task status and puting data into inputQ
  SArray*          pUpstreamInfoList;

  // output
typedef struct STaskOutputInfo {
  union {
    STaskDispatcherFixedEp fixedEpDispatcher;
    STaskDispatcherFixed   fixedDispatcher;
    STaskDispatcherShuffle shuffleDispatcher;
    STaskSinkTb            tbSink;
    STaskSinkSma           smaSink;
    STaskSinkFetch         fetchSink;
  };
  SSinkTaskRecorder sinkRecorder;
  STokenBucket      tokenBucket;
  int8_t        type;
  STokenBucket* pTokenBucket;
} STaskOutputInfo;

  void*         launchTaskTimer;
  SMsgCb*       pMsgCb;  // msg handle
  SStreamState* pState;  // state backend
  SArray*       pRspMsgList;
typedef struct SUpstreamInfo {
  SArray* pList;
  int32_t numOfClosed;
} SUpstreamInfo;

struct SStreamTask {
  int64_t             ver;
  SStreamTaskId       id;
  SSTaskBasicInfo     info;
  STaskOutputQueue    outputq;
  STaskInputInfo      inputInfo;
  STaskSchedInfo      schedInfo;
  STaskOutputInfo     outputInfo;
  SDispatchMsgInfo    msgInfo;
  SStreamStatus       status;
  SCheckpointInfo     chkInfo;
  STaskExec           exec;
  SDataRange          dataRange;
  SHistoryTaskInfo    hTaskInfo;
  STaskId             streamTaskId;
  STaskExecStatisInfo execInfo;
  SArray*             pReadyMsgList;  // SArray<SStreamChkptReadyInfo*>
  TdThreadMutex       lock;           // secure the operation of set task status and puting data into inputQ
  SMsgCb*             pMsgCb;  // msg handle
  SStreamState*       pState;  // state backend
  SArray*             pRspMsgList;
  SUpstreamInfo       upstreamInfo;
  // the followings attributes don't be serialized
  int32_t notReadyTasks;
  int32_t numOfWaitingUpstream;

@@ -387,64 +410,68 @@ struct SStreamTask {
  char reserve[256];
};

typedef struct SMetaHbInfo {
  tmr_h   hbTmr;
  int32_t stopFlag;
  int32_t tickCounter;
} SMetaHbInfo;
typedef struct STaskStartInfo {
  int64_t   startTs;
  int64_t   readyTs;
  int32_t   startedAfterNodeUpdate;
  SHashObj* pReadyTaskSet;  // tasks that are all ready for running stream processing
  int32_t   elapsedTime;
} STaskStartInfo;

// meta
typedef struct SStreamMeta {
  char*         path;
  TDB*          db;
  TTB*          pTaskDb;
  TTB*          pCheckpointDb;
  SHashObj*     pTasks;
  SArray*       pTaskList;  // SArray<task_id*>
  void*         ahandle;
  TXN*          txn;
  FTaskExpand*  expandFunc;
  int32_t       vgId;
  int64_t       stage;
  SRWLatch      lock;
  int32_t       walScanCounter;
  void*         streamBackend;
  int64_t       streamBackendRid;
  SHashObj*     pTaskDbUnique;
  TdThreadMutex backendMutex;
  SMetaHbInfo   hbInfo;
  int32_t       closedTask;
  int32_t       totalTasks;  // this value should be increased when a new task is added into the meta
  int32_t       chkptNotReadyTasks;
  int64_t       rid;
  char*           path;
  TDB*            db;
  TTB*            pTaskDb;
  TTB*            pCheckpointDb;
  SHashObj*       pTasksMap;
  SArray*         pTaskList;  // SArray<STaskId*>
  void*           ahandle;
  TXN*            txn;
  FTaskExpand*    expandFunc;
  int32_t         vgId;
  int64_t         stage;
  int32_t         role;
  STaskStartInfo  startInfo;
  SRWLatch        lock;
  int32_t         walScanCounter;
  void*           streamBackend;
  int64_t         streamBackendRid;
  SHashObj*       pTaskDbUnique;
  TdThreadMutex   backendMutex;
  SMetaHbInfo*    pHbInfo;
  SHashObj*       pUpdateTaskSet;
  int32_t         numOfStreamTasks;  // this value should be increased when a new task is added into the meta
  int32_t         numOfPausedTasks;
  int32_t         chkptNotReadyTasks;
  int64_t         rid;

  int64_t  chkpId;
  int32_t  chkpCap;
  SArray*  chkpSaved;
  SArray*  chkpInUse;
  SRWLatch chkpDirLock;
  int32_t  pauseTaskNum;

  // SHashObj* pTaskDb;
  int32_t pauseTaskNum;
} SStreamMeta;

int32_t tEncodeStreamEpInfo(SEncoder* pEncoder, const SStreamChildEpInfo* pInfo);
int32_t tDecodeStreamEpInfo(SDecoder* pDecoder, SStreamChildEpInfo* pInfo);

SStreamTask* tNewStreamTask(int64_t streamId, int8_t taskLevel, int8_t fillHistory, int64_t triggerParam,
                            SArray* pTaskList);
SStreamTask* tNewStreamTask(int64_t streamId, int8_t taskLevel, bool fillHistory, int64_t triggerParam,
                            SArray* pTaskList, bool hasFillhistory);
int32_t tEncodeStreamTask(SEncoder* pEncoder, const SStreamTask* pTask);
int32_t tDecodeStreamTask(SDecoder* pDecoder, SStreamTask* pTask);
void    tFreeStreamTask(SStreamTask* pTask);
int32_t streamTaskInit(SStreamTask* pTask, SStreamMeta* pMeta, SMsgCb* pMsgCb, int64_t ver);

int32_t tDecodeStreamTaskChkInfo(SDecoder* pDecoder, SCheckpointInfo* pChkpInfo);
int32_t tDecodeStreamTaskId(SDecoder* pDecoder, SStreamTaskId* pTaskId);
int32_t tDecodeStreamTaskId(SDecoder* pDecoder, STaskId* pTaskId);

int32_t streamTaskPutDataIntoInputQ(SStreamTask* pTask, SStreamQueueItem* pItem);
int32_t streamTaskPutDataIntoOutputQ(SStreamTask* pTask, SStreamDataBlock* pBlock);
int32_t streamTaskPutTranstateIntoInputQ(SStreamTask* pTask);
bool    streamQueueIsFull(const STaosQueue* pQueue, bool inputQ);
bool    streamQueueIsFull(const SStreamQueue* pQueue);

typedef struct {
  SMsgHead head;
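tNewStreamTask now takes fillHistory as a bool and gains a hasFillhistory flag. A hedged call-site sketch (TASK_LEVEL__SOURCE is the task-level enum used elsewhere in the tree; treat it as an assumption here):

    // New-style call: a source task that owns a fill-history companion.
    SStreamTask* pTask = tNewStreamTask(streamId, TASK_LEVEL__SOURCE,
                                        /*fillHistory=*/false, triggerParam,
                                        pTaskList, /*hasFillhistory=*/true);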
@@ -452,11 +479,12 @@ typedef struct {
  int32_t taskId;
} SStreamTaskRunReq;

typedef struct {
struct SStreamDispatchReq {
  int32_t type;
  int64_t stage;  // nodeId from upstream task
  int64_t streamId;
  int32_t taskId;
  int32_t msgId;  // msg id to identify if the incoming msg from the same sender
  int32_t srcVgId;
  int32_t upstreamTaskId;
  int32_t upstreamChildId;

@@ -465,7 +493,7 @@ typedef struct {
  int64_t totalLen;
  SArray* dataLen;  // SArray<int32_t>
  SArray* data;     // SArray<SRetrieveTableRsp*>
} SStreamDispatchReq;
};

typedef struct {
  int64_t streamId;

@@ -473,7 +501,9 @@ typedef struct {
  int32_t upstreamTaskId;
  int32_t downstreamNodeId;
  int32_t downstreamTaskId;
  int32_t msgId;
  int8_t  inputStatus;
  int64_t stage;
} SStreamDispatchRsp;

typedef struct {
@@ -530,7 +560,7 @@ typedef struct {
  int32_t downstreamTaskId;
  int32_t upstreamNodeId;
  int32_t childId;
} SStreamScanHistoryFinishReq, SStreamTransferReq;
} SStreamScanHistoryFinishReq;

int32_t tEncodeStreamScanHistoryFinishReq(SEncoder* pEncoder, const SStreamScanHistoryFinishReq* pReq);
int32_t tDecodeStreamScanHistoryFinishReq(SDecoder* pDecoder, SStreamScanHistoryFinishReq* pReq);

@@ -576,9 +606,19 @@ int32_t tEncodeStreamCheckpointReadyMsg(SEncoder* pEncoder, const SStreamCheckpo
int32_t tDecodeStreamCheckpointReadyMsg(SDecoder* pDecoder, SStreamCheckpointReadyMsg* pRsp);

typedef struct STaskStatusEntry {
  int64_t streamId;
  int32_t taskId;
  STaskId id;
  int32_t status;
  int32_t stage;
  int32_t nodeId;
  int64_t verStart;            // start version in WAL, only valid for source task
  int64_t verEnd;              // end version in WAL, only valid for source task
  int64_t processedVer;        // only valid for source task
  int64_t activeCheckpointId;  // current active checkpoint id
  bool    checkpointFailed;    // denote if the checkpoint is failed or not
  double  inputQUsed;          // in MiB
  double  inputRate;
  double  sinkQuota;           // existed quota size for sink task
  double  sinkDataSize;        // sink to dest data size
} STaskStatusEntry;

typedef struct SStreamHbMsg {
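STaskStatusEntry reports queue and sink figures in MiB, which is what the ONE_MiB_F helpers added at the top of this header are for. A hedged one-liner (the byte count is a stand-in):

    int64_t usedBytes = 42 * 1048576;             // stand-in for the queue's byte count
    pEntry->inputQUsed = SIZE_IN_MiB(usedBytes);  // stored in MiB per the field comment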
|
@ -644,15 +684,14 @@ void tDeleteStreamDispatchReq(SStreamDispatchReq* pReq);
|
|||
|
||||
int32_t streamSetupScheduleTrigger(SStreamTask* pTask);
|
||||
|
||||
int32_t streamProcessRunReq(SStreamTask* pTask);
|
||||
int32_t streamProcessDispatchMsg(SStreamTask* pTask, SStreamDispatchReq* pReq, SRpcMsg* pMsg, bool exec);
|
||||
int32_t streamProcessDispatchMsg(SStreamTask* pTask, SStreamDispatchReq* pReq, SRpcMsg* pMsg);
|
||||
int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, int32_t code);
|
||||
|
||||
int32_t streamProcessRetrieveReq(SStreamTask* pTask, SStreamRetrieveReq* pReq, SRpcMsg* pMsg);
|
||||
SStreamChildEpInfo* streamTaskGetUpstreamTaskEpInfo(SStreamTask* pTask, int32_t taskId);
|
||||
|
||||
void streamTaskInputFail(SStreamTask* pTask);
|
||||
int32_t streamTryExec(SStreamTask* pTask);
|
||||
int32_t streamExecTask(SStreamTask* pTask);
|
||||
int32_t streamSchedExec(SStreamTask* pTask);
|
||||
bool streamTaskShouldStop(const SStreamStatus* pStatus);
|
||||
bool streamTaskShouldPause(const SStreamStatus* pStatus);
|
||||
|
@ -664,10 +703,14 @@ char* createStreamTaskIdStr(int64_t streamId, int32_t taskId);
|
|||
|
||||
// recover and fill history
|
||||
void streamTaskCheckDownstream(SStreamTask* pTask);
|
||||
int32_t streamTaskLaunchScanHistory(SStreamTask* pTask);
|
||||
int32_t streamTaskStartScanHistory(SStreamTask* pTask);
|
||||
int32_t streamTaskCheckStatus(SStreamTask* pTask, int32_t upstreamTaskId, int32_t vgId, int64_t stage);
|
||||
int32_t streamTaskUpdateEpsetInfo(SStreamTask* pTask, SArray* pNodeList);
|
||||
void streamTaskResetUpstreamStageInfo(SStreamTask* pTask);
|
||||
bool streamTaskAllUpstreamClosed(SStreamTask* pTask);
|
||||
bool streamTaskSetSchedStatusWait(SStreamTask* pTask);
|
||||
int8_t streamTaskSetSchedStatusActive(SStreamTask* pTask);
|
||||
int8_t streamTaskSetSchedStatusInActive(SStreamTask* pTask);
|
||||
|
||||
int32_t streamTaskStop(SStreamTask* pTask);
|
||||
int32_t streamSendCheckRsp(const SStreamMeta* pMeta, const SStreamTaskCheckReq* pReq, SStreamTaskCheckRsp* pRsp,
|
||||
|
@ -678,14 +721,15 @@ int32_t streamTaskScanHistoryDataComplete(SStreamTask* pTask);
|
|||
int32_t streamStartScanHistoryAsync(SStreamTask* pTask, int8_t igUntreated);
|
||||
bool streamHistoryTaskSetVerRangeStep2(SStreamTask* pTask, int64_t latestVer);
|
||||
int32_t streamQueueGetNumOfItems(const SStreamQueue* pQueue);
|
||||
int32_t streamQueueGetAvailableSpace(const SStreamQueue* pQueue, int32_t* availNum, double* availSize);
|
||||
|
||||
// common
|
||||
int32_t streamRestoreParam(SStreamTask* pTask);
|
||||
int32_t streamSetStatusNormal(SStreamTask* pTask);
|
||||
int32_t streamSetStatusUnint(SStreamTask* pTask);
|
||||
const char* streamGetTaskStatusStr(int32_t status);
|
||||
void streamTaskPause(SStreamTask* pTask, SStreamMeta* pMeta);
|
||||
void streamTaskResume(SStreamTask* pTask, SStreamMeta* pMeta);
|
||||
void streamTaskHalt(SStreamTask* pTask);
|
||||
void streamTaskResumeFromHalt(SStreamTask* pTask);
|
||||
void streamTaskDisablePause(SStreamTask* pTask);
|
||||
void streamTaskEnablePause(SStreamTask* pTask);
|
||||
|
@ -698,6 +742,9 @@ int32_t streamTaskReloadState(SStreamTask* pTask);
|
|||
void streamTaskCloseUpstreamInput(SStreamTask* pTask, int32_t taskId);
|
||||
void streamTaskOpenAllUpstreamInput(SStreamTask* pTask);
|
||||
|
||||
void streamTaskStatusInit(STaskStatusEntry* pEntry, const SStreamTask* pTask);
|
||||
void streamTaskStatusCopy(STaskStatusEntry* pDst, const STaskStatusEntry* pSrc);
|
||||
|
||||
// source level
|
||||
int32_t streamSetParamForStreamScannerStep1(SStreamTask* pTask, SVersionRange* pVerRange, STimeWindow* pWindow);
|
||||
int32_t streamSetParamForStreamScannerStep2(SStreamTask* pTask, SVersionRange* pVerRange, STimeWindow* pWindow);
|
||||
|
@ -715,26 +762,29 @@ void streamMetaCleanup();
|
|||
SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandFunc, int32_t vgId, int64_t stage);
|
||||
void streamMetaClose(SStreamMeta* streamMeta);
|
||||
int32_t streamMetaSaveTask(SStreamMeta* pMeta, SStreamTask* pTask); // save to stream meta store
|
||||
int32_t streamMetaRemoveTask(SStreamMeta* pMeta, int64_t* pKey);
|
||||
int32_t streamMetaRemoveTask(SStreamMeta* pMeta, STaskId* pKey);
|
||||
int32_t streamMetaRegisterTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTask, bool* pAdded);
|
||||
int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId);
|
||||
int32_t streamMetaGetNumOfTasks(SStreamMeta* pMeta);
|
||||
int32_t streamMetaGetNumOfStreamTasks(SStreamMeta* pMeta);
|
||||
SStreamTask* streamMetaAcquireTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId);
|
||||
void streamMetaReleaseTask(SStreamMeta* pMeta, SStreamTask* pTask);
|
||||
int32_t streamMetaReopen(SStreamMeta* pMeta, int64_t chkpId);
|
||||
int32_t streamMetaReopen(SStreamMeta* pMeta);
|
||||
int32_t streamMetaCommit(SStreamMeta* pMeta);
|
||||
int32_t streamMetaLoadAllTasks(SStreamMeta* pMeta);
|
||||
int32_t streamMetaReloadAllTasks(SStreamMeta* pMeta);
|
||||
void streamMetaNotifyClose(SStreamMeta* pMeta);
|
||||
int32_t streamTaskSetDb(SStreamMeta* pMeta, void* pTask);
|
||||
void streamMetaStartHb(SStreamMeta* pMeta);
|
||||
void streamMetaInitForSnode(SStreamMeta* pMeta);
|
||||
|
||||
// checkpoint
|
||||
int32_t streamProcessCheckpointSourceReq(SStreamTask* pTask, SStreamCheckpointSourceReq* pReq);
|
||||
int32_t streamProcessCheckpointReadyMsg(SStreamTask* pTask);
|
||||
void streamTaskClearCheckInfo(SStreamTask* pTask);
|
||||
|
||||
int32_t streamAlignTransferState(SStreamTask* pTask);
|
||||
|
||||
int32_t streamBuildAndSendDropTaskMsg(SMsgCb* pMsgCb, int32_t vgId, SStreamTaskId* pTaskId);
|
||||
int32_t streamAddCheckpointSourceRspMsg(SStreamCheckpointSourceReq* pReq, SRpcHandleInfo* pRpcInfo, SStreamTask* pTask,
|
||||
int8_t isSucceed);
|
||||
int32_t buildCheckpointSourceRsp(SStreamCheckpointSourceReq* pReq, SRpcHandleInfo* pRpcInfo, SRpcMsg* pMsg,
|
||||
|
|
|
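streamMetaAcquireTask pairs with streamMetaReleaseTask, which suggests a reference-counted borrow. A minimal sketch using only functions declared above, assuming NULL means the task is absent or already dropped:

    SStreamTask* pTask = streamMetaAcquireTask(pMeta, streamId, taskId);
    if (pTask != NULL) {
      streamSchedExec(pTask);               // any per-task operation
      streamMetaReleaseTask(pMeta, pTask);  // must pair with the acquire
    }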
@@ -28,20 +28,33 @@ extern "C" {
#endif

typedef struct SStreamFileState SStreamFileState;
typedef SList SStreamSnapshot;
typedef SList SStreamSnapshot;

typedef void*   (*_state_buff_get_fn)(void* pRowBuff, const void* pKey, size_t keyLen);
typedef int32_t (*_state_buff_put_fn)(void* pRowBuff, const void* pKey, size_t keyLen, const void* data, size_t dataLen);
typedef int32_t (*_state_buff_remove_fn)(void* pRowBuff, const void* pKey, size_t keyLen);
typedef int32_t (*_state_buff_remove_by_pos_fn)(SStreamFileState* pState, SRowBuffPos* pPos);
typedef void    (*_state_buff_cleanup_fn)(void* pRowBuff);
typedef void*   (*_state_buff_create_statekey_fn)(SRowBuffPos* pPos, int64_t num);

typedef int32_t (*_state_file_remove_fn)(SStreamFileState* pFileState, const void* pKey);
typedef int32_t (*_state_file_get_fn)(SStreamFileState* pFileState, void* pKey, void* data, int32_t* pDataLen);
typedef int32_t (*_state_file_clear_fn)(SStreamState* pState);

SStreamFileState* streamFileStateInit(int64_t memSize, uint32_t keySize, uint32_t rowSize, uint32_t selectRowSize,
                                      GetTsFun fp, void* pFile, TSKEY delMark, const char* taskId,
                                      int64_t checkpointId);
                                      int64_t checkpointId, int8_t type);
void streamFileStateDestroy(SStreamFileState* pFileState);
void streamFileStateClear(SStreamFileState* pFileState);
bool needClearDiskBuff(SStreamFileState* pFileState);
void streamFileStateReleaseBuff(SStreamFileState* pFileState, SRowBuffPos* pPos, bool used);
int32_t streamFileStateClearBuff(SStreamFileState* pFileState, SRowBuffPos* pPos);

int32_t getRowBuff(SStreamFileState* pFileState, void* pKey, int32_t keyLen, void** pVal, int32_t* pVLen);
int32_t deleteRowBuff(SStreamFileState* pFileState, const void* pKey, int32_t keyLen);
int32_t getRowBuffByPos(SStreamFileState* pFileState, SRowBuffPos* pPos, void** pVal);
void    releaseRowBuffPos(SRowBuffPos* pBuff);
bool    hasRowBuff(SStreamFileState* pFileState, void* pKey, int32_t keyLen);
void    putFreeBuff(SStreamFileState* pFileState, SRowBuffPos* pPos);

SStreamSnapshot* getSnapshot(SStreamFileState* pFileState);
int32_t flushSnapshot(SStreamFileState* pFileState, SStreamSnapshot* pSnapshot, bool flushState);

@@ -52,6 +65,37 @@ int32_t deleteExpiredCheckPoint(SStreamFileState* pFileState, TSKEY mark);
int32_t streamFileStateGeSelectRowSize(SStreamFileState* pFileState);
void    streamFileStateReloadInfo(SStreamFileState* pFileState, TSKEY ts);

void* getRowStateBuff(SStreamFileState* pFileState);
void* getStateFileStore(SStreamFileState* pFileState);
bool  isDeteled(SStreamFileState* pFileState, TSKEY ts);
bool  isFlushedState(SStreamFileState* pFileState, TSKEY ts, TSKEY gap);
SRowBuffPos* getNewRowPosForWrite(SStreamFileState* pFileState);
int32_t getRowStateRowSize(SStreamFileState* pFileState);

// session window
int32_t getSessionWinResultBuff(SStreamFileState* pFileState, SSessionKey* pKey, TSKEY gap, void** pVal, int32_t* pVLen);
int32_t putSessionWinResultBuff(SStreamFileState* pFileState, SRowBuffPos* pPos);
int32_t getSessionFlushedBuff(SStreamFileState* pFileState, SSessionKey* pKey, void** pVal, int32_t* pVLen);
int32_t deleteSessionWinStateBuffFn(void* pBuff, const void* key, size_t keyLen);
int32_t deleteSessionWinStateBuffByPosFn(SStreamFileState* pFileState, SRowBuffPos* pPos);

SRowBuffPos* createSessionWinBuff(SStreamFileState* pFileState, SSessionKey* pKey, void* p, int32_t* pVLen);
int32_t recoverSesssion(SStreamFileState* pFileState, int64_t ckId);

void sessionWinStateClear(SStreamFileState* pFileState);
void sessionWinStateCleanup(void* pBuff);

SStreamStateCur* sessionWinStateSeekKeyCurrentPrev(SStreamFileState* pFileState, const SSessionKey* pWinKey);
SStreamStateCur* sessionWinStateSeekKeyCurrentNext(SStreamFileState* pFileState, const SSessionKey* pWinKey);
SStreamStateCur* sessionWinStateSeekKeyNext(SStreamFileState* pFileState, const SSessionKey* pWinKey);
int32_t sessionWinStateGetKVByCur(SStreamStateCur* pCur, SSessionKey* pKey, void** pVal, int32_t* pVLen);
int32_t sessionWinStateMoveToNext(SStreamStateCur* pCur);
int32_t sessionWinStateGetKeyByRange(SStreamFileState* pFileState, const SSessionKey* key, SSessionKey* curKey);

// state window
int32_t getStateWinResultBuff(SStreamFileState* pFileState, SSessionKey* key, char* pKeyData, int32_t keyDataLen,
                              state_key_cmpr_fn fn, void** pVal, int32_t* pVLen);

#ifdef __cplusplus
}
#endif
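getRowBuff is the get-or-create entry point of this file-backed state: it returns a row buffer for a key, spilling cold rows to disk behind the scenes. A hedged usage sketch (the SWinKey field layout and the zero-on-success convention are assumptions):

    static void touchWindowState(SStreamFileState* pFileState, int64_t winStartTs, uint64_t groupId) {
      SWinKey key = {.ts = winStartTs, .groupId = groupId};  // assumed key layout
      void*   pVal = NULL;
      int32_t vlen = 0;
      if (getRowBuff(pFileState, &key, sizeof(key), &pVal, &vlen) == 0) {
        // pVal points at rowSize bytes of mutable window state
      }
    }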
@@ -36,8 +36,7 @@ extern "C" {
#define SYNC_DEL_WAL_MS          (1000 * 60)
#define SYNC_ADD_QUORUM_COUNT    3
#define SYNC_VNODE_LOG_RETENTION (TSDB_SYNC_LOG_BUFFER_RETENTION + 1)
#define SNAPSHOT_MAX_CLOCK_SKEW_MS 1000 * 10
#define SNAPSHOT_WAIT_MS           1000 * 30
#define SNAPSHOT_WAIT_MS           1000 * 5

#define SYNC_MAX_RETRY_BACKOFF      5
#define SYNC_LOG_REPL_RETRY_WAIT_MS 100

@@ -87,6 +86,11 @@ typedef enum {
  TAOS_SYNC_ROLE_ERROR = 2,
} ESyncRole;

typedef enum {
  SYNC_FSM_STATE_COMPLETE = 0,
  SYNC_FSM_STATE_INCOMPLETE,
} ESyncFsmState;

typedef struct SNodeInfo {
  int64_t clusterId;
  int32_t nodeId;

@@ -95,6 +99,12 @@ typedef struct SNodeInfo {
  ESyncRole nodeRole;
} SNodeInfo;

typedef struct SSyncTLV {
  int32_t typ;
  int32_t len;
  char    val[];
} SSyncTLV;

typedef struct SSyncCfg {
  int32_t totalReplicaNum;
  int32_t replicaNum;
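SSyncTLV is a type-length-value header with a C99 flexible array member, so a record is allocated as header plus payload in one block. A minimal encoding sketch (taosMemoryMalloc is the allocator used across the tree; string.h supplies memcpy):

    #include <string.h>

    // Pack one TLV record: header followed by len payload bytes.
    static SSyncTLV* buildTlv(int32_t typ, const void* payload, int32_t len) {
      SSyncTLV* pTlv = taosMemoryMalloc(sizeof(SSyncTLV) + len);
      if (pTlv == NULL) return NULL;
      pTlv->typ = typ;
      pTlv->len = len;
      memcpy(pTlv->val, payload, len);
      return pTlv;
    }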
@@ -139,10 +149,13 @@ typedef struct SReConfigCbMeta {
typedef struct SSnapshotParam {
  SyncIndex start;
  SyncIndex end;
  SSyncTLV* data;
} SSnapshotParam;

typedef struct SSnapshot {
  void*     data;
  int32_t   type;
  SSyncTLV* data;
  ESyncFsmState state;
  SyncIndex lastApplyIndex;
  SyncTerm  lastApplyTerm;
  SyncIndex lastConfigIndex;

@@ -171,7 +184,7 @@ typedef struct SSyncFSM {
  void (*FpBecomeLearnerCb)(const struct SSyncFSM* pFsm);

  int32_t (*FpGetSnapshot)(const struct SSyncFSM* pFsm, SSnapshot* pSnapshot, void* pReaderParam, void** ppReader);
  void (*FpGetSnapshotInfo)(const struct SSyncFSM* pFsm, SSnapshot* pSnapshot);
  int32_t (*FpGetSnapshotInfo)(const struct SSyncFSM* pFsm, SSnapshot* pSnapshot);

  int32_t (*FpSnapshotStartRead)(const struct SSyncFSM* pFsm, void* pReaderParam, void** ppReader);
  void (*FpSnapshotStopRead)(const struct SSyncFSM* pFsm, void* pReader);
@@ -163,6 +163,7 @@ int rpcReleaseHandle(void *handle, int8_t type);  // just release conn to rpc in
// These functions will not be called in the child process
int   rpcSendRequestWithCtx(void *thandle, const SEpSet *pEpSet, SRpcMsg *pMsg, int64_t *rid, SRpcCtx *ctx);
int   rpcSendRecv(void *shandle, SEpSet *pEpSet, SRpcMsg *pReq, SRpcMsg *pRsp);
int   rpcSendRecvWithTimeout(void *shandle, SEpSet *pEpSet, SRpcMsg *pMsg, SRpcMsg *pRsp, int32_t timeoutMs);
int   rpcSetDefaultAddr(void *thandle, const char *ip, const char *fqdn);
void *rpcAllocHandle();
void  rpcSetIpWhite(void *thandl, void *arg);
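rpcSendRecvWithTimeout adds a bounded variant of the blocking request/response call. A hedged sketch of the calling pattern; the SRpcMsg field names and the rpcFreeCont cleanup follow my reading of the transport module and should be treated as assumptions:

    static int demoSyncRpc(void* clientRpc, SEpSet* pEpSet, void* pReq, int32_t reqLen, int32_t msgType) {
      SRpcMsg req = {.msgType = msgType, .pCont = pReq, .contLen = reqLen};
      SRpcMsg rsp = {0};
      int ret = rpcSendRecvWithTimeout(clientRpc, pEpSet, &req, &rsp, 5000);  // 5 s cap
      if (ret == 0) {
        // consume rsp.pCont / rsp.contLen, then release the body
        rpcFreeCont(rsp.pCont);  // assumed cleanup helper
      }
      return ret;
    }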
|
@ -225,7 +225,10 @@ void syslog(int unused, const char *format, ...);
|
|||
#endif
|
||||
#else
|
||||
// Windows
|
||||
#define setThreadName(name)
|
||||
#define setThreadName(name) \
|
||||
do { \
|
||||
pthread_setname_np(taosThreadSelf(), (name)); \
|
||||
} while (0)
|
||||
#endif
|
||||
|
||||
#if defined(_WIN32)
|
||||
|
|
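The Windows branch of setThreadName stops being a no-op and forwards to pthread_setname_np, so thread naming now behaves the same on every platform. Typical use at the top of a thread entry point:

    static void* workerFn(void* param) {
      setThreadName("stream-worker");  // visible in debuggers and profilers
      // ... thread body ...
      return NULL;
    }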
|
@ -33,6 +33,17 @@ int tsem_timewait(tsem_t *sim, int64_t milis);
|
|||
int tsem_post(tsem_t *sem);
|
||||
int tsem_destroy(tsem_t *sem);
|
||||
|
||||
#elif defined(_TD_WINDOWS_64) || defined(_TD_WINDOWS_32)
|
||||
#include <windows.h>
|
||||
|
||||
#define tsem_t HANDLE
|
||||
|
||||
int tsem_init(tsem_t *sem, int pshared, unsigned int value);
|
||||
int tsem_wait(tsem_t *sem);
|
||||
int tsem_timewait(tsem_t *sim, int64_t milis);
|
||||
int tsem_post(tsem_t *sem);
|
||||
int tsem_destroy(tsem_t *sem);
|
||||
|
||||
#else
|
||||
|
||||
#define tsem_t sem_t
|
||||
|
|
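The new _TD_WINDOWS_64/_TD_WINDOWS_32 branch maps tsem_t onto a Win32 HANDLE while keeping the POSIX-style entry points, so callers stay platform-neutral. A minimal sketch against the declared API (the 100 ms figure is arbitrary):

    static void demoSemaphore(void) {
      tsem_t sem;
      tsem_init(&sem, 0, 0);                // not shared across processes, count 0
      tsem_post(&sem);                      // producer: signal one unit of work
      if (tsem_timewait(&sem, 100) == 0) {  // consumer: wait up to 100 ms
        // got the signal
      }
      tsem_destroy(&sem);
    }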
|
@ -22,6 +22,15 @@
|
|||
extern "C" {
|
||||
#endif
|
||||
|
||||
#if defined(WINDOWS) && !defined(__USE_PTHREAD)
|
||||
#include <windows.h>
|
||||
#define __USE_WIN_THREAD
|
||||
// https://learn.microsoft.com/en-us/windows/win32/winprog/using-the-windows-headers
|
||||
// #ifndef _WIN32_WINNT
|
||||
// #define _WIN32_WINNT 0x0600
|
||||
// #endif
|
||||
#endif
|
||||
|
||||
#if !defined(WINDOWS) && !defined(_ALPINE)
|
||||
#ifndef __USE_XOPEN2K
|
||||
#define TD_USE_SPINLOCK_AS_MUTEX
|
||||
|
@ -29,6 +38,22 @@ typedef pthread_mutex_t pthread_spinlock_t;
|
|||
#endif
|
||||
#endif
|
||||
|
||||
#ifdef __USE_WIN_THREAD
|
||||
typedef pthread_t TdThread; // pthread api
|
||||
typedef pthread_spinlock_t TdThreadSpinlock; // pthread api
|
||||
typedef CRITICAL_SECTION TdThreadMutex; // windows api
|
||||
typedef HANDLE TdThreadMutexAttr; // windows api
|
||||
typedef struct {
|
||||
SRWLOCK lock;
|
||||
int8_t excl;
|
||||
} TdThreadRwlock; // windows api
|
||||
typedef pthread_attr_t TdThreadAttr; // pthread api
|
||||
typedef pthread_once_t TdThreadOnce; // pthread api
|
||||
typedef HANDLE TdThreadRwlockAttr; // windows api
|
||||
typedef CONDITION_VARIABLE TdThreadCond; // windows api
|
||||
typedef HANDLE TdThreadCondAttr; // windows api
|
||||
typedef pthread_key_t TdThreadKey; // pthread api
|
||||
#else
|
||||
typedef pthread_t TdThread;
|
||||
typedef pthread_spinlock_t TdThreadSpinlock;
|
||||
typedef pthread_mutex_t TdThreadMutex;
|
||||
|
@ -40,11 +65,14 @@ typedef pthread_rwlockattr_t TdThreadRwlockAttr;
|
|||
typedef pthread_cond_t TdThreadCond;
|
||||
typedef pthread_condattr_t TdThreadCondAttr;
|
||||
typedef pthread_key_t TdThreadKey;
|
||||
#endif
|
||||
|
||||
#define taosThreadCleanupPush pthread_cleanup_push
|
||||
#define taosThreadCleanupPop pthread_cleanup_pop
|
||||
|
||||
#ifdef WINDOWS
|
||||
#if defined(WINDOWS) && !defined(__USE_PTHREAD)
|
||||
#define TD_PTHREAD_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER_FORBID
|
||||
#elif defined(WINDOWS)
|
||||
#define TD_PTHREAD_MUTEX_INITIALIZER (TdThreadMutex)(-1)
|
||||
#else
|
||||
#define TD_PTHREAD_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
|
||||
|
|
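With the native Windows thread types, TD_PTHREAD_MUTEX_INITIALIZER expands to PTHREAD_MUTEX_INITIALIZER_FORBID: a CRITICAL_SECTION has no valid static initializer, so static initialization is deliberately turned into a compile error and mutexes must be set up at runtime. A hedged sketch of the portable pattern (taosThreadMutexInit and friends are the wrappers this layer exposes, as I read it):

    static void demoMutex(void) {
      TdThreadMutex lock;
      taosThreadMutexInit(&lock, NULL);  // runtime init works on every platform
      taosThreadMutexLock(&lock);
      // ... critical section ...
      taosThreadMutexUnlock(&lock);
      taosThreadMutexDestroy(&lock);
    }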