Merge branch 'main' of github.com:taosdata/TDengine into docs/TD-33666-main
This commit is contained in: commit 37c4d60f3a

@@ -11,7 +11,7 @@ on:
      - 'packaging/**'
      - 'tests/**'
      - '*.md'

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true
@@ -43,15 +43,33 @@ jobs:
      if: runner.os == 'Linux'
      run: |
        sudo apt update -y
        sudo apt install -y build-essential cmake \
          libgeos-dev libjansson-dev libsnappy-dev liblzma-dev libz-dev \
          zlib1g-dev pkg-config libssl-dev gawk
        sudo apt install -y \
          build-essential \
          cmake \
          gawk \
          libgeos-dev \
          libjansson-dev \
          liblzma-dev \
          libsnappy-dev \
          libssl-dev \
          libz-dev \
          pkg-config \
          zlib1g

    - name: Install dependencies on macOS
      if: runner.os == 'macOS'
      run: |
        brew update
        brew install argp-standalone gflags pkg-config snappy zlib geos jansson gawk openssl
        brew install \
          argp-standalone \
          gawk \
          gflags \
          geos \
          jansson \
          openssl \
          pkg-config \
          snappy \
          zlib

    - name: Build and install TDengine
      run: |
@@ -80,7 +98,7 @@ jobs:
      run: |
        taosBenchmark -t 10 -n 10 -y
        taos -s "select count(*) from test.meters"

    - name: Clean up
      if: always()
      run: |
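The same smoke test can be run by hand against a local build; a rough equivalent of the workflow step, assuming taosd is installed and managed by systemd, is:

```bash
# Assumes a systemd-managed install; start the server however your setup does it.
sudo systemctl start taosd

# Same commands as the workflow step: write 10 child tables x 10 rows without prompting,
# then count the rows in the default test.meters supertable.
taosBenchmark -t 10 -n 10 -y
taos -s "select count(*) from test.meters"
```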
@@ -1,4 +1,4 @@
name: taosKeeper CI
name: taosKeeper Build

on:
  push:

@@ -8,7 +8,7 @@ on:
jobs:
  build:
    runs-on: ubuntu-latest
    name: Run unit tests
    name: Build and test on ubuntu-latest

    steps:
      - name: Checkout the repository
README-CN.md

@@ -69,6 +69,8 @@ TDengine 是一款开源、高性能、云原生的时序数据库 (Time-Series
TDengine 目前可以在 Linux、Windows、macOS 等平台上安装和运行。任何 OS 的应用也可以选择 taosAdapter 的 RESTful 接口连接服务端 taosd。CPU 支持 X64/ARM64,后续会支持 MIPS64、Alpha64、ARM32、RISC-V 等 CPU 架构。目前不支持使用交叉编译器构建。

如果你想要编译 taosAdapter 或者 taosKeeper,需要安装 Go 1.18 及以上版本。

## 3.1 Linux系统

<details>

@@ -153,6 +155,10 @@ cmake .. -DBUILD_TOOLS=true -DBUILD_CONTRIB=true
make
```

如果你想要编译 taosAdapter,需要添加 `-DBUILD_HTTP=false` 选项。

如果你想要编译 taosKeeper,需要添加 `-DBUILD_KEEPER=true` 选项。

可以使用 Jemalloc 作为内存分配器,而不是使用 glibc:

```bash

@@ -180,6 +186,10 @@ mkdir debug && cd debug
cmake .. && cmake --build .
```

如果你想要编译 taosAdapter,需要添加 `-DBUILD_HTTP=false` 选项。

如果你想要编译 taosKeeper,需要添加 `-DBUILD_KEEPER=true` 选项。

</details>

## 4.3 Windows系统上构建
README.md

@@ -82,6 +82,8 @@ For contributing/building/testing TDengine Connectors, please check the followin
At the moment, TDengine server supports running on Linux/Windows/macOS systems. Any application can also choose the RESTful interface provided by taosAdapter to connect to the taosd service. TDengine supports X64/ARM64 CPU, and it will support MIPS64, Alpha64, ARM32, RISC-V and other CPU architectures in the future. Right now we don't support building in a cross-compiling environment.

If you want to compile taosAdapter or taosKeeper, you need to install Go 1.18 or above.

## 3.1 On Linux

<details>

@@ -168,6 +170,10 @@ cmake .. -DBUILD_TOOLS=true -DBUILD_CONTRIB=true
make
```

If you want to compile taosAdapter, you need to add the `-DBUILD_HTTP=false` option.

If you want to compile taosKeeper, you need to add the `-DBUILD_KEEPER=true` option.
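Taken together, a sketch of a Linux configure line that also builds taosAdapter and taosKeeper (it simply combines the options documented above and is not an officially prescribed recipe) looks like:

```bash
mkdir -p debug && cd debug
# -DBUILD_HTTP=false enables building taosAdapter; -DBUILD_KEEPER=true enables taosKeeper.
cmake .. -DBUILD_TOOLS=true -DBUILD_CONTRIB=true -DBUILD_HTTP=false -DBUILD_KEEPER=true
make
```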
You can use Jemalloc as the memory allocator instead of glibc:

```bash

@@ -196,6 +202,10 @@ mkdir debug && cd debug
cmake .. && cmake --build .
```

If you want to compile taosAdapter, you need to add the `-DBUILD_HTTP=false` option.

If you want to compile taosKeeper, you need to add the `-DBUILD_KEEPER=true` option.

</details>

## 4.3 Build on Windows
@@ -117,7 +117,7 @@ ELSE()
ENDIF()

# force set all platform to JEMALLOC_ENABLED = false
SET(JEMALLOC_ENABLED OFF)
# SET(JEMALLOC_ENABLED OFF)

IF(TD_WINDOWS)
    MESSAGE("${Yellow} set compiler flag for Windows! ${ColourReset}")

@@ -258,3 +258,11 @@ ELSE()
        SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-reserved-user-defined-literal -g3 -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
    ENDIF()
ENDIF()

IF(${JEMALLOC_ENABLED})
    MESSAGE(STATUS "JEMALLOC_ENABLED Enabled")
    SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-error=attributes")
    SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=attributes")
ELSE()
    MESSAGE(STATUS "JEMALLOC_ENABLED Disabled")
ENDIF()
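With the forced `SET(JEMALLOC_ENABLED OFF)` commented out, the allocator choice is back in the hands of the configure step. The following is only a sketch: it assumes `JEMALLOC_ENABLED` is meant to be set from the command line like any other cache variable, and that autoconf (usually needed to build bundled jemalloc) is installed.

```bash
# Assumption: JEMALLOC_ENABLED can be toggled at configure time.
sudo apt install -y autoconf   # jemalloc's own build typically needs autoconf
mkdir -p debug && cd debug
cmake .. -DJEMALLOC_ENABLED=true
make
```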
@@ -15,6 +15,18 @@ IF (TD_PRODUCT_NAME)
  ADD_DEFINITIONS(-DTD_PRODUCT_NAME="${TD_PRODUCT_NAME}")
ENDIF ()

IF (CUS_NAME)
  ADD_DEFINITIONS(-DCUS_NAME="${CUS_NAME}")
ENDIF ()

IF (CUS_PROMPT)
  ADD_DEFINITIONS(-DCUS_PROMPT="${CUS_PROMPT}")
ENDIF ()

IF (CUS_EMAIL)
  ADD_DEFINITIONS(-DCUS_EMAIL="${CUS_EMAIL}")
ENDIF ()
find_program(HAVE_GIT NAMES git)

IF (DEFINED GITINFO)
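The `IF (CUS_NAME)` / `IF (CUS_PROMPT)` / `IF (CUS_EMAIL)` guards above only forward values supplied at configure time; nothing in this file defines them. A hypothetical OEM invocation (every value below is an illustrative placeholder, not a shipped default) would be:

```bash
# Hypothetical OEM build: all values are placeholders.
cmake .. \
  -DCUS_NAME="MyTimeSeriesDB" \
  -DCUS_PROMPT="mytsdb" \
  -DCUS_EMAIL="support@example.com" \
  -DTD_PRODUCT_NAME="MyTimeSeriesDB Enterprise Edition"
```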
@@ -26,6 +26,7 @@ Flink Connector supports all platforms that can run Flink 1.19 and above version
| Flink Connector Version | Major Changes | TDengine Version |
|-------------------------|---------------------------------------|------------------|
| 2.1.0 | Fix the issue of writing varchar types from different data sources. | - |
| 2.0.2 | The Table Sink supports types such as RowKind.UPDATE_BEFORE, RowKind.UPDATE_AFTER, and RowKind.DELETE. | - |
| 2.0.1 | Sink supports writing types from RowData implementations. | - |
| 2.0.0 | 1. Support SQL queries on data in TDengine database. <br/> 2. Support CDC subscription to data in TDengine database. <br/> 3. Supports reading and writing to TDengine database using Table SQL. | 3.3.5.1 and higher |

@@ -86,7 +87,8 @@ TDengine currently supports timestamp, number, character, and boolean types, and
| SMALLINT | Short |
| TINYINT | Byte |
| BOOL | Boolean |
| BINARY | byte[] |
| VARCHAR | StringData |
| BINARY | StringData |
| NCHAR | StringData |
| JSON | StringData |
| VARBINARY | byte[] |

@@ -116,7 +118,7 @@ If using Maven to manage a project, simply add the following dependencies in pom
<dependency>
    <groupId>com.taosdata.flink</groupId>
    <artifactId>flink-connector-tdengine</artifactId>
    <version>2.0.2</version>
    <version>2.1.0</version>
</dependency>
```
@@ -242,13 +242,14 @@ The query performance test mainly outputs the QPS indicator of query request spe
``` bash
complete query with 3 threads and 10000 query delay avg: 0.002686s min: 0.001182s max: 0.012189s p90: 0.002977s p95: 0.003493s p99: 0.004645s SQL command: select ...
INFO: Total specified queries: 30000
INFO: Spend 26.9530 second completed total queries: 30000, the QPS of all threads: 1113.049
```

- The first line represents the percentile distribution of query execution and query request delay for each of the three threads executing 10000 queries. The SQL command is the test query statement
- The second line indicates that a total of 10000 * 3 = 30000 queries have been completed
- The third line indicates that the total query time is 26.9653 seconds, and the query rate per second (QPS) is 1113.049 times/second
- The second line indicates that the total query time is 26.9653 seconds, and the query rate per second (QPS) is 1113.049 times/second
- If the `continue_if_fail` option is set to `yes` in the query, the last line will output the number of failed requests and error rate, the format like "error + number of failed requests (error rate)"
- QPS = number of successful requests / time spent (in seconds)
- Error rate = number of failed requests / (number of successful requests + number of failed requests)
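As a quick check of the QPS formula against the sample output above (a scratch calculation, not part of the tool's output):

```bash
# QPS = successful queries / elapsed seconds
echo "scale=3; 30000 / 26.9530" | bc   # prints 1113.048, matching the reported 1113.049 within rounding
```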
#### Subscription metrics

@@ -330,9 +331,9 @@ Parameters related to supertable creation are configured in the `super_tables` s
- **child_table_exists**: Whether the child table already exists, default is "no", options are "yes" or "no".

- **child_table_count**: Number of child tables, default is 10.
- **childtable_count**: Number of child tables, default is 10.

- **child_table_prefix**: Prefix for child table names, mandatory, no default value.
- **childtable_prefix**: Prefix for child table names, mandatory, no default value.

- **escape_character**: Whether the supertable and child table names contain escape characters, default is "no", options are "yes" or "no".

@@ -427,11 +428,9 @@ Specify the configuration parameters for tag and data columns in `super_tables`
- **create_table_thread_count** : The number of threads for creating tables, default is 8.

- **connection_pool_size** : The number of pre-established connections with the TDengine server. If not configured, it defaults to the specified number of threads.

- **result_file** : The path to the result output file, default is ./output.txt.

- **confirm_parameter_prompt** : A toggle parameter that requires user confirmation after a prompt to continue. The default value is false.
- **confirm_parameter_prompt** : A toggle parameter that requires user confirmation after a prompt to continue. The value can be "yes" or "no", by default "no".

- **interlace_rows** : Enables interleaved insertion mode and specifies the number of rows to insert into each subtable at a time. Interleaved insertion mode refers to inserting the specified number of rows into each subtable in sequence and repeating this process until all subtable data has been inserted. The default value is 0, meaning data is inserted into one subtable completely before moving to the next.
  This parameter can also be configured in `super_tables`; if configured, the settings in `super_tables` take higher priority and override the global settings.
@@ -460,12 +459,12 @@ For other common parameters, see Common Configuration Parameters.
Configuration parameters for querying specified tables (can specify supertables, subtables, or regular tables) are set in `specified_table_query`.

- **mixed_query** "yes": `Mixed Query` "no": `Normal Query`, default is "no"
  `Mixed Query`: All SQL statements in `sqls` are grouped by the number of threads, with each thread executing one group. Each SQL statement in a thread needs to perform `query_times` queries.
  `Normal Query`: Each SQL in `sqls` starts `threads` threads and exits after executing `query_times` times. The next SQL can only be executed after all previous SQL threads have finished executing and exited.
  Regardless of whether it is a `Normal Query` or `Mixed Query`, the total number of query executions is the same. The total number of queries = `sqls` * `threads` * `query_times`. The difference is that `Normal Query` starts `threads` for each SQL query, while `Mixed Query` only starts `threads` once to complete all SQL queries. The number of thread startups for the two is different.
  `General Query`: Each SQL in `sqls` starts `threads` threads to query this SQL. Each thread exits after executing `query_times` queries, and only after all threads executing this SQL have completed can the next SQL be executed.
  The total number of queries (`General Query`) = the number of `sqls` * `query_times` * `threads`
  - `Mixed Query`: All SQL statements in `sqls` are divided into `threads` groups, with each thread executing one group. Each SQL statement needs to execute `query_times` queries.
  The total number of queries (`Mixed Query`) = the number of `sqls` * `query_times`

- **query_interval** : Query interval, in seconds, default is 0.
- **query_interval** : Query interval, in milliseconds, default is 0.

- **threads** : Number of threads executing the SQL query, default is 1.
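To make the query-mode description above concrete, here is a minimal sketch of a query-scenario configuration driven from the shell. The keys under `specified_table_query` come from the parameter list above; `"filetype": "query"`, the placement of `query_times`, and the omitted connection settings are assumptions about the general configuration section, which is not shown in this hunk.

```bash
# Sketch only: key names outside the documented specified_table_query block are assumptions.
cat > query.json <<'EOF'
{
  "filetype": "query",
  "query_times": 10000,
  "specified_table_query": {
    "mixed_query": "no",
    "threads": 3,
    "sqls": [
      { "sql": "select count(*) from test.meters", "result": "./query_res0.txt" }
    ]
  }
}
EOF
taosBenchmark -f query.json
# Normal/General Query with these numbers: 1 sql * 10000 query_times * 3 threads = 30000 total queries.
```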
@@ -487,6 +486,7 @@ The thread mode of the super table query is the same as the `Normal Query` mode
- **sqls** :
  - **sql** : The SQL command to execute, required; for supertable queries, keep "xxxx" in the SQL command, the program will automatically replace it with all subtable names of the supertable.
  - **result** : File to save the query results, if not specified, results are not saved.
  - **Note**: The maximum number of SQL arrays configured under SQL is 100.

### Configuration Parameters for Subscription Scenarios

@@ -501,7 +501,7 @@ Configuration parameters for subscribing to specified tables (can specify supert
- **sqls** :
  - **sql** : The SQL command to execute, required.

#### Data Type Writing Comparison Table in Configuration File
### Data Type Writing Comparison Table in Configuration File

| # | **Engine** | **taosBenchmark**
| --- | :----------------: | :---------------:
@@ -139,7 +139,7 @@ const taos = require("@tdengine/websocket");
async function createConnect() {
    try {
        let url = 'ws://127.0.0.1:6041'
        let conf = WsSql.NewConfig(url)
        let conf = new taos.WSConfig(url)
        conf.setUser('root')
        conf.setPwd('taosdata')
        conf.setDb('db')
@@ -25,6 +25,7 @@ import RequestId from "../../assets/resources/_request_id.mdx";
| Connector Version | Major Changes | TDengine Version |
|-------------------|------------------------------------------------------------|--------------------|
| 3.1.6 | Optimize WebSocket connection message handling. | - |
| 3.1.5 | Fix WebSocket encoding error for Chinese character length. | - |
| 3.1.4 | Improved WebSocket query and insert performance. | 3.3.2.0 and higher |
| 3.1.3 | Supported WebSocket auto-reconnect. | - |

@@ -39,25 +40,25 @@ For error reporting in other TDengine modules, please refer to [Error Codes](../
## Data Type Mapping

| TDengine DataType | C# Type |
|-------------------|------------------|
| TIMESTAMP | DateTime |
| TINYINT | sbyte |
| SMALLINT | short |
| INT | int |
| BIGINT | long |
| TINYINT UNSIGNED | byte |
| SMALLINT UNSIGNED | ushort |
| INT UNSIGNED | uint |
| BIGINT UNSIGNED | ulong |
| FLOAT | float |
| DOUBLE | double |
| BOOL | bool |
| BINARY | byte[] |
| NCHAR | string (utf-8 encoded) |
| JSON | byte[] |
| VARBINARY | byte[] |
| GEOMETRY | byte[] |
| TDengine DataType | C# Type |
|-------------------|----------|
| TIMESTAMP | DateTime |
| TINYINT | sbyte |
| SMALLINT | short |
| INT | int |
| BIGINT | long |
| TINYINT UNSIGNED | byte |
| SMALLINT UNSIGNED | ushort |
| INT UNSIGNED | uint |
| BIGINT UNSIGNED | ulong |
| FLOAT | float |
| DOUBLE | double |
| BOOL | bool |
| BINARY | byte[] |
| NCHAR | string |
| JSON | byte[] |
| VARBINARY | byte[] |
| GEOMETRY | byte[] |

**Note**: JSON type is only supported in tags.
The GEOMETRY type is binary data in little endian byte order, conforming to the WKB standard. For more details, please refer to [Data Types](../../sql-manual/data-types/)
|
@ -198,7 +198,7 @@ splitSql.setSelect("ts, current, voltage, phase, groupid, location")
|
|||
", current: " + rowData.getFloat(1) +
|
||||
", voltage: " + rowData.getInt(2) +
|
||||
", phase: " + rowData.getFloat(3) +
|
||||
", location: " + new String(rowData.getBinary(4)));
|
||||
", location: " + rowData.getString(4).toString());
|
||||
sb.append("\n");
|
||||
return sb.toString();
|
||||
});
|
||||
|
@ -273,7 +273,7 @@ splitSql.setSelect("ts, current, voltage, phase, groupid, location")
|
|||
", current: " + row.getFloat(1) +
|
||||
", voltage: " + row.getInt(2) +
|
||||
", phase: " + row.getFloat(3) +
|
||||
", location: " + new String(row.getBinary(4)));
|
||||
", location: " + rowData.getString(4).toString());
|
||||
sb.append("\n");
|
||||
totalVoltage.addAndGet(row.getInt(2));
|
||||
}
|
||||
|
@ -311,7 +311,7 @@ splitSql.setSelect("ts, current, voltage, phase, groupid, location")
|
|||
", current: " + rowData.getFloat(1) +
|
||||
", voltage: " + rowData.getInt(2) +
|
||||
", phase: " + rowData.getFloat(3) +
|
||||
", location: " + new String(rowData.getBinary(4)));
|
||||
", location: " + rowData.getString(4).toString());
|
||||
sb.append("\n");
|
||||
totalVoltage.addAndGet(rowData.getInt(2));
|
||||
return sb.toString();
|
||||
|
@ -353,7 +353,7 @@ splitSql.setSelect("ts, current, voltage, phase, groupid, location")
|
|||
", current: " + row.getFloat(1) +
|
||||
", voltage: " + row.getInt(2) +
|
||||
", phase: " + row.getFloat(3) +
|
||||
", location: " + new String(row.getBinary(4)));
|
||||
", location: " + rowData.getString(4).toString());
|
||||
sb.append("\n");
|
||||
totalVoltage.addAndGet(row.getInt(2));
|
||||
}
|
||||
|
@ -489,9 +489,9 @@ splitSql.setSelect("ts, current, voltage, phase, groupid, location")
|
|||
" `current` FLOAT," +
|
||||
" voltage INT," +
|
||||
" phase FLOAT," +
|
||||
" location VARBINARY," +
|
||||
" location VARCHAR(255)," +
|
||||
" groupid INT," +
|
||||
" tbname VARBINARY" +
|
||||
" tbname VARCHAR(255)" +
|
||||
") WITH (" +
|
||||
" 'connector' = 'tdengine-connector'," +
|
||||
" 'td.jdbc.url' = 'jdbc:TAOS-WS://localhost:6041/power?user=root&password=taosdata'," +
|
||||
|
@ -506,9 +506,9 @@ splitSql.setSelect("ts, current, voltage, phase, groupid, location")
|
|||
" `current` FLOAT," +
|
||||
" voltage INT," +
|
||||
" phase FLOAT," +
|
||||
" location VARBINARY," +
|
||||
" location VARCHAR(255)," +
|
||||
" groupid INT," +
|
||||
" tbname VARBINARY" +
|
||||
" tbname VARCHAR(255)" +
|
||||
") WITH (" +
|
||||
" 'connector' = 'tdengine-connector'," +
|
||||
" 'td.jdbc.mode' = 'sink'," +
|
||||
|
@ -535,9 +535,9 @@ splitSql.setSelect("ts, current, voltage, phase, groupid, location")
|
|||
" `current` FLOAT," +
|
||||
" voltage INT," +
|
||||
" phase FLOAT," +
|
||||
" location VARBINARY," +
|
||||
" location VARCHAR(255)," +
|
||||
" groupid INT," +
|
||||
" tbname VARBINARY" +
|
||||
" tbname VARCHAR(255)" +
|
||||
") WITH (" +
|
||||
" 'connector' = 'tdengine-connector'," +
|
||||
" 'bootstrap.servers' = 'localhost:6041'," +
|
||||
|
@ -554,9 +554,9 @@ splitSql.setSelect("ts, current, voltage, phase, groupid, location")
|
|||
" `current` FLOAT," +
|
||||
" voltage INT," +
|
||||
" phase FLOAT," +
|
||||
" location VARBINARY," +
|
||||
" location VARCHAR(255)," +
|
||||
" groupid INT," +
|
||||
" tbname VARBINARY" +
|
||||
" tbname VARCHAR(255)" +
|
||||
") WITH (" +
|
||||
" 'connector' = 'tdengine-connector'," +
|
||||
" 'td.jdbc.mode' = 'cdc'," +
|
||||
|
|
|
@ -8,7 +8,7 @@ TDengine 是一款[开源](https://www.taosdata.com/tdengine/open_source_time-se
|
|||
|
||||
TDengine 充分利用了时序数据的特点,提出了“一个数据采集点一张表”与“超级表”的概念,设计了创新的存储引擎,让数据的写入、查询和存储效率都得到极大的提升。为正确理解并使用 TDengine,无论你在工作中是什么角色,请您仔细阅读[数据模型](./basic/model)一章。
|
||||
|
||||
如果你是开发工程师,请一定仔细阅读[开发指南](./develop)一章,该部分对数据库连接、建模、插入数据、查询、流式计算、缓存、数据订阅、用户自定义函数等功能都做了详细介绍,并配有各种编程语言的示例代码。大部分情况下,你只要复制粘贴示例代码,针对自己的应用稍作改动,就能跑起来。对 REST API、各种编程语言的连接器(Connector)想做更多详细了解的话,请看[连接器](./reference/connector)一章。
|
||||
如果你是开发工程师,请一定仔细阅读[开发指南](./develop)一章,该部分对数据库连接、建模、写入、查询、流式计算、缓存、数据订阅、用户自定义函数等功能都做了详细介绍,并配有各种编程语言的示例代码。大部分情况下,只要复制粘贴示例代码,针对自己的应用稍作改动,就能跑起来。对 REST API、各种编程语言的连接器(Connector)想做更多详细了解,请看[连接器](./reference/connector)一章。
|
||||
|
||||
我们已经生活在大数据时代,纵向扩展已经无法满足日益增长的业务需求,任何系统都必须具有水平扩展的能力,集群成为大数据以及 Database 系统的不可缺失功能。TDengine 团队不仅实现了集群功能,而且将这一重要核心功能开源。怎么部署、管理和维护 TDengine 集群,请仔细参考[运维管理](./operation)一章。
|
||||
|
||||
|
@ -16,7 +16,7 @@ TDengine 采用 SQL 作为查询语言,大大降低学习成本、降低迁移
|
|||
|
||||
如果你是系统管理员,关心安装、升级、容错灾备、关心数据导入、导出、配置参数,如何监测 TDengine 是否健康运行,如何提升系统运行的性能,请仔细参考[运维指南](./operation)一章。
|
||||
|
||||
如果你对数据库内核设计感兴趣,或是开源爱好者,建议仔细阅读[技术内幕](./tdinternal)一章。该章从分布式架构到存储引擎、查询引擎、数据订阅,再到流计算引擎都做了详细阐述。建议对照文档,查看TDengine在GitHub的源代码,对TDengine的设计和编码做深入了解,更欢迎加入开源社区,贡献代码。
|
||||
如果你对数据库内核设计感兴趣,或是开源爱好者,建议仔细阅读[技术内幕](./tdinternal)一章。该章从分布式架构到存储引擎、查询引擎、数据订阅,再到流计算引擎都做了详细阐述。建议对照文档,查看 TDengine 在 GitHub 的源代码,对 TDengine 的设计和编码做深入了解,更欢迎加入开源社区,贡献代码。
|
||||
|
||||
最后,作为一个开源软件,欢迎大家的参与。如果发现文档有任何错误、描述不清晰的地方,请在每个页面的最下方,点击“编辑本文档”直接进行修改。
|
||||
|
||||
|
|
|
@ -9,7 +9,7 @@ toc_max_heading_level: 4
|
|||
|
||||
时序数据,即时间序列数据(Time-Series Data),它们是一组按照时间发生先后顺序进行排列的序列数据。日常生活中,设备、传感器采集的数据就是时序数据,证券交易的记录也是时序数据。因此时序数据的处理并不陌生,特别在是工业自动化以及证券金融行业,专业的时序数据处理软件早已存在,比如工业领域的 PI System 以及金融行业的 KDB。
|
||||
|
||||
这些时序数据是周期、准周期产生的,或事件触发产生的,有的采集频率高,有的采集频率低。一般被发送至服务器中进行汇总并进行实时分析和处理,对系统的运行做出实时监测或预警,对股市行情进行预测。这些数据也可以被长期保存下来,用以进行离线数据分析。比如统计时间区间内设备的运行节奏与产出,分析如何进一步优化配置来提升生产效率;统计一段时间内生产过程中的成本分布,分析如何降低生产成本;统计一段时间内的设备异常值,结合业务分析潜在的安全隐患,以降低故障时长等等。
|
||||
这些时序数据是周期、准周期产生的,或事件触发产生的,有的采集频率高,有的采集频率低。一般被发送至服务器进行汇总并进行实时分析和处理,对系统的运行做出实时监测或预警,对股市行情进行预测。这些数据也可以被长期保存下来,用以进行离线数据分析。比如统计时间区间内设备的运行节奏与产出,分析如何进一步优化配置来提升生产效率;统计一段时间内生产过程中的成本分布,分析如何降低生产成本;统计一段时间内的设备异常值,结合业务分析潜在的安全隐患,以降低故障时长等等。
|
||||
|
||||
过去的二十年,随着数据通讯成本的急剧下降,以及各种传感技术和智能设备的出现,特别是物联网与工业 4.0 的推动,工业、物联网企业为了监测设备、环境、生产线及整个系统的运行状态,在各个关键点都配有传感器,采集各种数据。从手环、共享出行、智能电表、环境监测设备到电梯、数控机床、挖掘机、工业生产线等都在源源不断的产生海量的实时数据,时序数据的体量正指数级的增长。以智能电表为例,智能电表每隔 15 分钟采集一次数据,每天会自动生成 96 条记录。现在全中国已经有超过 10 亿台智能电表,一天就产生 960 亿条时序数据。一台联网的汽车往往每隔 10 到 15 秒采集一次数据发到云端,那么一天下来就很容易产生 1000 条记录。假设中国有 2 亿车辆联网,它们每天将产生总计 2000 亿条甚至更多的时序数据。
|
||||
|
||||
|
@ -33,7 +33,7 @@ toc_max_heading_level: 4
|
|||
|
||||
7. 用户关注的是一段时间的趋势:对于一条银行交易记录,或者一条微博、微信,对于它的用户而言,每一条都很重要。但对于物联网、工业时序数据,每个数据点与数据点的变化并不大,大家关心的更多是一段时间,比如过去五分钟、一小时数据变化的趋势,不会只针对一个时间点进行。
|
||||
|
||||
8. 数据是有保留期限的:采集的数据一般都有基于时长的保留策略,比如仅仅保留一天、一周、一个月、一年甚至更长时间,该类数据的价值往往是由时间段决定的,因此对于不在重要时间段内的数据,都是可以被视为过期数据整块删除的。
|
||||
8. 数据是有保留期限的:采集的数据一般都有基于时长的保留策略,比如仅仅保留一天、一周、一个月、一年甚至更长时间,该类数据的价值往往是由时间段决定的,因此对于不在重要时间段内的数据,都是可以被视为过期数据整块删除的。
|
||||
|
||||
9. 需要实时分析计算操作:对于大部分互联网大数据应用,更多的是离线分析,即使有实时分析,但要求并不高。比如用户画像、可以积累一定的用户行为数据后进行,早一天晚一天画不会特别影响结果。但是对于工业、物联网的平台应用以及交易系统,对数据的实时计算要求就往往很高,因为需要根据计算结果进行实时报警、监控,从而避免事故的发生、决策时机的错过。
|
||||
|
||||
|
@ -47,7 +47,7 @@ toc_max_heading_level: 4
|
|||
|
||||
1. 电力能源领域:电力能源领域范围较大,不论是在发电、输电、配电、用电还是其他环节中,各种电力设备都会产生大量时序数据,以风力发电为例,风电机作为大型设备,拥有可能高达数百的数据采集点,因此每日所产生的时序数据量极其之大,对这些数据的监控分析是确保发电环节准确无误的必要工作。在用电环节,对智能电表实时采集回来的电流、电压等数据进行快速计算,实时了解最新的用电总量、尖、峰、平、谷用电量,判断设备是否正常工作。有些时候,电力系统可能需要拉取历史上某一年的全量数据,通过机器学习等技术分析用户的用电习惯、进行负荷预测、节能方案设计、帮助电力公司合理规划电力的供应。或者拉取上个月的尖峰平谷用电量,根据不同价位进行周期性的电费结算,以上都是时序数据在电力能源领域的典型应用。
|
||||
|
||||
2. 车联网/轨道交通领域:车辆的 GPS 、速度、油耗、故障信息等,都是典型的时序数据,通过对它们科学合理地数据分析,可以为车辆管理和优化提供强有力的支持。但是,不同车型采集的点位信息从数百点到数千点之间不一而同,随着联网的交通设备数量越来越多,这些海量的时序数据如何安全上传、数据存储、查询和分析,成为了一个亟待解决的行业问题。对于交通工具的本身,科学合理地处理时序数据可以实现车辆轨迹追踪、无人驾驶、故障预警等功能。对于交通工具的整体配套服务,也可以提供良好的支持。比如,在新一代的智能地铁管理系统中,通过地铁站中各种传感器的时序数据采集分析,可以在站中实时展示各个车厢的拥挤度、温度、舒适度等数据,让用户可以自行选择体验度最佳的出行方案,对于地铁运营商,也可以更好地实现乘客流量的调度管理。
|
||||
2. 车联网/轨道交通领域:车辆的 GPS 、速度、油耗、故障信息等,都是典型的时序数据,通过科学合理地数据分析,可以为车辆管理和优化提供强有力的支持。但是,不同车型采集的点位信息从数百点到数千点之间不一而同,随着联网的交通设备数量越来越多,这些海量的时序数据如何安全上传、数据存储、查询和分析,成为了一个亟待解决的行业问题。对于交通工具的本身,科学合理地处理时序数据可以实现车辆轨迹追踪、无人驾驶、故障预警等功能。对于交通工具的整体配套服务,也可以提供良好的支持。比如,在新一代的智能地铁管理系统中,通过地铁站中各种传感器的时序数据采集分析,可以在站中实时展示各个车厢的拥挤度、温度、舒适度等数据,让用户可以自行选择体验度最佳的出行方案,对于地铁运营商,也可以更好地实现乘客流量的调度管理。
|
||||
|
||||
3. 智能制造领域:过去的十几年间,许多传统工业企业的数字化得到了长足的发展,单个工厂从传统的几千个数据采集点,到如今数十万点、上百万点,部分远程运维场景面临上万设备、千万点的数据采集存储的需求,这些数据都属于典型的时序数据。就整个工业大数据系统而言,时序数据的处理是相当复杂的。以烟草行业的数据采集为例,设备的工业数据协议各式各样、数据采集单位随着设备类型的不同而不同。数据的实时处理能力随着数据采集点的持续增加而难以跟上,与此同时还要兼顾数据的高性能、高可用、可拓展性等等诸多特性。但从另一个角度来看,如果大数据平台能够解决以上困难,满足企业对于时序数据存储分析的需求,就可以帮助企业实现更加智能化、自动化的生产模式,从而完成质的飞升。
|
||||
|
||||
|
@ -55,7 +55,7 @@ toc_max_heading_level: 4
|
|||
|
||||
5. IT 运维领域:IT 领域中,基础设施(如服务器、网络设备、存储设备)、应用程序运行的过程中会产生大量的时序数据。通过对这些时序数据的监控,可以很快地发现基础设施/应用的运行状态和服务可用性,包括系统是否在线、服务是否正常响应等;也能看到具体到某一个具体的点位的性能指标:如 CPU 利用率、内存利用率、磁盘空间利用率、网络带宽利用率等; 还可以监控系统产生的错误日志和异常事件,包括入侵检测、安全事件日志、权限控制等,最终通过设置报警规则,及时通知管理员或运维人员具体的情况,从而及时发现问题、预防故障,并优化系统性能,确保系统稳定可靠地运行。
|
||||
|
||||
6. 金融领域:金融领域目前正经历着数据管理的一场革命,它们的行情数据属于典型的时序数据,由于保留行情数据的储存期限往往需长达 5 至 10 年,甚至超过 30 年,而且可能全世界各个国家/地区的主流金融市场的交易数据都需要全量保存,因此行情数据的总量数据体量庞大,会轻松达到 TB 级别,造成存储、查询等等各方面的瓶颈。在金融领域中,量化交易平台是最能凸显时序数据处理重要性的革命性应用之一:通过对大量时序行情数据的读取分析来及时响应市场变化,帮助交易者把握投资机会,同时规避不必要的风险,实现资产的稳健增长。可以实现包括但不限于:资产管理、情绪监控、股票回测、交易信号模拟、报表自动生成等等诸多功能。
|
||||
6. 金融领域:金融领域目前正经历着数据管理的一场革命,行情数据属于典型的时序数据,由于保留行情数据的储存期限往往需长达 5 至 10 年,甚至超过 30 年,而且可能全世界各个国家/地区的主流金融市场的交易数据都需要全量保存,因此行情数据的总量数据体量庞大,会轻松达到 TB 级别,造成存储、查询等等各方面的瓶颈。在金融领域中,量化交易平台是最能凸显时序数据处理重要性的革命性应用之一:通过对大量时序行情数据的读取分析来及时响应市场变化,帮助交易者把握投资机会,同时规避不必要的风险,实现资产的稳健增长。可以实现包括但不限于:资产管理、情绪监控、股票回测、交易信号模拟、报表自动生成等等诸多功能。
|
||||
|
||||
## 处理时序数据所需要的工具
|
||||
|
||||
|
@ -71,11 +71,11 @@ toc_max_heading_level: 4
|
|||
|
||||
5. 缓存(Cache):物联网、工业、金融应用需要实时展示一些设备或股票的最新状态,因此平台需要缓存技术提供快速的数据访问。原因是:由于时序数据体量极大,如果不使用缓存技术,而是进行常规的读取、筛选,那么对于监控设备最新状态之类的计算是十分困难的,将会导致很大的延迟,从而失去“实时”的意义。因此,缓存技术是时序数据处理平台不可缺少的一环, Redis 就是这样一种常用的缓存工具。
|
||||
|
||||
处理时序数据需要一系列模块的协同作业,从数据采集到存储、计算、分析与可视化,再到专用的时序数据算法库,每个环节都有相应的工具支持。这些工具的选择取决于具体的业务需求和数据特点,合理地选用和搭配它们才能做到高效地处理各种类型的时序数据,挖掘数据背后的价值。
|
||||
处理时序数据需要一系列模块的协同作业,从数据采集到存储、计算、分析与可视化,再到专用的时序数据算法库,每个环节都有相应的工具支持。这些工具的选择取决于具体的业务需求和数据特点,合理地选用和搭配才能做到高效地处理各种类型的时序数据,挖掘数据背后的价值。
|
||||
|
||||
## 专用时序数据处理工具的必要性
|
||||
|
||||
在时序数据的十大特征一节中提到,对于一个优秀的时序大数据处理平台来说,它必然需要具备处理时序数据十大特征的能力。在处理时序数据所需要的工具一节中介绍了时序大数据平台处理时序数据所需要的主要模块/组件。 结合这两节的内容与实际情况,可以发现:处理海量时序数据,其实是一个很庞大复杂的系统。
|
||||
在时序数据的十大特征一节中提到,对于一个优秀的时序大数据处理平台来说,必然需要具备处理时序数据十大特征的能力。在处理时序数据所需要的工具一节中介绍了时序大数据平台处理时序数据所需要的主要模块/组件。结合这两节的内容与实际情况,可以发现:处理海量时序数据,其实是一个很庞大复杂的系统。
|
||||
|
||||
早些年,为处理日益增长的互联网数据,众多的工具开始出现,最流行的便是 Hadoop 体系。除使用大家所熟悉的 Hadoop 组件如 HDFS、MapReduce、HBase 和 Hive 外,通用的大数据处理平台往往还使用 Kafka 或其他消息队列工具,Redis 或其他缓存软件,Flink 或其他实时流式数据处理软件。存储上也有人选用 MongoDB、Cassandra 或其他 NoSQL 数据库。这样一个典型的大数据处理平台基本上能很好的处理互联网行业的引用,比如典型的用户画像、舆情分析等。
|
||||
|
||||
|
|
|
@ -14,7 +14,7 @@ TDengine 是一个高性能、分布式的时序数据库。通过集成的缓
|
|||
|
||||
TDengine OSS 是一个开源的高性能时序数据库,与其他时序数据库相比,它的核心优势在于其集群开源、高性能和云原生架构。而且除了基础的写入、查询和存储功能外,TDengine OSS 还集成了缓存、流式计算和数据订阅等高级功能,这些功能显著简化了系统设计,降低了企业的研发和运营成本。
|
||||
|
||||
在 TDengine OSS 的基础上,企业版 TDengine Enterprise 提供了增强的辅助功能,包括数据的备份恢复、异地容灾、多级存储、视图、权限控制、安全加密、IP 白名单、支持 MQTT、OPC-UA、OPC-DA、PI、Wonderware、Kafka 等各种数据源。这些功能为企业提供了更为全面、安全、可靠和高效的时序数据管理解决方案。更多的细节请看 [TDengine Enterprise](https://www.taosdata.com/tdengine-pro)
|
||||
在 TDengine OSS 的基础上,TDengine Enterprise 提供了增强的辅助功能,包括数据的备份恢复、异地容灾、多级存储、视图、权限控制、安全加密、IP 白名单、支持 MQTT、OPC-UA、OPC-DA、PI、Wonderware、Kafka 等各种数据源。这些功能为企业提供了更为全面、安全、可靠和高效的时序数据管理解决方案。更多的细节请看 [TDengine Enterprise](https://www.taosdata.com/tdengine-pro)。
|
||||
|
||||
此外,TDengine Cloud 作为一种全托管的云服务,存储与计算分离,分开计费,为企业提供了企业级的工具和服务,彻底解决了运维难题,尤其适合中小规模的用户使用。更多的细节请看[TDengine 云服务](https://cloud.taosdata.com/?utm_source=menu&utm_medium=webcn)
|
||||
|
||||
|
@ -30,19 +30,19 @@ TDengine 经过特别优化,以适应时间序列数据的独特需求,引
|
|||
|
||||
4. 流式计算:TDengine 流式计算引擎提供了实时处理写入的数据流的能力,不仅支持连续查询,还支持事件驱动的流式计算。它提供了替代复杂流处理系统的轻量级解决方案,并能够在高吞吐的数据写入的情况下,提供毫秒级的计算结果延迟。
|
||||
|
||||
5. 数据订阅:TDengine 提供了类似 Kafka 的数据订阅功能。但用户可以通过 SQL 来灵活控制订阅的数据内容,并使用 Kafka 相同的 API 来订阅一张表、一组表、全部列或部分列、甚至整个数据库的数据。TDengine 可以替代需要集成消息队列产品的场景, 从而简化系统设计的复杂度,降低运营维护成本。
|
||||
5. 数据订阅:TDengine 提供了类似 Kafka 的数据订阅功能。但用户可以通过 SQL 来灵活控制订阅的数据内容,并使用和 Kafka 相同的 API 来订阅一张表、一组表、全部列或部分列、甚至整个数据库的数据。TDengine 可以替代需要集成消息队列产品的场景, 从而简化系统设计的复杂度,降低运营维护成本。
|
||||
|
||||
6. 可视化/BI:TDengine 本身不提供可视化或 BI 的功能。但通过其 RESTful API, 标准的 JDBC、ODBC 接口,TDengine 能够 Grafana、Google Data Studio、Power BI、Tableau 以及国产 BI 工具无缝集成。
|
||||
6. 可视化/BI:TDengine 本身不提供可视化或 BI 的功能。但通过其 RESTful API, 标准的 JDBC、ODBC 接口,TDengine 能够和 Grafana、Google Data Studio、Power BI、Tableau 以及国产 BI 工具无缝集成。
|
||||
|
||||
7. 集群功能:TDengine 支持集群部署,能够随着业务数据量的增长,通过增加节点线性提升系统处理能力,实现水平扩展。同时,通过多副本技术提供高可用性,并支持 Kubernetes 部署。同时还提供了多种运维工具,方便系统管理员更好地管理和维护集群的健壮运行。
|
||||
7. 集群功能:TDengine 支持集群部署,能够随着业务数据量的增长,通过增加节点线性提升系统处理能力,实现水平扩展。同时,通过多副本技术提供高可用性,支持 Kubernetes 部署,提供了多种运维工具,方便系统管理员更好地管理和维护集群的健壮运行。
|
||||
|
||||
8. 数据迁移:TDengine 提供了多种便捷的数据导入导出功能,包括脚本文件导入导出、数据文件导入导出、taosdump 工具导入导出等。
|
||||
|
||||
9. 编程连接器:TDengine 提供不同语言的连接器,包括 C/C++、Java、Go、Node.js、Rust、Python、C#、R、PHP 等。这些连接器大多都支持原生连接和 WebSocket 两种连接方式。TDengine 也提供 RESTful 接口,任何语言的应用程序可以直接通过 HTTP 请求访问数据库。
|
||||
9. 编程连接器:TDengine 提供多种语言的连接器,包括 C/C++、Java、Go、Node.js、Rust、Python、C#、R、PHP 等。这些连接器大多都支持原生连接和 WebSocket 两种连接方式。TDengine 也提供 RESTful 接口,任何语言的应用程序可以直接通过 HTTP 请求访问数据库。
|
||||
|
||||
10. 数据安全:TDengine 提供了丰富的用户管理和权限管理功能以控制不同用户对数据库和表的访问权限,提供了 IP 白名单功能以控制不同帐号只能从特定的服务器接入集群。TDengine 支持系统管理员对不同数据库按需加密,数据加密后对读写完全透明且对性能的影响很小。还提供了审计日志功能以记录系统中的敏感操作。
|
||||
|
||||
11. 常用工具:TDengine 还提供了交互式命令行程序(CLI),便于管理集群、检查系统状态、做即时查询。压力测试工具 taosBenchmark,用于测试 TDengine 的性能。TDengine 还提供了图形化管理界面,简化了操作和管理过程。
|
||||
11. 常用工具:TDengine 提供了交互式命令行程序(CLI),便于管理集群、检查系统状态、做即时查询。压力测试工具 taosBenchmark,用于测试 TDengine 的性能。TDengine 还提供了图形化管理界面,简化了操作和管理过程。
|
||||
|
||||
12. 零代码数据接入:TDengine 企业版提供了丰富的数据接入功能,依托强大的数据接入平台,无需一行代码,只需要做简单的配置即可实现多种数据源的数据接入,目前已经支持的数据源包括:OPC-UA、OPC-DA、PI、MQTT、Kafka、InfluxDB、OpenTSDB、MySQL、SQL Server、Oracle、Wonderware Historian、MongoDB。
|
||||
|
||||
|
@ -63,8 +63,11 @@ TDengine 经过特别优化,以适应时间序列数据的独特需求,引
|
|||
6. 核心开源:TDengine 的核心代码,包括集群功能,均在开源协议下公开发布。它在 GitHub 网站全球趋势排行榜上多次位居榜首,显示出其受欢迎程度。同时,TDengine 拥有一个活跃的开发者社区,为技术的持续发展和创新提供了有力支持。
|
||||
|
||||
采用 TDengine,企业可以在物联网、车联网、工业互联网等典型场景中显著降低大数据平台的总拥有成本,主要体现在以下几个方面:
|
||||
|
||||
1. 高性能带来的成本节约:TDengine 卓越的写入、查询和存储性能意味着系统所需的计算资源和存储资源可以大幅度减少。这不仅降低了硬件成本,还减少了能源消耗和维护费用。
|
||||
|
||||
2. 标准化与兼容性带来的成本效益:由于 TDengine 支持标准 SQL,并与众多第三方软件实现了无缝集成,用户可以轻松地将现有系统迁移到 TDengine 上,无须重写大量代码。这种标准化和兼容性大大降低了学习和迁移成本,缩短了项目周期。
|
||||
|
||||
3. 简化系统架构带来的成本降低:作为一个极简的时序数据平台,TDengine 集成了消息队列、缓存、流计算等必要功能,避免了额外集成众多其他组件的需要。这种简化的系统架构显著降低了系统的复杂度,从而减少了研发和运营成本,提高了整体运营效率。
|
||||
|
||||
## 技术生态
|
||||
|
@ -78,7 +81,7 @@ TDengine 经过特别优化,以适应时间序列数据的独特需求,引
|
|||
<center><figcaption>图 1. TDengine 技术生态图</figcaption></center>
|
||||
</figure>
|
||||
|
||||
上图中,左侧是各种数据采集或消息队列,包括 OPC-UA、MQTT、Telegraf、也包括 Kafka,他们的数据将被源源不断的写入到 TDengine。右侧则是可视化、BI 工具、组态软件、应用程序。下侧则是 TDengine 自身提供的命令行程序(CLI)以及可视化管理工具。
|
||||
上图中,左侧是各种数据采集或消息队列,包括 OPC-UA、MQTT、Telegraf、也包括 Kafka,它们的数据将被源源不断的写入到 TDengine。右侧是可视化、BI 工具、组态软件、应用程序。下侧是 TDengine 自身提供的命令行程序(CLI)以及可视化管理工具。
|
||||
|
||||
## 典型适用场景
|
||||
|
||||
|
|
|
@ -24,6 +24,7 @@ Flink Connector 支持所有能运行 Flink 1.19 及以上版本的平台。
|
|||
## 版本历史
|
||||
| Flink Connector 版本 | 主要变化 | TDengine 版本 |
|
||||
| ------------------| ------------------------------------ | ---------------- |
|
||||
| 2.1.0 | 修复不同数据源varchar类型写入问题| - |
|
||||
| 2.0.2 | Table Sink 支持 RowKind.UPDATE_BEFORE、RowKind.UPDATE_AFTER 和 RowKind.DELETE 类型| - |
|
||||
| 2.0.1 | Sink 支持对所有继承自 RowData 并已实现的类型进行数据写入| - |
|
||||
| 2.0.0 | 1. 支持 SQL 查询 TDengine 数据库中的数据<br/> 2. 支持 CDC 订阅 TDengine 数据库中的数据<br/> 3. 支持 Table SQL 方式读取和写入 TDengine 数据库| 3.3.5.1 及以上版本 |
|
||||
|
@ -84,7 +85,8 @@ TDengine 目前支持时间戳、数字、字符、布尔类型,与 Flink RowD
|
|||
| SMALLINT | Short |
|
||||
| TINYINT | Byte |
|
||||
| BOOL | Boolean |
|
||||
| BINARY | byte[] |
|
||||
| VARCHAR | StringData |
|
||||
| BINARY | StringData |
|
||||
| NCHAR | StringData |
|
||||
| JSON | StringData |
|
||||
| VARBINARY | byte[] |
|
||||
|
@ -113,7 +115,7 @@ env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.AT_LEAST_ONCE);
|
|||
<dependency>
|
||||
<groupId>com.taosdata.flink</groupId>
|
||||
<artifactId>flink-connector-tdengine</artifactId>
|
||||
<version>2.0.2</version>
|
||||
<version>2.1.0</version>
|
||||
</dependency>
|
||||
```
|
||||
|
||||
|
|
|
@ -490,7 +490,7 @@ TDengine 客户端驱动提供了应用编程所需要的全部 API,并且在
|
|||
- 支持版本:从 v3.3.4.3 版本开始引入
|
||||
|
||||
#### bypassFlag
|
||||
- 说明:配置文件所在目录
|
||||
- 说明:用于短路测试 `内部参数`
|
||||
- 类型:整数;
|
||||
- 取值范围:0:正常写入,1:写入消息在 taos 客户端发送 RPC 消息前返回,2:写入消息在 taosd 服务端收到 RPC 消息后返回,4:写入消息在 taosd 服务端写入内存缓存前返回,8:写入消息在 taosd 服务端数据落盘前返回
|
||||
- 默认值:0
|
||||
|
|
|
@ -4,17 +4,21 @@ sidebar_label: taos
|
|||
toc_max_heading_level: 4
|
||||
---
|
||||
|
||||
TDengine 命令行程序(以下简称 TDengine CLI)是用户操作 TDengine 实例并与之交互的最简洁最常用工具。 使用前需要安装 TDengine Server 安装包或 TDengine Client 安装包。
|
||||
TDengine 命令行程序(以下简称 TDengine CLI)是用户操作 TDengine 实例并与之交互最简洁常用工具。
|
||||
|
||||
## 启动
|
||||
## 工具获取
|
||||
|
||||
要进入 TDengine CLI,您在终端执行 `taos` 即可。
|
||||
TDengine CLI 是 TDengine 服务器及客户端安装包中默认安装组件,安装后即可使用,参考 [TDengine 安装](../../../get-started/)
|
||||
|
||||
## 运行
|
||||
|
||||
进入 TDengine CLI 交互执行模式,在终端命令行执行:
|
||||
|
||||
```bash
|
||||
taos
|
||||
```
|
||||
|
||||
如果连接服务成功,将会打印出欢迎消息和版本信息。如果失败,则会打印错误消息。
|
||||
如果连接服务成功,将会打印出欢迎消息和版本信息。若失败,打印错误消息。
|
||||
|
||||
TDengine CLI 的提示符号如下:
|
||||
|
||||
|
@ -22,42 +26,24 @@ TDengine CLI 的提示符号如下:
|
|||
taos>
|
||||
```
|
||||
|
||||
进入 TDengine CLI 后,你可执行各种 SQL 语句,包括插入、查询以及各种管理命令。
|
||||
进入 TDengine CLI 后,可执行各种 SQL 语句,包括插入、查询以及各种管理命令。
|
||||
退出 TDengine CLI, 执行 `q` 或 `quit` 或 `exit` 回车即可
|
||||
```shell
|
||||
taos> quit
|
||||
```
|
||||
|
||||
|
||||
## 执行 SQL 脚本
|
||||
|
||||
在 TDengine CLI 里可以通过 `source` 命令来运行脚本文件中的多条 SQL 命令。
|
||||
|
||||
```sql
|
||||
taos> source <filename>;
|
||||
```
|
||||
|
||||
## 在线修改显示字符宽度
|
||||
|
||||
可以在 TDengine CLI 里使用如下命令调整字符显示宽度
|
||||
|
||||
```sql
|
||||
taos> SET MAX_BINARY_DISPLAY_WIDTH <nn>;
|
||||
```
|
||||
|
||||
如显示的内容后面以 ... 结尾时,表示该内容已被截断,可通过本命令修改显示字符宽度以显示完整的内容。
|
||||
|
||||
## 命令行参数
|
||||
|
||||
您可通过配置命令行参数来改变 TDengine CLI 的行为。以下为常用的几个命令行参数:
|
||||
### 常用参数
|
||||
可通过配置命令行参数来改变 TDengine CLI 的行为。以下为常用的几个命令行参数:
|
||||
|
||||
- -h HOST: 要连接的 TDengine 服务端所在服务器的 FQDN, 默认为连接本地服务
|
||||
- -P PORT: 指定服务端所用端口号
|
||||
- -u USER: 连接时使用的用户名
|
||||
- -p PASSWORD: 连接服务端时使用的密码,特殊字符如 `! & ( ) < > ; |` 需使用字符 `\` 进行转义处理
|
||||
- -h HOST: 要连接的 TDengine 服务端所在服务器的 FQDN, 默认值: 127.0.0.1
|
||||
- -P PORT: 指定服务端所用端口号,默认值:6030
|
||||
- -u USER: 连接时使用的用户名,默认值:root
|
||||
- -p PASSWORD: 连接服务端时使用的密码,特殊字符如 `! & ( ) < > ; |` 需使用字符 `\` 进行转义处理, 默认值:taosdata
|
||||
- -?, --help: 打印出所有命令行参数
|
||||
|
||||
还有更多其他参数:
|
||||
### 更多参数
|
||||
|
||||
- -a AUTHSTR: 连接服务端的授权信息
|
||||
- -A: 通过用户名和密码计算授权信息
|
||||
|
@ -79,27 +65,58 @@ taos> SET MAX_BINARY_DISPLAY_WIDTH <nn>;
|
|||
- -z TIMEZONE: 指定时区,默认为本地时区
|
||||
- -V: 打印出当前版本号
|
||||
|
||||
示例:
|
||||
### 非交互式执行
|
||||
|
||||
使用 `-s` 参数可进行非交互式执行 SQL,执行完成后退出,此模式适合在自动化脚本中使用。
|
||||
如以下命令连接到服务器 h1.taos.com, 执行 -s 指定的 SQL:
|
||||
```bash
|
||||
taos -h h1.taos.com -s "use db; show tables;"
|
||||
```
|
||||
|
||||
## 配置文件
|
||||
### taosc 配置文件
|
||||
|
||||
也可以通过配置文件中的参数设置来控制 TDengine CLI 的行为。可用配置参数请参考[客户端配置](../../components/taosc)
|
||||
使用 `-c` 参数改变 `taosc` 客户端加载配置文件的位置,客户端配置参数参考 [客户端配置](../../components/taosc)
|
||||
以下命令指定了 `taosc` 客户端加载 `/root/cfg/` 下的 `taos.cfg` 配置文件
|
||||
```bash
|
||||
taos -c /root/cfg/
|
||||
```
|
||||
|
||||
## 错误代码表
|
||||
在 TDengine 3.3.4.8 版本后 TDengine CLI 在返回错误信息中返回了具体错误码,用户可到 TDengine 官网错误码页面查找具体原因及解决措施,见:[错误码参考表](https://docs.taosdata.com/reference/error-code/)
|
||||
## 执行 SQL 脚本
|
||||
|
||||
## TDengine CLI TAB 键补全
|
||||
在 TDengine CLI 里可以通过 `source` 命令来运行脚本文件中的多条 SQL 命令。
|
||||
|
||||
```sql
|
||||
taos> source <filename>;
|
||||
```
|
||||
|
||||
## 数据导入/导出
|
||||
|
||||
### 导出查询结果
|
||||
|
||||
- 可以使用符号 “>>” 导出查询结果到某个文件中,语法为: sql 查询语句 >> ‘输出文件名’; 输出文件如果不写路径的话,将输出至当前目录下。如 select * from d0 >> ‘/root/d0.csv’; 将把查询结果输出到 /root/d0.csv 中。
|
||||
|
||||
### 数据从文件导入
|
||||
|
||||
- 可以使用 insert into table_name file '输入文件名',把上一步中导出的数据文件再导入到指定表中。如 insert into d0 file '/root/d0.csv'; 表示把上面导出的数据全部再导入至 d0 表中。
|
||||
|
||||
## 设置字符类型显示宽度
|
||||
|
||||
可以在 TDengine CLI 里使用如下命令调整字符显示宽度
|
||||
|
||||
```sql
|
||||
taos> SET MAX_BINARY_DISPLAY_WIDTH <nn>;
|
||||
```
|
||||
|
||||
如显示的内容后面以 ... 结尾时,表示该内容已被截断,可通过本命令修改显示字符宽度以显示完整的内容。
|
||||
|
||||
## TAB 键自动补全
|
||||
|
||||
- TAB 键前为空命令状态下按 TAB 键,会列出 TDengine CLI 支持的所有命令
|
||||
- TAB 键前为空格状态下按 TAB 键,会显示此位置可以出现的所有命令词的第一个,再次按 TAB 键切为下一个
|
||||
- TAB 键前为字符串,会搜索与此字符串前缀匹配的所有可出现命令词,并显示第一个,再次按 TAB 键切为下一个
|
||||
- 输入反斜杠 `\` + TAB 键, 会自动补全为列显示模式命令词 `\G;`
|
||||
|
||||
## TDengine CLI 小技巧
|
||||
## 使用小技巧
|
||||
|
||||
- 可以使用上下光标键查看历史输入的指令
|
||||
- 在 TDengine CLI 中使用 `alter user` 命令可以修改用户密码,缺省密码为 `taosdata`
|
||||
|
@ -107,10 +124,5 @@ taos -h h1.taos.com -s "use db; show tables;"
|
|||
- 执行 `RESET QUERY CACHE` 可清除本地表 Schema 的缓存
|
||||
- 批量执行 SQL 语句。可以将一系列的 TDengine CLI 命令(以英文 ; 结尾,每个 SQL 语句为一行)按行存放在文件里,在 TDengine CLI 里执行命令 `source <file-name>` 自动执行该文件里所有的 SQL 语句
|
||||
|
||||
## TDengine CLI 导出查询结果到文件中
|
||||
|
||||
- 可以使用符号 “>>” 导出查询结果到某个文件中,语法为: sql 查询语句 >> ‘输出文件名’; 输出文件如果不写路径的话,将输出至当前目录下。如 select * from d0 >> ‘/root/d0.csv’; 将把查询结果输出到 /root/d0.csv 中。
|
||||
|
||||
## TDengine CLI 导入文件中的数据到表中
|
||||
|
||||
- 可以使用 insert into table_name file '输入文件名',把上一步中导出的数据文件再导入到指定表中。如 insert into d0 file '/root/d0.csv'; 表示把上面导出的数据全部再导入至 d0 表中。
|
||||
## 错误代码表
|
||||
在 TDengine 3.3.4.8 版本后 TDengine CLI 在返回错误信息中返回了具体错误码,用户可到 TDengine 官网错误码页面查找具体原因及解决措施,见:[错误码参考表](https://docs.taosdata.com/reference/error-code/)
|
||||
|
|
|
@ -6,44 +6,24 @@ toc_max_heading_level: 4
|
|||
|
||||
taosdump 是为开源用户提供的 TDengine 数据备份/恢复工具,备份数据文件采用标准 [ Apache AVRO ](https://avro.apache.org/) 格式,方便与外界生态交换数据。taosdump 提供多种数据备份及恢复选项来满足不同需求,可通过 --help 查看支持的全部选项。
|
||||
|
||||
## 工具获取
|
||||
|
||||
## 安装
|
||||
taosdump 是 TDengine 服务器及客户端安装包中默认安装组件,安装后即可使用,参考 [TDengine 安装](../../../get-started/)
|
||||
|
||||
taosdump 是 TDengine 安装包中默认安装组件,安装 TDengine 后即可使用,可参考 [TDengine 安装](../../../get-started/)
|
||||
## 运行
|
||||
taosdump 需在命令行终端中运行,运行时必须带参数,指明是备份操作或还原操作,如:
|
||||
``` bash
|
||||
taosdump -h dev126 -D test -o /root/test/
|
||||
```
|
||||
以上命令表示备份主机名为 `dev126` 机器上的 `test` 数据库到 `/root/test/` 目录下
|
||||
|
||||
``` bash
|
||||
taosdump -h dev126 -i /root/test/
|
||||
```
|
||||
以上命令表示把 `/root/test/` 目录下之前备份的数据文件恢复到主机名为 `dev126` 的主机上
|
||||
|
||||
|
||||
## 常用使用场景
|
||||
|
||||
### taosdump 备份数据
|
||||
|
||||
1. 备份所有数据库:指定 `-A` 或 `--all-databases` 参数;
|
||||
2. 备份多个指定数据库:使用 `-D db1,db2,...` 参数;
|
||||
3. 备份指定数据库中某些超级表或普通表:使用 `dbname stbname1 stbname2 tbname1 tbname2 ...` 参数,注意这种输入序列第一个参数为数据库名称,且只支持一个数据库,第二个和之后的参数为该数据库中的超级表或普通表名称,中间以空格分隔;
|
||||
4. 备份系统 log 库:TDengine 集群通常会包含一个系统数据库,名为 `log`,这个数据库内的数据为 TDengine 自我运行的数据,taosdump 默认不会对 log 库进行备份。如果有特定需求对 log 库进行备份,可以使用 `-a` 或 `--allow-sys` 命令行参数。
|
||||
5. “宽容”模式备份:taosdump 1.4.1 之后的版本提供 `-n` 参数和 `-L` 参数,用于备份数据时不使用转义字符和“宽容”模式,可以在表名、列名、标签名没使用转义字符的情况下减少备份数据时间和备份数据占用空间。如果不确定符合使用 `-n` 和 `-L` 条件时请使用默认参数进行“严格”模式进行备份。转义字符的说明请参考[官方文档](../../taos-sql/escape)。
|
||||
6. `-o` 参数指定的目录下如果已存在备份文件,为防止数据被覆盖,taosdump 会报错并退出,请更换其它空目录或清空原来数据后再备份。
|
||||
7. 目前 taosdump 不支持数据断点继备功能,一旦数据备份中断,需要从头开始。如果备份需要很长时间,建议使用(-S -E 选项)指定开始/结束时间进行分段备份的方法,
|
||||
|
||||
:::tip
|
||||
- taosdump 1.4.1 之后的版本提供 `-I` 参数,用于解析 avro 文件 schema 和数据,如果指定 `-s` 参数将只解析 schema。
|
||||
- taosdump 1.4.2 之后的备份使用 `-B` 参数指定的批次数,默认值为 16384,如果在某些环境下由于网络速度或磁盘性能不足导致 "Error actual dump .. batch .." 可以通过 `-B` 参数调整为更小的值进行尝试。
|
||||
- taosdump 的导出不支持中断恢复,所以当进程意外终止后,正确的处理方式是删除当前已导出或生成的所有相关文件。
|
||||
- taosdump 的导入支持中断恢复,但是当进程重新启动时,会收到一些“表已经存在”的提示,可以忽视。
|
||||
|
||||
:::
|
||||
|
||||
### taosdump 恢复数据
|
||||
|
||||
- 恢复指定路径下的数据文件:使用 `-i` 参数加上数据文件所在路径。如前面提及,不应该使用同一个目录备份不同数据集合,也不应该在同一路径多次备份同一数据集,否则备份数据会造成覆盖或多次备份。
|
||||
- taosdump 支持数据恢复至新数据库名下,参数是 -W, 详细见命令行参数说明。
|
||||
|
||||
|
||||
:::tip
|
||||
taosdump 内部使用 TDengine stmt binding API 进行恢复数据的写入,为提高数据恢复性能,目前使用 16384 为一次写入批次。如果备份数据中有比较多列数据,可能会导致产生 "WAL size exceeds limit" 错误,此时可以通过使用 `-B` 参数调整为一个更小的值进行尝试。
|
||||
|
||||
:::
|
||||
|
||||
## 详细命令行参数列表
|
||||
## 命令行参数
|
||||
|
||||
以下为 taosdump 详细命令行参数列表:
|
||||
|
||||
|
@ -119,3 +99,34 @@ for any corresponding short options.
|
|||
|
||||
Report bugs to <support@taosdata.com>.
|
||||
```
|
||||
|
||||
## 常用使用场景
|
||||
|
||||
### taosdump 备份数据
|
||||
|
||||
1. 备份所有数据库:指定 `-A` 或 `--all-databases` 参数;
|
||||
2. 备份多个指定数据库:使用 `-D db1,db2,...` 参数;
|
||||
3. 备份指定数据库中某些超级表或普通表:使用 `dbname stbname1 stbname2 tbname1 tbname2 ...` 参数,注意这种输入序列第一个参数为数据库名称,且只支持一个数据库,第二个和之后的参数为该数据库中的超级表或普通表名称,中间以空格分隔;
|
||||
4. 备份系统 log 库:TDengine 集群通常会包含一个系统数据库,名为 `log`,这个数据库内的数据为 TDengine 自我运行的数据,taosdump 默认不会对 log 库进行备份。如果有特定需求对 log 库进行备份,可以使用 `-a` 或 `--allow-sys` 命令行参数。
|
||||
5. “宽容”模式备份:taosdump 1.4.1 之后的版本提供 `-n` 参数和 `-L` 参数,用于备份数据时不使用转义字符和“宽容”模式,可以在表名、列名、标签名没使用转义字符的情况下减少备份数据时间和备份数据占用空间。如果不确定符合使用 `-n` 和 `-L` 条件时请使用默认参数进行“严格”模式进行备份。转义字符的说明请参考[官方文档](../../taos-sql/escape)。
|
||||
6. `-o` 参数指定的目录下如果已存在备份文件,为防止数据被覆盖,taosdump 会报错并退出,请更换其它空目录或清空原来数据后再备份。
|
||||
7. 目前 taosdump 不支持数据断点继备功能,一旦数据备份中断,需要从头开始。如果备份需要很长时间,建议使用(-S -E 选项)指定开始/结束时间进行分段备份的方法,
|
||||
|
||||
:::tip
|
||||
- taosdump 1.4.1 之后的版本提供 `-I` 参数,用于解析 avro 文件 schema 和数据,如果指定 `-s` 参数将只解析 schema。
|
||||
- taosdump 1.4.2 之后的备份使用 `-B` 参数指定的批次数,默认值为 16384,如果在某些环境下由于网络速度或磁盘性能不足导致 "Error actual dump .. batch .." 可以通过 `-B` 参数调整为更小的值进行尝试。
|
||||
- taosdump 的导出不支持中断恢复,所以当进程意外终止后,正确的处理方式是删除当前已导出或生成的所有相关文件。
|
||||
- taosdump 的导入支持中断恢复,但是当进程重新启动时,会收到一些“表已经存在”的提示,可以忽视。
|
||||
|
||||
:::
|
||||
|
||||
### taosdump 恢复数据
|
||||
|
||||
- 恢复指定路径下的数据文件:使用 `-i` 参数加上数据文件所在路径。如前面提及,不应该使用同一个目录备份不同数据集合,也不应该在同一路径多次备份同一数据集,否则备份数据会造成覆盖或多次备份。
|
||||
- taosdump 支持数据恢复至新数据库名下,参数是 -W, 详细见命令行参数说明。
|
||||
|
||||
|
||||
:::tip
|
||||
taosdump 内部使用 TDengine stmt binding API 进行恢复数据的写入,为提高数据恢复性能,目前使用 16384 为一次写入批次。如果备份数据中有比较多列数据,可能会导致产生 "WAL size exceeds limit" 错误,此时可以通过使用 `-B` 参数调整为一个更小的值进行尝试。
|
||||
|
||||
:::
|
|
@ -6,9 +6,9 @@ toc_max_heading_level: 4
|
|||
|
||||
taosBenchmark 是 TDengine 产品性能基准测试工具,提供对 TDengine 产品写入、查询及订阅性能测试,输出性能指标。
|
||||
|
||||
## 安装
|
||||
## 工具获取
|
||||
|
||||
taosBenchmark 是 TDengine 安装包中默认安装组件,安装 TDengine 后即可使用,参考 [TDengine 安装](../../../get-started/)
|
||||
taosBenchmark 是 TDengine 服务器及客户端安装包中默认安装组件,安装后即可使用,参考 [TDengine 安装](../../../get-started/)
|
||||
|
||||
## 运行
|
||||
|
||||
|
@ -87,7 +87,7 @@ taosBenchmark -f <json file>
|
|||
|
||||
查看更多 json 配置文件示例可 [点击这里](https://github.com/taosdata/TDengine/tree/main/tools/taos-tools/example)
|
||||
|
||||
## 命令行参数详解
|
||||
## 命令行参数
|
||||
| 命令行参数 | 功能说明 |
|
||||
| ---------------------------- | ----------------------------------------------- |
|
||||
| -f/--file \<json file> | 要使用的 JSON 配置文件,由该文件指定所有参数,本参数与命令行其他参数不能同时使用。没有默认值 |
|
||||
|
@ -159,12 +159,13 @@ SUCC: insert delay, min: 19.6780ms, avg: 64.9390ms, p90: 94.6900ms, p95: 105.187
|
|||
查询性能测试主要输出查询请求速度 QPS 指标, 输出格式如下:
|
||||
``` bash
|
||||
complete query with 3 threads and 10000 query delay avg: 0.002686s min: 0.001182s max: 0.012189s p90: 0.002977s p95: 0.003493s p99: 0.004645s SQL command: select ...
|
||||
INFO: Total specified queries: 30000
|
||||
INFO: Spend 26.9530 second completed total queries: 30000, the QPS of all threads: 1113.049
|
||||
```
|
||||
- 第一行表示 3 个线程每个线程执行 10000 次查询及查询请求延时百分位分布情况,`SQL command` 为测试的查询语句
|
||||
- 第二行表示总共完成了 10000 * 3 = 30000 次查询总数
|
||||
- 第三行表示查询总耗时为 26.9653 秒,每秒查询率(QPS)为:1113.049 次/秒
|
||||
- 第二行表示查询总耗时为 26.9653 秒,每秒查询率(QPS)为:1113.049 次/秒
|
||||
- 如果在查询中设置了 `continue_if_fail` 选项为 `yes`,在最后一行中会输出失败请求个数及错误率,格式 error + 失败请求个数 (错误率)
|
||||
- QPS = 成功请求数量 / 花费时间(单位秒)
|
||||
- 错误率 = 失败请求数量 /(成功请求数量 + 失败请求数量)
|
||||
|
||||
#### 订阅指标
|
||||
|
||||
|
@ -182,7 +183,7 @@ INFO: Consumed total msgs: 3000, total rows: 30000000
|
|||
- 4 ~ 6 行是测试完成后每个消费者总体统计,统计共消费了多少条消息,共计多少行
|
||||
- 第 7 行所有消费者总体统计,`msgs` 表示共消费了多少条消息, `rows` 表示共消费了多少行数据
|
||||
|
||||
## 配置文件参数详解
|
||||
## 配置文件参数
|
||||
|
||||
### 通用配置参数
|
||||
|
||||
|
@ -203,42 +204,26 @@ INFO: Consumed total msgs: 3000, total rows: 30000000
|
|||
|
||||
插入场景下 `filetype` 必须设置为 `insert`,该参数及其它通用参数详见[通用配置参数](#通用配置参数)
|
||||
|
||||
- ** keep_trying ** : 失败后进行重试的次数,默认不重试。需使用 v3.0.9 以上版本。
|
||||
- **keep_trying** : 失败后进行重试的次数,默认不重试。需使用 v3.0.9 以上版本。
|
||||
|
||||
- ** trying_interval ** : 失败重试间隔时间,单位为毫秒,仅在 keep_trying 指定重试后有效。需使用 v3.0.9 以上版本。
|
||||
- ** childtable_from 和 childtable_to ** : 指定写入子表范围,开闭区间为 [childtable_from, childtable_to).
|
||||
- **trying_interval** : 失败重试间隔时间,单位为毫秒,仅在 keep_trying 指定重试后有效。需使用 v3.0.9 以上版本。
|
||||
- **childtable_from 和 childtable_to** : 指定写入子表范围,开闭区间为 [childtable_from, childtable_to).
|
||||
|
||||
- ** continue_if_fail ** : 允许用户定义失败后行为
|
||||
- **continue_if_fail** : 允许用户定义失败后行为
|
||||
|
||||
“continue_if_fail”: “no”, 失败 taosBenchmark 自动退出,默认行为
|
||||
“continue_if_fail”: “yes”, 失败 taosBenchmark 警告用户,并继续写入
|
||||
“continue_if_fail”: “smart”, 如果子表不存在失败,taosBenchmark 会建立子表并继续写入
|
||||
|
||||
#### 数据库相关配置参数
|
||||
#### 数据库相关
|
||||
|
||||
创建数据库时的相关参数在 json 配置文件中的 `dbinfo` 中配置,个别具体参数如下。其余参数均与 TDengine 中 `create database` 时所指定的数据库参数相对应,详见[../../taos-sql/database]
|
||||
|
||||
- **name** : 数据库名。
|
||||
|
||||
- **drop** : 数据库已存在时是否删除重建,可选项为 "yes" 或 "no", 默认为 “yes”
|
||||
- **drop** : 数据库已存在时是否删除,可选项为 "yes" 或 "no", 默认为 “yes”
|
||||
|
||||
#### 流式计算相关配置参数
|
||||
|
||||
创建流式计算的相关参数在 json 配置文件中的 `stream` 中配置,具体参数如下。
|
||||
|
||||
- **stream_name** : 流式计算的名称,必填项。
|
||||
|
||||
- **stream_stb** : 流式计算对应的超级表名称,必填项。
|
||||
|
||||
- **stream_sql** : 流式计算的sql语句,必填项。
|
||||
|
||||
- **trigger_mode** : 流式计算的触发模式,可选项。
|
||||
|
||||
- **watermark** : 流式计算的水印,可选项。
|
||||
|
||||
- **drop** : 是否创建流式计算,可选项为 "yes" 或者 "no", 为 "no" 时不创建。
|
||||
|
||||
#### 超级表相关配置参数
|
||||
#### 超级表相关
|
||||
|
||||
创建超级表时的相关参数在 json 配置文件中的 `super_tables` 中配置,具体参数如下。
|
||||
|
||||
|
@ -246,9 +231,9 @@ INFO: Consumed total msgs: 3000, total rows: 30000000
|
|||
|
||||
- **child_table_exists** : 子表是否已经存在,默认值为 "no",可选值为 "yes" 或 "no"。
|
||||
|
||||
- **child_table_count** : 子表的数量,默认值为 10。
|
||||
- **childtable_count** : 子表的数量,默认值为 10。
|
||||
|
||||
- **child_table_prefix** : 子表名称的前缀,必选配置项,没有默认值。
|
||||
- **childtable_prefix** : 子表名称的前缀,必选配置项,没有默认值。
|
||||
|
||||
- **escape_character** : 超级表和子表名称中是否包含转义字符,默认值为 "no",可选值为 "yes" 或 "no"。
|
||||
|
||||
|
@ -300,7 +285,7 @@ INFO: Consumed total msgs: 3000, total rows: 30000000
|
|||
- **sqls** : 字符串数组类型,指定超级表创建成功后要执行的 sql 数组,sql 中指定表名前面要带数据库名,否则会报未指定数据库错误
|
||||
|
||||
|
||||
#### 标签列与数据列配置参数
|
||||
#### 标签列与数据列
|
||||
|
||||
指定超级表标签列与数据列的配置参数分别在 `super_tables` 中的 `columns` 和 `tag` 中。
|
||||
|
||||
|
@ -335,19 +320,17 @@ INFO: Consumed total msgs: 3000, total rows: 30000000
|
|||
|
||||
- **fillNull**: 字符串类型,指定此列是否随机插入 NULL 值,可指定为 “true” 或 "false", 只有当 generate_row_rule 为 2 时有效
|
||||
|
||||
#### 插入行为配置参数
|
||||
#### 插入行为相关
|
||||
|
||||
- **thread_count** : 插入数据的线程数量,默认为 8。
|
||||
|
||||
- **thread_bind_vgroup** : 写入时 vgroup 是否和写入线程绑定,绑定后可提升写入速度, 取值为 "yes" 或 "no",默认值为 “no”, 设置为 “no” 后与原来行为一致。 当设为 “yes” 时,如果 thread_count 数量大小写入数据库的 vgroups 数量, thread_count 自动调整为 vgroups 数量;如果 thread_count 数量小于 vgroups 数量,写入线程数量不做调整,一个线程写完一个 vgroup 数据后再写下一个,同时保持一个 vgroup 同时只能由一个线程写入的规则。
|
||||
**thread_bind_vgroup** : 写入时 vgroup 是否和写入线程绑定,绑定后可提升写入速度, 取值为 "yes" 或 "no",默认值为 “no”, 设置为 “no” 后与原来行为一致。 当设为 “yes” 时,如果 thread_count 大于写入数据库 vgroups 数量, thread_count 自动调整为 vgroups 数量;如果 thread_count 小于 vgroups 数量,写入线程数量不做调整,一个线程写完一个 vgroup 数据后再写下一个,同时保持一个 vgroup 同时只能由一个线程写入的规则。
|
||||
|
||||
- **create_table_thread_count** : 建表的线程数量,默认为 8。
|
||||
|
||||
- **connection_pool_size** : 预先建立的与 TDengine 服务端之间的连接的数量。若不配置,则与所指定的线程数相同。
|
||||
|
||||
- **result_file** : 结果输出文件的路径,默认值为 ./output.txt。
|
||||
|
||||
- **confirm_parameter_prompt** : 开关参数,要求用户在提示后确认才能继续。默认值为 false 。
|
||||
- **confirm_parameter_prompt** : 开关参数,要求用户在提示后确认才能继续, 可取值 "yes" or "no"。默认值为 "no" 。
|
||||
|
||||
- **interlace_rows** : 启用交错插入模式并同时指定向每个子表每次插入的数据行数。交错插入模式是指依次向每张子表插入由本参数所指定的行数并重复这个过程,直到所有子表的数据都插入完成。默认值为 0, 即向一张子表完成数据插入后才会向下一张子表进行数据插入。
|
||||
在 `super_tables` 中也可以配置该参数,若配置则以 `super_tables` 中的配置为高优先级,覆盖全局设置。
|
||||
|
@ -373,16 +356,20 @@ interval 控制休眠时间,避免持续查询慢查询消耗 CPU ,单位为
|
|||
|
||||
其它通用参数详见[通用配置参数](#通用配置参数)。
|
||||
|
||||
#### 执行指定查询语句的配置参数
|
||||
#### 执行指定查询语句
|
||||
|
||||
查询指定表(可以指定超级表、子表或普通表)的配置参数在 `specified_table_query` 中设置。
|
||||
|
||||
- **mixed_query** : 查询模式,取值 “yes” 为`混合查询`, "no" 为`正常查询` , 默认值为 “no”
|
||||
`混合查询`:`sqls` 中所有 sql 按 `threads` 线程数分组,每个线程执行一组, 线程中每个 sql 都需执行 `query_times` 次查询
|
||||
`正常查询`:`sqls` 中每个 sql 启动 `threads` 个线程,每个线程执行完 `query_times` 次后退出,下个 sql 需等待上个 sql 线程全部执行完退出后方可执行
|
||||
不管 `正常查询` 还是 `混合查询` ,执行查询总次数是相同的 ,查询总次数 = `sqls` 个数 * `threads` * `query_times`, 区别是 `正常查询` 每个 sql 都会启动 `threads` 个线程,而 `混合查询` 只启动一次 `threads` 个线程执行完所有 SQL, 两者启动线程次数不一样。
|
||||
- **mixed_query** : 查询模式
|
||||
“yes” :`混合查询`
|
||||
"no"(默认值) :`普通查询`
|
||||
`普通查询`:`sqls` 中每个 sql 启动 `threads` 个线程查询此 sql, 执行完 `query_times` 次查询后退出,执行此 sql 的所有线程都完成后进入下一个 sql
|
||||
`查询总次数` = `sqls` 个数 * `query_times` * `threads`
|
||||
|
||||
`混合查询`:`sqls` 中所有 sql 分成 `threads` 个组,每个线程执行一组, 每个 sql 都需执行 `query_times` 次查询
|
||||
`查询总次数` = `sqls` 个数 * `query_times`
|
||||
|
||||
- **query_interval** : 查询时间间隔,单位是秒,默认值为 0。
|
||||
- **query_interval** : 查询时间间隔,单位: millisecond,默认值为 0。
|
||||
|
||||
- **threads** : 执行查询 SQL 的线程数,默认值为 1。
|
||||
|
||||
|
@ -390,7 +377,7 @@ interval 控制休眠时间,避免持续查询慢查询消耗 CPU ,单位为
|
|||
- **sql**: 执行的 SQL 命令,必填。
|
||||
- **result**: 保存查询结果的文件,未指定则不保存。
|
||||
|
||||
#### 查询超级表的配置参数
|
||||
#### 查询超级表
|
||||
|
||||
查询超级表的配置参数在 `super_table_query` 中设置。
|
||||
超级表查询的线程模式与上面介绍的指定查询语句查询的 `正常查询` 模式相同,不同之处是本 `sqls` 使用所有子表填充。
|
||||
|
@ -402,16 +389,14 @@ interval 控制休眠时间,避免持续查询慢查询消耗 CPU ,单位为
|
|||
- **threads** : 执行查询 SQL 的线程数,默认值为 1。
|
||||
|
||||
- **sqls** :
|
||||
- **sql** : 执行的 SQL 命令,必填;对于超级表的查询 SQL,在 SQL 命令中保留 "xxxx",程序会自动将其替换为超级表的所有子表名。
|
||||
替换为超级表中所有的子表名。
|
||||
- **sql** : 执行的 SQL 命令,必填;对于超级表的查询 SQL,在 SQL 命令中必须保留 "xxxx",会替换为超级下所有子表名后再执行。
|
||||
- **result** : 保存查询结果的文件,未指定则不保存。
|
||||
- **限制项** : sqls 下配置 sql 数组最大为 100 个
|
||||
|
||||
### 订阅场景配置参数
|
||||
|
||||
订阅场景下 `filetype` 必须设置为 `subscribe`,该参数及其它通用参数详见[通用配置参数](#通用配置参数)
|
||||
|
||||
#### 执行指定订阅语句的配置参数
|
||||
|
||||
订阅指定表(可以指定超级表、子表或者普通表)的配置参数在 `specified_table_query` 中设置。
|
||||
|
||||
- **threads/concurrent** : 执行 SQL 的线程数,默认为 1。
|
||||
|
@ -420,7 +405,7 @@ interval 控制休眠时间,避免持续查询慢查询消耗 CPU ,单位为
|
|||
- **sql** : 执行的 SQL 命令,必填。
|
||||
|
||||
|
||||
#### 配置文件中数据类型书写对照表
|
||||
### 配置文件中数据类型书写对照表
|
||||
|
||||
| # | **引擎** | **taosBenchmark**
|
||||
| --- | :----------------: | :---------------:
|
||||
|
|
|
@ -37,6 +37,9 @@ database_option: {
|
|||
| WAL_FSYNC_PERIOD value
|
||||
| WAL_RETENTION_PERIOD value
|
||||
| WAL_RETENTION_SIZE value
|
||||
| COMPACT_INTERVAL value
|
||||
| COMPACT_TIME_RANGE value
|
||||
| COMPACT_TIME_OFFSET value
|
||||
}
|
||||
```
|
||||
|
||||
|
@ -81,6 +84,10 @@ database_option: {
|
|||
- WAL_FSYNC_PERIOD:当 WAL_LEVEL 参数设置为 2 时,用于设置落盘的周期。默认为 3000,单位毫秒。最小为 0,表示每次写入立即落盘;最大为 180000,即三分钟。
|
||||
- WAL_RETENTION_PERIOD: 为了数据订阅消费,需要 WAL 日志文件额外保留的最大时长策略。WAL 日志清理,不受订阅客户端消费状态影响。单位为 s。默认为 3600,表示在 WAL 保留最近 3600 秒的数据,请根据数据订阅的需要修改这个参数为适当值。
|
||||
- WAL_RETENTION_SIZE:为了数据订阅消费,需要 WAL 日志文件额外保留的最大累计大小策略。单位为 KB。默认为 0,表示累计大小无上限。
|
||||
- COMPACT_INTERVAL:自动 compact 触发周期(从 1970-01-01T00:00:00Z 开始切分的时间周期)。取值范围:0 或 [10m, keep2],单位:m(分钟),h(小时),d(天)。不加时间单位默认单位为天,默认值为 0,即不触发自动 compact 功能。如果 db 中有未完成的 compact 任务,不重复下发 compact 任务。仅企业版 3.3.5.0 版本开始支持。
|
||||
- COMPACT_TIME_RANGE:自动 compact 任务触发的 compact 时间范围,取值范围:[-keep2, -duration],单位:m(分钟),h(小时),d(天)。不加时间单位时默认单位为天,默认值为 [0, 0]。取默认值 [0, 0] 时,如果 COMPACT_INTERVAL 大于 0,会按照 [-keep2, -duration] 下发自动 compact。因此,要关闭自动 compact 功能,需要将 COMPACT_INTERVAL 设置为 0。仅企业版 3.3.5.0 版本开始支持。
|
||||
- COMPACT_TIME_OFFSET:自动 compact 任务触发的 compact 时间相对本地时间的偏移量。取值范围:[0,23],单位: h(小时),默认值为 0。以 UTC 0 时区为例,如果 COMPACT_INTERVAL 为 1d,当 COMPACT_TIME_OFFSET 为 0 时,在每天 0 点下发自动 compact,如果 COMPACT_TIME_OFFSET 为 2,在每天 2 点下发自动 compact。仅企业版 3.3.5.0 版本开始支持。
|
||||
-
|
||||
|
||||
### 创建数据库示例
|
||||
|
||||
|
@ -127,6 +134,9 @@ alter_database_option: {
|
|||
| WAL_RETENTION_PERIOD value
|
||||
| WAL_RETENTION_SIZE value
|
||||
| MINROWS value
|
||||
| COMPACT_INTERVAL value
|
||||
| COMPACT_TIME_RANGE value
|
||||
| COMPACT_TIME_OFFSET value
|
||||
}
|
||||
```
|
||||
|
||||
|
@ -215,7 +225,7 @@ SHOW db_name.ALIVE;
|
|||
|
||||
查询数据库 db_name 的可用状态,返回值 0:不可用 1:完全可用 2:部分可用(即数据库包含的 VNODE 部分节点可用,部分节点不可用)
|
||||
|
||||
## 查看DB 的磁盘空间占用
|
||||
## 查看 DB 的磁盘空间占用
|
||||
|
||||
```sql
|
||||
select * from INFORMATION_SCHEMA.INS_DISK_USAGE where db_name = 'db_name'
|
||||
|
|
|
@ -137,7 +137,7 @@ const taos = require("@tdengine/websocket");
|
|||
async function createConnect() {
|
||||
try {
|
||||
let url = 'ws://127.0.0.1:6041'
|
||||
let conf = WsSql.NewConfig(url)
|
||||
let conf = new taos.WSConfig(url)
|
||||
conf.setUser('root')
|
||||
conf.setPwd('taosdata')
|
||||
conf.setDb('db')
|
||||
|
|
|
@ -22,14 +22,15 @@ import RequestId from "./_request_id.mdx";
|
|||
|
||||
## 版本历史
|
||||
|
||||
| Connector 版本 | 主要变化 | TDengine 版本 |
|
||||
|:-------------|:---------------------------|:--------------|
|
||||
| 3.1.5 | 修复 websocket 协议编码中文时长度错误 | - |
|
||||
| 3.1.4 | 提升 websocket 查询和写入性能 | 3.3.2.0 及更高版本 |
|
||||
| 3.1.3 | 支持 WebSocket 自动重连 | - |
|
||||
| 3.1.2 | 修复 schemaless 资源释放 | - |
|
||||
| 3.1.1 | 支持 varbinary 和 geometry 类型 | - |
|
||||
| 3.1.0 | WebSocket 使用原生实现 | 3.2.1.0 及更高版本 |
|
||||
| Connector 版本 | 主要变化 | TDengine 版本 |
|
||||
|:-------------|:----------------------------|:--------------|
|
||||
| 3.1.6 | 优化 WebSocket 连接接收消息处理。 | - |
|
||||
| 3.1.5 | 修复 WebSocket 协议编码中文时长度错误。 | - |
|
||||
| 3.1.4 | 提升 WebSocket 查询和写入性能。 | 3.3.2.0 及更高版本 |
|
||||
| 3.1.3 | 支持 WebSocket 自动重连。 | - |
|
||||
| 3.1.2 | 修复 schemaless 资源释放。 | - |
|
||||
| 3.1.1 | 支持 varbinary 和 geometry 类型。 | - |
|
||||
| 3.1.0 | WebSocket 使用原生实现。 | 3.2.1.0 及更高版本 |
|
||||
|
||||
## 处理异常
|
||||
|
||||
|
@ -53,14 +54,14 @@ TDengine 其他功能模块的报错,请参考 [错误码](../../../reference/
|
|||
| DOUBLE | double |
|
||||
| BOOL | bool |
|
||||
| BINARY | byte[] |
|
||||
| NCHAR | string (utf-8编码) |
|
||||
| NCHAR | string |
|
||||
| JSON | byte[] |
|
||||
| VARBINARY | byte[] |
|
||||
| GEOMETRY | byte[] |
|
||||
|
||||
**注意**:JSON 类型仅在 tag 中支持。
|
||||
GEOMETRY类型是 little endian 字节序的二进制数据,符合 WKB 规范。详细信息请参考 [数据类型](../../taos-sql/data-type/#数据类型)
|
||||
WKB规范请参考[Well-Known Binary (WKB)](https://libgeos.org/specifications/wkb/)
|
||||
GEOMETRY类型是 little endian 字节序的二进制数据,符合 WKB 规范。详细信息请参考 [数据类型](../../taos-sql/data-type/#数据类型)。
|
||||
WKB规范请参考[Well-Known Binary (WKB)](https://libgeos.org/specifications/wkb/)。
|
||||
|
||||
## 示例程序汇总
|
||||
|
||||
|
|
|
@ -310,5 +310,17 @@ TDinsight插件中展示的数据是通过taosKeeper和taosAdapter服务收集
|
|||
### 34 超级表带 TAG 过滤查询子表数据与直接查子表哪个快?
直接查子表更快。超级表带 TAG 过滤查询子表数据是为满足查询方便性,同时可对多个子表中数据进行过滤,如果目的是追求性能并已明确查询子表,直接从子表查性能更高
|
||||
|
||||
### 35 如何查看数据压缩率指标?
|
||||
TDengine 目前只提供以表为统计单位的压缩率,数据库及整体还未提供,查看命令是在客户端 taos-CLI 中执行 `SHOW TABLE DISTRIBUTED table_name;` 命令,table_name 为要查看压缩率的表,可以为超级表、普通表及子表,详细可 [查看此处](https://docs.taosdata.com/reference/taos-sql/show/#show-table-distributed)
|
||||
### 35 如何查看数据库的数据压缩率和磁盘占用指标?
|
||||
TDengine 3.3.5.0 之前的版本,只提供以表为统计单位的压缩率,数据库及整体还未提供,查看命令是在客户端 taos-CLI 中执行 `SHOW TABLE DISTRIBUTED table_name;` 命令,table_name 为要查看压缩率的表,可以为超级表、普通表及子表,详细可 [查看此处](https://docs.taosdata.com/reference/taos-sql/show/#show-table-distributed)
|
||||
|
||||
TDengine 3.3.5.0 及以上的版本,还提供了数据库整体压缩率和磁盘空间占用统计。查看数据库整体的数据压缩率和磁盘空间占用的命令为 `SHOW db_name.disk_info;`,查看数据库各个模块的磁盘空间占用的命令为 `SELECT * FROM INFORMATION_SCHEMA.INS_DISK_USAGE WHERE db_name='db_name';`,db_name 为要查看的数据库名称。详细可 [查看此处](https://docs.taosdata.com/reference/taos-sql/database/#%E6%9F%A5%E7%9C%8B-db-%E7%9A%84%E7%A3%81%E7%9B%98%E7%A9%BA%E9%97%B4%E5%8D%A0%E7%94%A8)
|
||||
|
||||
### 36 短时间内,通过 systemd 重启 taosd 超过一定次数后重启失败,报错:start-limit-hit。
|
||||
问题描述:
|
||||
TDengine 3.3.5.1 及以上的版本,taosd.service 的 systemd 配置文件中,StartLimitInterval 参数从 60 秒调整为 900 秒。若在 900 秒内 taosd 服务重启达到 3 次,后续通过 systemd 启动 taosd 服务时会失败,执行 `systemctl status taosd.service` 显示错误:Failed with result 'start-limit-hit'。
|
||||
|
||||
问题原因:
|
||||
TDengine 3.3.5.1 之前的版本,StartLimitInterval 为 60 秒。若在 60 秒内无法完成 3 次重启(例如,因从 WAL(预写式日志)中恢复大量数据导致启动时间较长),则下一个 60 秒周期内的重启会重新计数,导致系统持续不断地重启 taosd 服务。为避免无限重启问题,将 StartLimitInterval 由 60 秒调整为 900 秒。因此,在使用 systemd 短时间内多次启动 taosd 时遇到 start-limit-hit 错误的机率增多。
|
||||
|
||||
问题解决:
|
||||
1)通过 systemd 重启 taosd 服务:推荐方法是先执行命令 `systemctl reset-failed taosd.service` 重置失败计数器,然后再通过 `systemctl restart taosd.service` 重启;若需长期调整,可手动修改 /etc/systemd/system/taosd.service 文件,将 StartLimitInterval 调小或将 StartLimitBurst 调大(注:重新安装 taosd 会重置该参数,需要重新修改),执行 `systemctl daemon-reload` 重新加载配置,然后再重启。2)也可以不通过 systemd 而是通过 taosd 命令直接重启 taosd 服务,此时不受 StartLimitInterval 和 StartLimitBurst 参数限制。
|
||||
|
@@ -31,9 +31,7 @@
extern "C" {
#endif

#if defined(CUS_NAME) || defined(CUS_PROMPT) || defined(CUS_EMAIL)
#include "cus_name.h"
#endif

#ifdef WINDOWS

@@ -16,12 +16,23 @@
#ifndef _CUS_NAME_H_
#define _CUS_NAME_H_

//
// support OEM
//
#ifndef TD_PRODUCT_NAME
#ifdef TD_ENTERPRISE
#define TD_PRODUCT_NAME "TDengine Enterprise Edition"
#else
#define TD_PRODUCT_NAME "TDengine Community Edition"
#endif
#endif

#ifndef CUS_NAME
#define CUS_NAME "TDengine"
#endif

#ifndef CUS_PROMPT
#define CUS_PROMPT "taos"
#define CUS_PROMPT "taos"
#endif

#ifndef CUS_EMAIL

@@ -17,6 +17,7 @@
#define _TD_UTIL_DEF_H_

#include "os.h"
#include "cus_name.h"

#ifdef __cplusplus
extern "C" {

@@ -79,14 +80,6 @@ extern const int32_t TYPE_BYTES[21];
#define TSDB_DEFAULT_PASS "taosdata"
#endif

#ifndef TD_PRODUCT_NAME
#ifdef TD_ENTERPRISE
#define TD_PRODUCT_NAME "TDengine Enterprise Edition"
#else
#define TD_PRODUCT_NAME "TDengine Community Edition"
#endif
#endif

#define TSDB_TRUE 1
#define TSDB_FALSE 0
#define TSDB_OK 0

@@ -38,13 +38,7 @@
#include "tversion.h"
#include "tconv.h"

#if defined(CUS_NAME) || defined(CUS_PROMPT) || defined(CUS_EMAIL)
#include "cus_name.h"
#endif

#ifndef CUS_PROMPT
#define CUS_PROMPT "taos"
#endif

#define TSC_VAR_NOT_RELEASE 1
#define TSC_VAR_RELEASED 0

@@ -28,9 +28,6 @@
#include "tutil.h"

#if defined(CUS_NAME) || defined(CUS_PROMPT) || defined(CUS_EMAIL)
#include "cus_name.h"
#endif

#define CONFIG_PATH_LEN (TSDB_FILENAME_LEN + 12)
#define CONFIG_FILE_LEN (CONFIG_PATH_LEN + 32)

@@ -960,7 +957,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
  TAOS_CHECK_RETURN(cfgAddString(pCfg, "s3Accesskey", tsS3AccessKey[0], CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER_LAZY,CFG_CATEGORY_GLOBAL));
  TAOS_CHECK_RETURN(cfgAddString(pCfg, "s3Endpoint", tsS3Endpoint[0], CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER_LAZY,CFG_CATEGORY_GLOBAL));
  TAOS_CHECK_RETURN(cfgAddString(pCfg, "s3BucketName", tsS3BucketName, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER_LAZY,CFG_CATEGORY_GLOBAL));
  TAOS_CHECK_RETURN(cfgAddString(pCfg, "s3BucketName", tsS3BucketName, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER_LAZY,CFG_CATEGORY_LOCAL));

  TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "s3PageCacheSize", tsS3PageCacheSize, 4, 1024 * 1024 * 1024, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER_LAZY,CFG_CATEGORY_GLOBAL));
  TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "s3UploadDelaySec", tsS3UploadDelaySec, 1, 60 * 60 * 24 * 30, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER,CFG_CATEGORY_GLOBAL));

@@ -21,28 +21,17 @@
#include "tglobal.h"
#include "version.h"
#include "tconv.h"
#ifdef TD_JEMALLOC_ENABLED
#include "jemalloc/jemalloc.h"
#endif
#include "dmUtil.h"
#include "tcs.h"
#include "qworker.h"

#if defined(CUS_NAME) || defined(CUS_PROMPT) || defined(CUS_EMAIL)
#ifdef TD_JEMALLOC_ENABLED
#define ALLOW_FORBID_FUNC
#include "jemalloc/jemalloc.h"
#endif

#include "cus_name.h"
#else
#ifndef CUS_NAME
#define CUS_NAME "TDengine"
#endif

#ifndef CUS_PROMPT
#define CUS_PROMPT "taos"
#endif

#ifndef CUS_EMAIL
#define CUS_EMAIL "<support@taosdata.com>"
#endif
#endif
// clang-format off
#define DM_APOLLO_URL "The apollo string to use when configuring the server, such as: -a 'jsonFile:./tests/cfg.json', cfg.json text can be '{\"fqdn\":\"td1\"}'."
|
||||
#define DM_CFG_DIR "Configuration directory."
|
||||
|
|
|
@ -1016,15 +1016,15 @@ SArray *vmGetMsgHandles() {
|
|||
if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_RESUME, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
|
||||
if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_STOP, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
|
||||
if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_CHECK_POINT_SOURCE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
|
||||
if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_CHECKPOINT_READY, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
|
||||
if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_CHECKPOINT_READY_RSP, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
|
||||
if (dmSetMgmtHandle(pArray, TDMT_STREAM_RETRIEVE_TRIGGER, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
|
||||
if (dmSetMgmtHandle(pArray, TDMT_STREAM_RETRIEVE_TRIGGER_RSP, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
|
||||
if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_CHECKPOINT_READY, vmPutMsgToStreamCtrlQueue, 0) == NULL) goto _OVER;
|
||||
if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_CHECKPOINT_READY_RSP, vmPutMsgToStreamCtrlQueue, 0) == NULL) goto _OVER;
|
||||
if (dmSetMgmtHandle(pArray, TDMT_STREAM_RETRIEVE_TRIGGER, vmPutMsgToStreamCtrlQueue, 0) == NULL) goto _OVER;
|
||||
if (dmSetMgmtHandle(pArray, TDMT_STREAM_RETRIEVE_TRIGGER_RSP, vmPutMsgToStreamCtrlQueue, 0) == NULL) goto _OVER;
|
||||
if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_TASK_UPDATE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
|
||||
if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_TASK_RESET, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
|
||||
if (dmSetMgmtHandle(pArray, TDMT_MND_STREAM_HEARTBEAT_RSP, vmPutMsgToStreamCtrlQueue, 0) == NULL) goto _OVER;
|
||||
if (dmSetMgmtHandle(pArray, TDMT_MND_STREAM_REQ_CHKPT_RSP, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
|
||||
if (dmSetMgmtHandle(pArray, TDMT_MND_STREAM_CHKPT_REPORT_RSP, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
|
||||
if (dmSetMgmtHandle(pArray, TDMT_MND_STREAM_REQ_CHKPT_RSP, vmPutMsgToStreamCtrlQueue, 0) == NULL) goto _OVER;
|
||||
if (dmSetMgmtHandle(pArray, TDMT_MND_STREAM_CHKPT_REPORT_RSP, vmPutMsgToStreamCtrlQueue, 0) == NULL) goto _OVER;
|
||||
if (dmSetMgmtHandle(pArray, TDMT_VND_GET_STREAM_PROGRESS, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
|
||||
|
||||
if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_UPDATE_CHKPT, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
|
||||
|
|
|
@ -396,7 +396,8 @@ void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode, bool commitAndRemoveWal,
|
|||
|
||||
tqNotifyClose(pVnode->pImpl->pTq);
|
||||
|
||||
dInfo("vgId:%d, wait for vnode stream queue:%p is empty", pVnode->vgId, pVnode->pStreamQ);
|
||||
dInfo("vgId:%d, wait for vnode stream queue:%p is empty, %d remains", pVnode->vgId,
|
||||
pVnode->pStreamQ, taosQueueItemSize(pVnode->pStreamQ));
|
||||
while (!taosQueueEmpty(pVnode->pStreamQ)) taosMsleep(10);
|
||||
|
||||
dInfo("vgId:%d, wait for vnode stream ctrl queue:%p is empty", pVnode->vgId, pVnode->pStreamCtrlQ);
|
||||
|
|
|
@ -1158,51 +1158,22 @@ int32_t extractStreamNodeList(SMnode *pMnode) {
|
|||
}
|
||||
|
||||
static int32_t mndCheckTaskAndNodeStatus(SMnode *pMnode) {
|
||||
bool ready = true;
|
||||
int32_t code = 0;
|
||||
if (mndStreamNodeIsUpdated(pMnode)) {
|
||||
TAOS_RETURN(TSDB_CODE_STREAM_TASK_IVLD_STATUS);
|
||||
return TSDB_CODE_STREAM_TASK_IVLD_STATUS;
|
||||
}
|
||||
|
||||
streamMutexLock(&execInfo.lock);
|
||||
if (taosArrayGetSize(execInfo.pNodeList) == 0) {
|
||||
mDebug("stream task node change checking done, no vgroups exist, do nothing");
|
||||
if (taosArrayGetSize(execInfo.pTaskList) != 0) {
|
||||
streamMutexUnlock(&execInfo.lock);
|
||||
mError("stream task node change checking done, no vgroups exist, but task list is not empty");
|
||||
return TSDB_CODE_FAILED;
|
||||
}
|
||||
}
|
||||
|
||||
for (int32_t i = 0; i < taosArrayGetSize(execInfo.pTaskList); ++i) {
|
||||
STaskId *p = taosArrayGet(execInfo.pTaskList, i);
|
||||
if (p == NULL) {
|
||||
continue;
|
||||
}
|
||||
|
||||
STaskStatusEntry *pEntry = taosHashGet(execInfo.pTaskMap, p, sizeof(*p));
|
||||
if (pEntry == NULL) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (pEntry->status != TASK_STATUS__READY) {
|
||||
mDebug("s-task:0x%" PRIx64 "-0x%x (nodeId:%d) status:%s, checkpoint not issued", pEntry->id.streamId,
|
||||
(int32_t)pEntry->id.taskId, pEntry->nodeId, streamTaskGetStatusStr(pEntry->status));
|
||||
ready = false;
|
||||
break;
|
||||
}
|
||||
|
||||
if (pEntry->hTaskId != 0) {
|
||||
mDebug("s-task:0x%" PRIx64 "-0x%x (nodeId:%d) status:%s related fill-history task:0x%" PRIx64
|
||||
" exists, checkpoint not issued",
|
||||
pEntry->id.streamId, (int32_t)pEntry->id.taskId, pEntry->nodeId, streamTaskGetStatusStr(pEntry->status),
|
||||
pEntry->hTaskId);
|
||||
ready = false;
|
||||
break;
|
||||
code = TSDB_CODE_STREAM_TASK_IVLD_STATUS;
|
||||
}
|
||||
}
|
||||
|
||||
streamMutexUnlock(&execInfo.lock);
|
||||
return ready ? 0 : -1;
|
||||
return code;
|
||||
}
|
||||
|
||||
int64_t getStreamTaskLastReadyState(SArray *pTaskList, int64_t streamId) {
|
||||
|
@ -1216,7 +1187,22 @@ int64_t getStreamTaskLastReadyState(SArray *pTaskList, int64_t streamId) {
|
|||
continue;
|
||||
}
|
||||
|
||||
if (pEntry->status == TASK_STATUS__READY && ts < pEntry->startTime) {
|
||||
// -1 denote not ready now or never ready till now
|
||||
if (pEntry->hTaskId != 0) {
|
||||
mInfo("s-task:0x%" PRIx64 "-0x%x (nodeId:%d) status:%s related fill-history task:0x%" PRIx64
|
||||
" exists, checkpoint not issued",
|
||||
pEntry->id.streamId, (int32_t)pEntry->id.taskId, pEntry->nodeId, streamTaskGetStatusStr(pEntry->status),
|
||||
pEntry->hTaskId);
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (pEntry->status != TASK_STATUS__READY) {
|
||||
mInfo("s-task:0x%" PRIx64 "-0x%x (nodeId:%d) status:%s, not ready for checkpoint", pEntry->id.streamId,
|
||||
(int32_t)pEntry->id.taskId, pEntry->nodeId, streamTaskGetStatusStr(pEntry->status));
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (ts < pEntry->startTime) {
|
||||
ts = pEntry->startTime;
|
||||
taskId = pEntry->id.taskId;
|
||||
}
|
||||
|
@ -1249,11 +1235,11 @@ static bool isStreamReadyHelp(int64_t now, SStreamObj* pStream) {
|
|||
|
||||
int64_t lastReadyTs = getStreamTaskLastReadyState(execInfo.pTaskList, pStream->uid);
|
||||
if ((lastReadyTs == -1) || ((lastReadyTs != -1) && ((now - lastReadyTs) < tsStreamCheckpointInterval * 1000))) {
|
||||
|
||||
if (lastReadyTs != -1) {
|
||||
mInfo("not start checkpoint, stream:0x%"PRIx64" last ready ts:%"PRId64" ready duration:%"PRId64" less than threshold",
|
||||
pStream->uid, lastReadyTs, now - lastReadyTs);
|
||||
} else {
|
||||
mInfo("not start checkpoint, stream:0x%"PRIx64" not ready now", pStream->uid);
|
||||
mInfo("not start checkpoint, stream:0x%" PRIx64 " last ready ts:%" PRId64 " ready duration:%" PRId64
|
||||
"ms less than threshold",
|
||||
pStream->uid, lastReadyTs, (now - lastReadyTs));
|
||||
}
|
||||
|
||||
ready = false;
|
||||
|
@ -1274,7 +1260,7 @@ static int32_t mndProcessStreamCheckpoint(SRpcMsg *pReq) {
|
|||
int32_t numOfCheckpointTrans = 0;
|
||||
|
||||
if ((code = mndCheckTaskAndNodeStatus(pMnode)) != 0) {
|
||||
TAOS_RETURN(TSDB_CODE_STREAM_TASK_IVLD_STATUS);
|
||||
return TSDB_CODE_STREAM_TASK_IVLD_STATUS;
|
||||
}
|
||||
|
||||
SArray *pList = taosArrayInit(4, sizeof(SCheckpointInterval));
|
||||
|
@ -1326,7 +1312,7 @@ static int32_t mndProcessStreamCheckpoint(SRpcMsg *pReq) {
|
|||
}
|
||||
|
||||
int32_t numOfQual = taosArrayGetSize(pList);
|
||||
if (numOfCheckpointTrans > tsMaxConcurrentCheckpoint) {
|
||||
if (numOfCheckpointTrans >= tsMaxConcurrentCheckpoint) {
|
||||
mDebug(
|
||||
"%d stream(s) checkpoint interval longer than %ds, ongoing checkpoint trans:%d reach maximum allowed:%d, new "
|
||||
"checkpoint trans are not allowed, wait for 30s",
|
||||
|
@ -2601,20 +2587,51 @@ static void doSendQuickRsp(SRpcHandleInfo *pInfo, int32_t msgSize, int32_t vgId,
|
|||
}
|
||||
}
|
||||
|
||||
static int32_t doCleanReqList(SArray* pList, SCheckpointConsensusInfo* pInfo) {
|
||||
int32_t alreadySend = taosArrayGetSize(pList);
|
||||
|
||||
for (int32_t i = 0; i < alreadySend; ++i) {
|
||||
int32_t *taskId = taosArrayGet(pList, i);
|
||||
if (taskId == NULL) {
|
||||
continue;
|
||||
}
|
||||
|
||||
for (int32_t k = 0; k < taosArrayGetSize(pInfo->pTaskList); ++k) {
|
||||
SCheckpointConsensusEntry *pe = taosArrayGet(pInfo->pTaskList, k);
|
||||
if ((pe != NULL) && (pe->req.taskId == *taskId)) {
|
||||
taosArrayRemove(pInfo->pTaskList, k);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return alreadySend;
|
||||
}
|
||||
|
||||
int32_t mndProcessConsensusInTmr(SRpcMsg *pMsg) {
|
||||
SMnode *pMnode = pMsg->info.node;
|
||||
int64_t now = taosGetTimestampMs();
|
||||
bool allReady = true;
|
||||
SArray *pNodeSnapshot = NULL;
|
||||
int32_t maxAllowedTrans = 50;
|
||||
int32_t numOfTrans = 0;
|
||||
int32_t code = 0;
|
||||
void *pIter = NULL;
|
||||
|
||||
SArray *pList = taosArrayInit(4, sizeof(int32_t));
|
||||
if (pList == NULL) {
|
||||
return terrno;
|
||||
}
|
||||
|
||||
SArray *pStreamList = taosArrayInit(4, sizeof(int64_t));
|
||||
if (pStreamList == NULL) {
|
||||
taosArrayDestroy(pList);
|
||||
return terrno;
|
||||
}
|
||||
|
||||
mDebug("start to process consensus-checkpointId in tmr");
|
||||
|
||||
bool allReady = true;
|
||||
SArray *pNodeSnapshot = NULL;
|
||||
|
||||
int32_t code = mndTakeVgroupSnapshot(pMnode, &allReady, &pNodeSnapshot);
|
||||
code = mndTakeVgroupSnapshot(pMnode, &allReady, &pNodeSnapshot);
|
||||
taosArrayDestroy(pNodeSnapshot);
|
||||
if (code) {
|
||||
mError("failed to get the vgroup snapshot, ignore it and continue");
|
||||
|
@ -2623,28 +2640,30 @@ int32_t mndProcessConsensusInTmr(SRpcMsg *pMsg) {
|
|||
if (!allReady) {
|
||||
mWarn("not all vnodes are ready, end to process the consensus-checkpointId in tmr process");
|
||||
taosArrayDestroy(pStreamList);
|
||||
taosArrayDestroy(pList);
|
||||
return 0;
|
||||
}
|
||||
|
||||
streamMutexLock(&execInfo.lock);
|
||||
|
||||
void *pIter = NULL;
|
||||
while ((pIter = taosHashIterate(execInfo.pStreamConsensus, pIter)) != NULL) {
|
||||
SCheckpointConsensusInfo *pInfo = (SCheckpointConsensusInfo *)pIter;
|
||||
|
||||
int64_t streamId = -1;
|
||||
int32_t num = taosArrayGetSize(pInfo->pTaskList);
|
||||
SArray *pList = taosArrayInit(4, sizeof(int32_t));
|
||||
if (pList == NULL) {
|
||||
continue;
|
||||
}
|
||||
taosArrayClear(pList);
|
||||
|
||||
int64_t streamId = -1;
|
||||
int32_t num = taosArrayGetSize(pInfo->pTaskList);
|
||||
SStreamObj *pStream = NULL;
|
||||
|
||||
code = mndGetStreamObj(pMnode, pInfo->streamId, &pStream);
|
||||
if (pStream == NULL || code != 0) { // stream has been dropped already
|
||||
mDebug("stream:0x%" PRIx64 " dropped already, continue", pInfo->streamId);
|
||||
void *p = taosArrayPush(pStreamList, &pInfo->streamId);
|
||||
taosArrayDestroy(pList);
|
||||
if (p == NULL) {
|
||||
mError("failed to record the missing stream id in concensus-stream list, streamId:%" PRId64
|
||||
" code:%s, continue",
|
||||
pInfo->streamId, tstrerror(terrno));
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
|
@ -2654,7 +2673,9 @@ int32_t mndProcessConsensusInTmr(SRpcMsg *pMsg) {
|
|||
continue;
|
||||
}
|
||||
|
||||
streamId = pe->req.streamId;
|
||||
if (streamId == -1) {
|
||||
streamId = pe->req.streamId;
|
||||
}
|
||||
|
||||
int32_t existed = 0;
|
||||
bool allSame = true;
|
||||
|
@ -2665,7 +2686,7 @@ int32_t mndProcessConsensusInTmr(SRpcMsg *pMsg) {
|
|||
break;
|
||||
}
|
||||
|
||||
if (((now - pe->ts) >= 10 * 1000) || allSame) {
|
||||
if (((now - pe->ts) >= 10 * 1000) && allSame) {
|
||||
mDebug("s-task:0x%x sendTs:%" PRId64 " wait %.2fs and all tasks have same checkpointId", pe->req.taskId,
|
||||
pe->req.startTs, (now - pe->ts) / 1000.0);
|
||||
if (chkId > pe->req.checkpointId) {
|
||||
|
@ -2673,8 +2694,12 @@ int32_t mndProcessConsensusInTmr(SRpcMsg *pMsg) {
|
|||
taosArrayDestroy(pStreamList);
|
||||
mError("s-task:0x%x checkpointId:%" PRId64 " is updated to %" PRId64 ", update it", pe->req.taskId,
|
||||
pe->req.checkpointId, chkId);
|
||||
|
||||
mndReleaseStream(pMnode, pStream);
|
||||
taosHashCancelIterate(execInfo.pStreamConsensus, pIter);
|
||||
return TSDB_CODE_FAILED;
|
||||
}
|
||||
|
||||
code = mndCreateSetConsensusChkptIdTrans(pMnode, pStream, pe->req.taskId, chkId, pe->req.startTs);
|
||||
if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_ACTION_IN_PROGRESS) {
|
||||
mError("failed to create consensus-checkpoint trans, stream:0x%" PRIx64, pStream->uid);
|
||||
|
@ -2684,7 +2709,6 @@ int32_t mndProcessConsensusInTmr(SRpcMsg *pMsg) {
|
|||
if (p == NULL) {
|
||||
mError("failed to put into task list, taskId:0x%x", pe->req.taskId);
|
||||
}
|
||||
streamId = pe->req.streamId;
|
||||
} else {
|
||||
mDebug("s-task:0x%x sendTs:%" PRId64 " wait %.2fs already, wait for next round to check", pe->req.taskId,
|
||||
pe->req.startTs, (now - pe->ts) / 1000.0);
|
||||
|
@ -2693,38 +2717,27 @@ int32_t mndProcessConsensusInTmr(SRpcMsg *pMsg) {
|
|||
|
||||
mndReleaseStream(pMnode, pStream);
|
||||
|
||||
if (taosArrayGetSize(pList) > 0) {
|
||||
for (int32_t i = 0; i < taosArrayGetSize(pList); ++i) {
|
||||
int32_t *taskId = taosArrayGet(pList, i);
|
||||
if (taskId == NULL) {
|
||||
continue;
|
||||
}
|
||||
|
||||
for (int32_t k = 0; k < taosArrayGetSize(pInfo->pTaskList); ++k) {
|
||||
SCheckpointConsensusEntry *pe = taosArrayGet(pInfo->pTaskList, k);
|
||||
if ((pe != NULL) && (pe->req.taskId == *taskId)) {
|
||||
taosArrayRemove(pInfo->pTaskList, k);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
taosArrayDestroy(pList);
|
||||
int32_t alreadySend = doCleanReqList(pList, pInfo);
|
||||
|
||||
// clear request stream item with empty task list
|
||||
if (taosArrayGetSize(pInfo->pTaskList) == 0) {
|
||||
mndClearConsensusRspEntry(pInfo);
|
||||
if (streamId == -1) {
|
||||
streamMutexUnlock(&execInfo.lock);
|
||||
taosArrayDestroy(pStreamList);
|
||||
mError("streamId is -1, streamId:%" PRIx64, pInfo->streamId);
|
||||
return TSDB_CODE_FAILED;
|
||||
mError("streamId is -1, streamId:%" PRIx64" in consensus-checkpointId hashMap, cont", pInfo->streamId);
|
||||
}
|
||||
|
||||
void *p = taosArrayPush(pStreamList, &streamId);
|
||||
if (p == NULL) {
|
||||
mError("failed to put into stream list, stream:0x%" PRIx64, streamId);
|
||||
mError("failed to put into stream list, stream:0x%" PRIx64 " not remove it in consensus-chkpt list", streamId);
|
||||
}
|
||||
}
|
||||
|
||||
numOfTrans += alreadySend;
|
||||
if (numOfTrans > maxAllowedTrans) {
|
||||
mInfo("already send consensus-checkpointId trans:%d, try next time", alreadySend);
|
||||
taosHashCancelIterate(execInfo.pStreamConsensus, pIter);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
for (int32_t i = 0; i < taosArrayGetSize(pStreamList); ++i) {
|
||||
|
@ -2739,7 +2752,9 @@ int32_t mndProcessConsensusInTmr(SRpcMsg *pMsg) {
|
|||
streamMutexUnlock(&execInfo.lock);
|
||||
|
||||
taosArrayDestroy(pStreamList);
|
||||
mDebug("end to process consensus-checkpointId in tmr");
|
||||
taosArrayDestroy(pList);
|
||||
|
||||
mDebug("end to process consensus-checkpointId in tmr, send consensus-checkpoint trans:%d", numOfTrans);
|
||||
return code;
|
||||
}
|
||||
|
||||
|
|
|
@ -814,17 +814,18 @@ int32_t mndScanCheckpointReportInfo(SRpcMsg *pReq) {
|
|||
|
||||
int32_t mndCreateSetConsensusChkptIdTrans(SMnode *pMnode, SStreamObj *pStream, int32_t taskId, int64_t checkpointId,
|
||||
int64_t ts) {
|
||||
char msg[128] = {0};
|
||||
char msg[128] = {0};
|
||||
STrans *pTrans = NULL;
|
||||
SStreamTask *pTask = NULL;
|
||||
|
||||
snprintf(msg, tListLen(msg), "set consen-chkpt-id for task:0x%x", taskId);
|
||||
|
||||
STrans *pTrans = NULL;
|
||||
int32_t code = doCreateTrans(pMnode, pStream, NULL, TRN_CONFLICT_NOTHING, MND_STREAM_CHKPT_CONSEN_NAME, msg, &pTrans);
|
||||
if (pTrans == NULL || code != 0) {
|
||||
return terrno;
|
||||
}
|
||||
|
||||
STaskId id = {.streamId = pStream->uid, .taskId = taskId};
|
||||
SStreamTask *pTask = NULL;
|
||||
STaskId id = {.streamId = pStream->uid, .taskId = taskId};
|
||||
code = mndGetStreamTask(&id, pStream, &pTask);
|
||||
if (code) {
|
||||
mError("failed to get task:0x%x in stream:%s, failed to create consensus-checkpointId", taskId, pStream->name);
|
||||
|
|
|
@ -402,6 +402,7 @@ int32_t metaStatsCacheUpsert(SMeta* pMeta, SMetaStbStats* pInfo) {
|
|||
|
||||
if (*ppEntry) { // update
|
||||
(*ppEntry)->info.ctbNum = pInfo->ctbNum;
|
||||
(*ppEntry)->info.colNum = pInfo->colNum;
|
||||
} else { // insert
|
||||
if (pCache->sStbStatsCache.nEntry >= pCache->sStbStatsCache.nBucket) {
|
||||
TAOS_UNUSED(metaRehashStatsCache(pCache, 1));
|
||||
|
|
|
@ -10,14 +10,16 @@
|
|||
|
||||
#include "meta.h"
|
||||
|
||||
extern SDmNotifyHandle dmNotifyHdl;
|
||||
|
||||
int32_t metaCloneEntry(const SMetaEntry *pEntry, SMetaEntry **ppEntry);
|
||||
void metaCloneEntryFree(SMetaEntry **ppEntry);
|
||||
void metaDestroyTagIdxKey(STagIdxKey *pTagIdxKey);
|
||||
int metaSaveJsonVarToIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry, const SSchema *pSchema);
|
||||
int metaDelJsonVarFromIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry, const SSchema *pSchema);
|
||||
void metaTimeSeriesNotifyCheck(SMeta *pMeta);
|
||||
int tagIdxKeyCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2);
|
||||
|
||||
static void metaTimeSeriesNotifyCheck(SMeta *pMeta);
|
||||
static int32_t metaGetChildUidsOfSuperTable(SMeta *pMeta, tb_uid_t suid, SArray **childList);
|
||||
static int32_t metaFetchTagIdxKey(SMeta *pMeta, const SMetaEntry *pEntry, const SSchema *pTagColumn,
|
||||
STagIdxKey **ppTagIdxKey, int32_t *pTagIdxKeySize);
|
||||
|
@ -990,6 +992,20 @@ static int32_t metaTtlIdxDelete(SMeta *pMeta, const SMetaHandleParam *pParam) {
|
|||
return code;
|
||||
}
|
||||
|
||||
static void metaTimeSeriesNotifyCheck(SMeta *pMeta) {
|
||||
#if defined(TD_ENTERPRISE)
|
||||
int64_t nTimeSeries = metaGetTimeSeriesNum(pMeta, 0);
|
||||
int64_t deltaTS = nTimeSeries - pMeta->pVnode->config.vndStats.numOfReportedTimeSeries;
|
||||
if (deltaTS > tsTimeSeriesThreshold) {
|
||||
if (0 == atomic_val_compare_exchange_8(&dmNotifyHdl.state, 1, 2)) {
|
||||
if (tsem_post(&dmNotifyHdl.sem) != 0) {
|
||||
metaError("vgId:%d, failed to post semaphore, errno:%d", TD_VID(pMeta->pVnode), errno);
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
static int32_t (*metaTableOpFn[META_TABLE_MAX][META_TABLE_OP_MAX])(SMeta *pMeta, const SMetaHandleParam *pParam) =
|
||||
{
|
||||
[META_ENTRY_TABLE] =
|
||||
|
@ -1139,6 +1155,7 @@ static int32_t metaHandleNormalTableCreate(SMeta *pMeta, const SMetaEntry *pEntr
|
|||
metaError("vgId:%d, failed to create table:%s since %s", TD_VID(pMeta->pVnode), pEntry->name, tstrerror(rc));
|
||||
}
|
||||
}
|
||||
metaTimeSeriesNotifyCheck(pMeta);
|
||||
} else {
|
||||
metaErr(TD_VID(pMeta->pVnode), code);
|
||||
}
|
||||
|
@ -1214,7 +1231,7 @@ static int32_t metaHandleChildTableCreate(SMeta *pMeta, const SMetaEntry *pEntry
|
|||
if (ret < 0) {
|
||||
metaErr(TD_VID(pMeta->pVnode), ret);
|
||||
}
|
||||
pMeta->pVnode->config.vndStats.numOfNTimeSeries += (nCols - 1);
|
||||
pMeta->pVnode->config.vndStats.numOfTimeSeries += (nCols > 0 ? nCols - 1 : 0);
|
||||
}
|
||||
|
||||
if (!TSDB_CACHE_NO(pMeta->pVnode->config)) {
|
||||
|
@ -1228,7 +1245,7 @@ static int32_t metaHandleChildTableCreate(SMeta *pMeta, const SMetaEntry *pEntry
|
|||
} else {
|
||||
metaErr(TD_VID(pMeta->pVnode), code);
|
||||
}
|
||||
|
||||
metaTimeSeriesNotifyCheck(pMeta);
|
||||
metaFetchEntryFree(&pSuperEntry);
|
||||
return code;
|
||||
}
|
||||
|
@ -1595,6 +1612,10 @@ static int32_t metaHandleSuperTableUpdateImpl(SMeta *pMeta, SMetaHandleParam *pP
|
|||
}
|
||||
}
|
||||
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
metaUpdateStbStats(pMeta, pEntry->uid, 0, pEntry->stbEntry.schemaRow.nCols - pOldEntry->stbEntry.schemaRow.nCols);
|
||||
}
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
|
@ -1673,7 +1694,16 @@ static int32_t metaHandleSuperTableUpdate(SMeta *pMeta, const SMetaEntry *pEntry
|
|||
|
||||
tsdbCacheInvalidateSchema(pTsdb, pEntry->uid, -1, pEntry->stbEntry.schemaRow.version);
|
||||
}
|
||||
|
||||
if (updStat) {
|
||||
int64_t ctbNum = 0;
|
||||
int32_t ret = metaGetStbStats(pMeta->pVnode, pEntry->uid, &ctbNum, NULL);
|
||||
if (ret < 0) {
|
||||
metaError("vgId:%d, failed to get stb stats:%s uid:%" PRId64 " since %s", TD_VID(pMeta->pVnode), pEntry->name,
|
||||
pEntry->uid, tstrerror(ret));
|
||||
}
|
||||
pMeta->pVnode->config.vndStats.numOfTimeSeries += (ctbNum * deltaCol);
|
||||
if (deltaCol > 0) metaTimeSeriesNotifyCheck(pMeta);
|
||||
}
|
||||
metaFetchEntryFree(&pOldEntry);
|
||||
return code;
|
||||
}
|
||||
|
@ -1772,7 +1802,9 @@ static int32_t metaHandleNormalTableUpdate(SMeta *pMeta, const SMetaEntry *pEntr
|
|||
#endif
|
||||
tsdbCacheInvalidateSchema(pMeta->pVnode->pTsdb, 0, pEntry->uid, pEntry->ntbEntry.schemaRow.version);
|
||||
}
|
||||
metaTimeSeriesNotifyCheck(pMeta);
|
||||
int32_t deltaCol = pEntry->ntbEntry.schemaRow.nCols - pOldEntry->ntbEntry.schemaRow.nCols;
|
||||
pMeta->pVnode->config.vndStats.numOfNTimeSeries += deltaCol;
|
||||
if (deltaCol > 0) metaTimeSeriesNotifyCheck(pMeta);
|
||||
metaFetchEntryFree(&pOldEntry);
|
||||
return code;
|
||||
}
|
||||
|
|
|
@ -261,20 +261,6 @@ _exception:
|
|||
return code;
|
||||
}
|
||||
|
||||
void metaTimeSeriesNotifyCheck(SMeta *pMeta) {
|
||||
#if defined(TD_ENTERPRISE)
|
||||
int64_t nTimeSeries = metaGetTimeSeriesNum(pMeta, 0);
|
||||
int64_t deltaTS = nTimeSeries - pMeta->pVnode->config.vndStats.numOfReportedTimeSeries;
|
||||
if (deltaTS > tsTimeSeriesThreshold) {
|
||||
if (0 == atomic_val_compare_exchange_8(&dmNotifyHdl.state, 1, 2)) {
|
||||
if (tsem_post(&dmNotifyHdl.sem) != 0) {
|
||||
metaError("vgId:%d, failed to post semaphore, errno:%d", TD_VID(pMeta->pVnode), errno);
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
static int32_t metaDropTables(SMeta *pMeta, SArray *tbUids) {
|
||||
int32_t code = 0;
|
||||
if (taosArrayGetSize(tbUids) == 0) return TSDB_CODE_SUCCESS;
|
||||
|
|
|
@ -378,10 +378,6 @@ static int32_t metaCreateChildTable(SMeta *pMeta, int64_t version, SVCreateTbReq
|
|||
pReq->ctb.suid, version);
|
||||
}
|
||||
return code;
|
||||
|
||||
#if 0
|
||||
metaTimeSeriesNotifyCheck(pMeta);
|
||||
#endif
|
||||
}
|
||||
|
||||
// Drop Child Table
|
||||
|
@ -489,9 +485,6 @@ static int32_t metaCreateNormalTable(SMeta *pMeta, int64_t version, SVCreateTbRe
|
|||
__func__, __FILE__, __LINE__, tstrerror(code), pReq->uid, pReq->name, version);
|
||||
}
|
||||
TAOS_RETURN(code);
|
||||
#if 0
|
||||
metaTimeSeriesNotifyCheck(pMeta);
|
||||
#endif
|
||||
}
|
||||
|
||||
// Drop Normal Table
|
||||
|
|
|
@ -422,7 +422,7 @@ static int32_t doBuildAndSendCreateTableMsg(SVnode* pVnode, char* stbFullName, S
|
|||
reqs.nReqs = taosArrayGetSize(reqs.pArray);
|
||||
code = tqPutReqToQueue(pVnode, &reqs, encodeCreateChildTableForRPC, TDMT_VND_CREATE_TABLE);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tqError("s-task:%s failed to send create table msg", id);
|
||||
tqError("s-task:%s failed to send create table msg, code:%s", id, tstrerror(code));
|
||||
}
|
||||
|
||||
_end:
|
||||
|
@ -861,6 +861,8 @@ int32_t doWaitForDstTableCreated(SVnode* pVnode, SStreamTask* pTask, STableSinkI
|
|||
int32_t vgId = TD_VID(pVnode);
|
||||
int64_t suid = pTask->outputInfo.tbSink.stbUid;
|
||||
const char* id = pTask->id.idStr;
|
||||
int32_t timeout = 300; // 5min
|
||||
int64_t start = taosGetTimestampSec();
|
||||
|
||||
while (pTableSinkInfo->uid == 0) {
|
||||
if (streamTaskShouldStop(pTask)) {
|
||||
|
@ -868,6 +870,12 @@ int32_t doWaitForDstTableCreated(SVnode* pVnode, SStreamTask* pTask, STableSinkI
|
|||
return TSDB_CODE_STREAM_EXEC_CANCELLED;
|
||||
}
|
||||
|
||||
int64_t waitingDuration = taosGetTimestampSec() - start;
|
||||
if (waitingDuration > timeout) {
|
||||
tqError("s-task:%s wait for table-creating:%s more than %dsec, failed", id, dstTableName, timeout);
|
||||
return TSDB_CODE_PAR_TABLE_NOT_EXIST;
|
||||
}
|
||||
|
||||
// wait for the table to be created
|
||||
SMetaReader mr = {0};
|
||||
metaReaderDoInit(&mr, pVnode->pMeta, META_READER_LOCK);
|
||||
|
|
|
@ -1217,6 +1217,7 @@ int32_t tqStreamTaskProcessTaskResumeReq(void* handle, int64_t sversion, char* m
|
|||
streamMetaReleaseTask(pMeta, pHTask);
|
||||
}
|
||||
|
||||
streamMetaReleaseTask(pMeta, pTask);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
|
|
@ -947,20 +947,8 @@ int32_t vnodeProcessStreamMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo)
|
|||
return tqProcessTaskRetrieveRsp(pVnode->pTq, pMsg);
|
||||
case TDMT_VND_STREAM_SCAN_HISTORY:
|
||||
return tqProcessTaskScanHistory(pVnode->pTq, pMsg);
|
||||
case TDMT_STREAM_TASK_CHECKPOINT_READY:
|
||||
return tqProcessTaskCheckpointReadyMsg(pVnode->pTq, pMsg);
|
||||
case TDMT_STREAM_TASK_CHECKPOINT_READY_RSP:
|
||||
return tqProcessTaskCheckpointReadyRsp(pVnode->pTq, pMsg);
|
||||
case TDMT_STREAM_RETRIEVE_TRIGGER:
|
||||
return tqProcessTaskRetrieveTriggerReq(pVnode->pTq, pMsg);
|
||||
case TDMT_STREAM_RETRIEVE_TRIGGER_RSP:
|
||||
return tqProcessTaskRetrieveTriggerRsp(pVnode->pTq, pMsg);
|
||||
case TDMT_MND_STREAM_REQ_CHKPT_RSP:
|
||||
return tqProcessStreamReqCheckpointRsp(pVnode->pTq, pMsg);
|
||||
case TDMT_VND_GET_STREAM_PROGRESS:
|
||||
return tqStreamProgressRetrieveReq(pVnode->pTq, pMsg);
|
||||
case TDMT_MND_STREAM_CHKPT_REPORT_RSP:
|
||||
return tqProcessTaskChkptReportRsp(pVnode->pTq, pMsg);
|
||||
default:
|
||||
vError("unknown msg type:%d in stream queue", pMsg->msgType);
|
||||
return TSDB_CODE_APP_ERROR;
|
||||
|
@ -987,6 +975,18 @@ int32_t vnodeProcessStreamCtrlMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pIn
|
|||
return tqProcessTaskCheckReq(pVnode->pTq, pMsg);
|
||||
case TDMT_VND_STREAM_TASK_CHECK_RSP:
|
||||
return tqProcessTaskCheckRsp(pVnode->pTq, pMsg);
|
||||
case TDMT_STREAM_TASK_CHECKPOINT_READY:
|
||||
return tqProcessTaskCheckpointReadyMsg(pVnode->pTq, pMsg);
|
||||
case TDMT_STREAM_TASK_CHECKPOINT_READY_RSP:
|
||||
return tqProcessTaskCheckpointReadyRsp(pVnode->pTq, pMsg);
|
||||
case TDMT_STREAM_RETRIEVE_TRIGGER:
|
||||
return tqProcessTaskRetrieveTriggerReq(pVnode->pTq, pMsg);
|
||||
case TDMT_STREAM_RETRIEVE_TRIGGER_RSP:
|
||||
return tqProcessTaskRetrieveTriggerRsp(pVnode->pTq, pMsg);
|
||||
case TDMT_MND_STREAM_REQ_CHKPT_RSP:
|
||||
return tqProcessStreamReqCheckpointRsp(pVnode->pTq, pMsg);
|
||||
case TDMT_MND_STREAM_CHKPT_REPORT_RSP:
|
||||
return tqProcessTaskChkptReportRsp(pVnode->pTq, pMsg);
|
||||
default:
|
||||
vError("unknown msg type:%d in stream ctrl queue", pMsg->msgType);
|
||||
return TSDB_CODE_APP_ERROR;
|
||||
|
|
|
@ -3883,7 +3883,7 @@ static EDealRes rewriteColsToSelectValFuncImpl(SNode** pNode, void* pContext) {
|
|||
|
||||
static int32_t rewriteColsToSelectValFunc(STranslateContext* pCxt, SSelectStmt* pSelect) {
|
||||
nodesRewriteExprs(pSelect->pProjectionList, rewriteColsToSelectValFuncImpl, pCxt);
|
||||
if (TSDB_CODE_SUCCESS == pCxt->errCode && !pSelect->isDistinct) {
|
||||
if (TSDB_CODE_SUCCESS == pCxt->errCode) {
|
||||
nodesRewriteExprs(pSelect->pOrderByList, rewriteColsToSelectValFuncImpl, pCxt);
|
||||
}
|
||||
return pCxt->errCode;
|
||||
|
|
|
@ -38,7 +38,7 @@ extern "C" {
|
|||
#define META_HB_SEND_IDLE_COUNTER 25 // send hb every 5 sec
|
||||
#define STREAM_TASK_KEY_LEN ((sizeof(int64_t)) << 1)
|
||||
#define STREAM_TASK_QUEUE_CAPACITY 5120
|
||||
#define STREAM_TASK_QUEUE_CAPACITY_IN_SIZE (30)
|
||||
#define STREAM_TASK_QUEUE_CAPACITY_IN_SIZE (10)
|
||||
|
||||
// clang-format off
|
||||
#define stFatal(...) do { if (stDebugFlag & DEBUG_FATAL) { taosPrintLog("STM FATAL ", DEBUG_FATAL, 255, __VA_ARGS__); }} while(0)
|
||||
|
|
|
@ -131,12 +131,12 @@ int32_t streamTaskBroadcastRetrieveReq(SStreamTask* pTask, SStreamRetrieveReq* r
|
|||
|
||||
code = tmsgSendReq(&pEpInfo->epSet, &rpcMsg);
|
||||
if (code != 0) {
|
||||
rpcFreeCont(buf);
|
||||
return code;
|
||||
stError("s-task:%s (child %d) failed to send retrieve req to task:0x%x (vgId:%d) QID:0x%" PRIx64 " code:%s",
|
||||
pTask->id.idStr, pTask->info.selfChildId, pEpInfo->taskId, pEpInfo->nodeId, req->reqId, tstrerror(code));
|
||||
} else {
|
||||
stDebug("s-task:%s (child %d) send retrieve req to task:0x%x (vgId:%d),QID:0x%" PRIx64, pTask->id.idStr,
|
||||
pTask->info.selfChildId, pEpInfo->taskId, pEpInfo->nodeId, req->reqId);
|
||||
}
|
||||
|
||||
stDebug("s-task:%s (child %d) send retrieve req to task:0x%x (vgId:%d),QID:0x%" PRIx64, pTask->id.idStr,
|
||||
pTask->info.selfChildId, pEpInfo->taskId, pEpInfo->nodeId, req->reqId);
|
||||
}
|
||||
|
||||
return code;
|
||||
|
|
|
@ -807,6 +807,8 @@ static int32_t doStreamExecTask(SStreamTask* pTask) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
int64_t st = taosGetTimestampMs();
|
||||
|
||||
EExtractDataCode ret = streamTaskGetDataFromInputQ(pTask, &pInput, &numOfBlocks, &blockSize);
|
||||
if (ret == EXEC_AFTER_IDLE) {
|
||||
streamTaskSetIdleInfo(pTask, MIN_INVOKE_INTERVAL);
|
||||
|
@ -841,8 +843,6 @@ static int32_t doStreamExecTask(SStreamTask* pTask) {
|
|||
continue;
|
||||
}
|
||||
|
||||
int64_t st = taosGetTimestampMs();
|
||||
|
||||
// here only handle the data block sink operation
|
||||
if (type == STREAM_INPUT__DATA_BLOCK) {
|
||||
pTask->execInfo.sink.dataSize += blockSize;
|
||||
|
@ -873,6 +873,13 @@ static int32_t doStreamExecTask(SStreamTask* pTask) {
|
|||
if (code) {
|
||||
return code;
|
||||
}
|
||||
|
||||
double el = (taosGetTimestampMs() - st) / 1000.0;
|
||||
if (el > 5.0) { // elapsed more than 5 sec, not occupy the CPU anymore
|
||||
stDebug("s-task:%s occupy more than 5.0s, release the exec threads and idle for 500ms", id);
|
||||
streamTaskSetIdleInfo(pTask, 500);
|
||||
return code;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -572,11 +572,11 @@ void streamMetaClear(SStreamMeta* pMeta) {
|
|||
}
|
||||
|
||||
void streamMetaClose(SStreamMeta* pMeta) {
|
||||
stDebug("vgId:%d start to close stream meta", pMeta->vgId);
|
||||
if (pMeta == NULL) {
|
||||
return;
|
||||
}
|
||||
|
||||
stDebug("vgId:%d start to close stream meta", pMeta->vgId);
|
||||
int32_t code = taosRemoveRef(streamMetaRefPool, pMeta->rid);
|
||||
if (code) {
|
||||
stError("vgId:%d failed to remove meta ref:%" PRId64 ", code:%s", pMeta->vgId, pMeta->rid, tstrerror(code));
|
||||
|
|
|
@ -16,14 +16,7 @@
|
|||
#define _DEFAULT_SOURCE
|
||||
#include "os.h"
|
||||
#include "taoserror.h"
|
||||
|
||||
#if defined(CUS_NAME) || defined(CUS_PROMPT) || defined(CUS_EMAIL)
|
||||
#include "cus_name.h"
|
||||
#else
|
||||
#ifndef CUS_PROMPT
|
||||
#define CUS_PROMPT "taos"
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#define PROCESS_ITEM 12
|
||||
#define UUIDLEN37 37
|
||||
|
|
|
@ -385,13 +385,24 @@ static void taosReserveOldLog(char *oldName, char *keepName) {
|
|||
|
||||
static void taosKeepOldLog(char *oldName) {
|
||||
if (oldName[0] != 0) {
|
||||
char compressFileName[PATH_MAX + 20];
|
||||
snprintf(compressFileName, PATH_MAX + 20, "%s.gz", oldName);
|
||||
if (taosCompressFile(oldName, compressFileName) == 0) {
|
||||
int32_t code = taosRemoveFile(oldName);
|
||||
if (code != 0) {
|
||||
TAOS_UNUSED(printf("failed to remove file:%s, reason:%s\n", oldName, tstrerror(code)));
|
||||
}
|
||||
int32_t code = 0, lino = 0;
|
||||
TdFilePtr oldFile = NULL;
|
||||
if ((oldFile = taosOpenFile(oldName, TD_FILE_READ))) {
|
||||
TAOS_CHECK_GOTO(taosLockFile(oldFile), &lino, _exit2);
|
||||
char compressFileName[PATH_MAX + 20];
|
||||
snprintf(compressFileName, PATH_MAX + 20, "%s.gz", oldName);
|
||||
TAOS_CHECK_GOTO(taosCompressFile(oldName, compressFileName), &lino, _exit1);
|
||||
TAOS_CHECK_GOTO(taosRemoveFile(oldName), &lino, _exit1);
|
||||
_exit1:
|
||||
TAOS_UNUSED(taosUnLockFile(oldFile));
|
||||
_exit2:
|
||||
TAOS_UNUSED(taosCloseFile(&oldFile));
|
||||
} else {
|
||||
code = terrno;
|
||||
}
|
||||
if (code != 0 && tsLogEmbedded == 1) { // print error messages only in embedded log mode
|
||||
// avoid using uWarn or uError, as they may open a new log file and potentially cause a deadlock.
|
||||
fprintf(stderr, "WARN: failed at line %d to keep old log file:%s, reason:%s\n", lino, oldName, tstrerror(code));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1041,7 +1052,7 @@ static void taosWriteLog(SLogBuff *pLogBuf) {
|
|||
}
|
||||
|
||||
#define LOG_ROTATE_INTERVAL 3600
|
||||
#if !defined(TD_ENTERPRISE) || defined(ASSERT_NOT_CORE)
|
||||
#if !defined(TD_ENTERPRISE) || defined(ASSERT_NOT_CORE) || defined(GRANTS_CFG)
|
||||
#define LOG_INACTIVE_TIME 7200
|
||||
#define LOG_ROTATE_BOOT 900
|
||||
#else
|
||||
|
|
|
@ -21,16 +21,35 @@ from frame.sql import *
|
|||
from frame.caseBase import *
|
||||
from frame import *
|
||||
|
||||
|
||||
class TDTestCase(TBase):
|
||||
def caseDescription(self):
|
||||
"""
|
||||
[TD-11510] taosBenchmark test cases
|
||||
"""
|
||||
|
||||
def checkVersion(self):
|
||||
# run
|
||||
outputs = etool.runBinFile("taosBenchmark", "-V")
|
||||
print(outputs)
|
||||
if len(outputs) != 4:
|
||||
tdLog.exit(f"checkVersion return lines count {len(outputs)} != 4")
|
||||
# version string len
|
||||
assert len(outputs[1]) > 24
|
||||
# commit id
|
||||
assert len(outputs[2]) > 43
|
||||
assert outputs[2][:4] == "git:"
|
||||
# build info
|
||||
assert len(outputs[3]) > 36
|
||||
assert outputs[3][:6] == "build:"
|
||||
|
||||
tdLog.info("check taosBenchmark version successfully.")
|
||||
|
||||
|
||||
def run(self):
|
||||
# check version
|
||||
self.checkVersion()
|
||||
|
||||
# command line
|
||||
binPath = etool.benchMarkFile()
|
||||
cmd = (
|
||||
"%s -F 7 -n 10 -t 2 -x -y -M -C -d newtest -l 5 -A binary,nchar\(31\) -b tinyint,binary\(23\),bool,nchar -w 29 -E -m $%%^*"
|
||||
|
|
|
@ -34,7 +34,7 @@ class TDTestCase(TBase):
|
|||
sql = "select count(*) from meters"
|
||||
tdSql.query(sql)
|
||||
allCnt = tdSql.getData(0, 0)
|
||||
if allCnt < 2000000:
|
||||
if allCnt < 200000:
|
||||
tdLog.exit(f"taosbenchmark insert row small. row count={allCnt} sql={sql}")
|
||||
return
|
||||
|
||||
|
|
|
@ -30,7 +30,7 @@
|
|||
"name": "meters",
|
||||
"child_table_exists": "no",
|
||||
"childtable_count": 10,
|
||||
"insert_rows": 300000,
|
||||
"insert_rows": 30000,
|
||||
"childtable_prefix": "d",
|
||||
"insert_mode": "taosc",
|
||||
"insert_interval": 0,
|
||||
|
|
|
@ -27,10 +27,28 @@ class TDTestCase(TBase):
|
|||
case1<sdsang>: [TS-3072] taosdump dump escaped db name test
|
||||
"""
|
||||
|
||||
def checkVersion(self):
|
||||
# run
|
||||
outputs = etool.runBinFile("taosdump", "-V")
|
||||
print(outputs)
|
||||
if len(outputs) != 4:
|
||||
tdLog.exit(f"checkVersion return lines count {len(outputs)} != 4")
|
||||
# version string len
|
||||
assert len(outputs[1]) > 19
|
||||
# commit id
|
||||
assert len(outputs[2]) > 43
|
||||
assert outputs[2][:4] == "git:"
|
||||
# build info
|
||||
assert len(outputs[3]) > 36
|
||||
assert outputs[3][:6] == "build:"
|
||||
|
||||
tdLog.info("check taosdump version successfully.")
|
||||
|
||||
|
||||
def run(self):
|
||||
# check version
|
||||
self.checkVersion()
|
||||
|
||||
tdSql.prepare()
|
||||
|
||||
tdSql.execute("drop database if exists db")
|
||||
|
|
|
@ -230,7 +230,7 @@ endi
|
|||
sql_error show create stable t0;
|
||||
|
||||
sql show variables;
|
||||
if $rows != 88 then
|
||||
if $rows != 87 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
|
|
|
@ -190,7 +190,7 @@ system sh/exec.sh -n dnode1 -s start
|
|||
sql insert into t1 values(1648791223004,5,2,3,1.1);
|
||||
|
||||
loop4:
|
||||
sleep 1000
|
||||
run tsim/stream/checkTaskStatus.sim
|
||||
|
||||
sql select * from streamt;
|
||||
|
||||
|
|
|
@ -120,7 +120,7 @@ if $rows != 3 then
|
|||
endi
|
||||
|
||||
sql show variables;
|
||||
if $rows != 88 then
|
||||
if $rows != 87 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
|
|
|
@ -135,8 +135,38 @@ class TDTestCase:
|
|||
port = dnode.cfgDict["serverPort"]
|
||||
config_dir = dnode.cfgDir
|
||||
return taos.connect(host=host, port=int(port), config=config_dir)
|
||||
|
||||
def getShowGrantsTimeSeries(self, maxRetry=10):
|
||||
for nRetry in range(maxRetry):
|
||||
tdSql.query("show grants")
|
||||
timeseries = tdSql.queryResult[0][5]
|
||||
tdSql.query("show grants full")
|
||||
full_timeseries = tdSql.queryResult[1][3]
|
||||
if timeseries == full_timeseries:
|
||||
return int(timeseries.split('/')[0])
|
||||
else:
|
||||
tdLog.info(f"timeseries: {timeseries}, != full_timeseries: {full_timeseries}, retry: {nRetry}")
|
||||
time.sleep(1)
|
||||
raise Exception("Timeseries not equal within {maxRetry} seconds")
|
||||
|
||||
def s1_check_alive(self):
|
||||
def getTablesTimeSeries(self):
|
||||
tdSql.query(f"select cast(sum(columns-1) as int) as tss from information_schema.ins_tables where db_name not in ('information_schema', 'performance_schema', 'audit')")
|
||||
return int(tdSql.queryResult[0][0])
|
||||
|
||||
def checkGrantsTimeSeries(self, prompt="", nExpectedTimeSeries=0, maxRetry=10):
|
||||
for nRetry in range(maxRetry):
|
||||
tss_grant = self.getShowGrantsTimeSeries()
|
||||
if tss_grant == nExpectedTimeSeries:
|
||||
tss_table = self.getTablesTimeSeries()
|
||||
if tss_grant == tss_table:
|
||||
tdLog.info(f"{prompt}: tss_grant: {tss_grant} == tss_table: {tss_table}")
|
||||
return
|
||||
else:
|
||||
raise Exception(f"{prompt}: tss_grant: {tss_grant} != tss_table: {tss_table}")
|
||||
time.sleep(1)
|
||||
raise Exception(f"{prompt}: tss_grant: {tss_grant} != nExpectedTimeSeries: {nExpectedTimeSeries}")
|
||||
|
||||
def s1_check_timeseries(self):
|
||||
# check cluster alive
|
||||
tdLog.printNoPrefix("======== test cluster alive: ")
|
||||
tdSql.checkDataLoop(0, 0, 1, "show cluster alive;", 20, 0.5)
|
||||
|
@ -144,6 +174,46 @@ class TDTestCase:
|
|||
tdSql.query("show db.alive;")
|
||||
tdSql.checkData(0, 0, 1)
|
||||
|
||||
# check timeseries
|
||||
tss_grant = 5
|
||||
for i in range(0, 3):
|
||||
tdLog.printNoPrefix(f"======== test timeseries: loop{i}")
|
||||
self.checkGrantsTimeSeries("initial check", tss_grant)
|
||||
tdSql.execute("create database if not exists db100")
|
||||
tdSql.execute("create table db100.stb100(ts timestamp, c0 int,c1 bigint,c2 int,c3 float,c4 double) tags(t0 bigint unsigned)")
|
||||
tdSql.execute("create table db100.ctb100 using db100.stb100 tags(100)")
|
||||
tdSql.execute("create table db100.ctb101 using db100.stb100 tags(101)")
|
||||
tdSql.execute("create table db100.ntb100 (ts timestamp, c0 int,c1 bigint,c2 int,c3 float,c4 double)")
|
||||
tdSql.execute("create table db100.ntb101 (ts timestamp, c0 int,c1 bigint,c2 int,c3 float,c4 double)")
|
||||
tss_grant += 20
|
||||
self.checkGrantsTimeSeries("create tables and check", tss_grant)
|
||||
tdSql.execute("alter table db100.stb100 add column c5 int")
|
||||
tdSql.execute("alter stable db100.stb100 add column c6 int")
|
||||
tdSql.execute("alter table db100.stb100 add tag t1 int")
|
||||
tss_grant += 4
|
||||
self.checkGrantsTimeSeries("add stable column and check", tss_grant)
|
||||
tdSql.execute("create table db100.ctb102 using db100.stb100 tags(102, 102)")
|
||||
tdSql.execute("alter table db100.ctb100 set tag t0=1000")
|
||||
tdSql.execute("alter table db100.ntb100 add column c5 int")
|
||||
tss_grant += 8
|
||||
self.checkGrantsTimeSeries("add ntable column and check", tss_grant)
|
||||
tdSql.execute("alter table db100.stb100 drop column c5")
|
||||
tdSql.execute("alter table db100.stb100 drop tag t1")
|
||||
tdSql.execute("alter table db100.ntb100 drop column c0")
|
||||
tdSql.execute("alter table db100.stb100 drop column c0")
|
||||
tss_grant -= 7
|
||||
self.checkGrantsTimeSeries("drop stb/ntb column and check", tss_grant)
|
||||
tdSql.execute("drop table db100.ctb100")
|
||||
tdSql.execute("drop table db100.ntb100")
|
||||
tss_grant -= 10
|
||||
self.checkGrantsTimeSeries("drop ctb/ntb and check", tss_grant)
|
||||
tdSql.execute("drop table db100.stb100")
|
||||
tss_grant -= 10
|
||||
self.checkGrantsTimeSeries("drop stb and check", tss_grant)
|
||||
tdSql.execute("drop database db100")
|
||||
tss_grant -= 5
|
||||
self.checkGrantsTimeSeries("drop database and check", tss_grant)
|
||||
|
||||
def s2_check_show_grants_ungranted(self):
|
||||
tdLog.printNoPrefix("======== test show grants ungranted: ")
|
||||
self.infoPath = os.path.join(self.workPath, ".clusterInfo")
|
||||
|
@ -221,7 +291,7 @@ class TDTestCase:
|
|||
# print(self.master_dnode.cfgDict)
|
||||
# keep the order of following steps
|
||||
self.s0_five_dnode_one_mnode()
|
||||
self.s1_check_alive()
|
||||
self.s1_check_timeseries()
|
||||
self.s2_check_show_grants_ungranted()
|
||||
self.s3_check_show_grants_granted()
|
||||
|
||||
|
|
|
@ -47,6 +47,80 @@ class TDTestCase:
|
|||
break
|
||||
return buildPath
|
||||
|
||||
def checkLogBak(self, logPath, expectLogBak):
|
||||
if platform.system().lower() == 'windows':
|
||||
return True
|
||||
result = False
|
||||
try:
|
||||
for file in os.listdir(logPath):
|
||||
file_path = os.path.join(logPath, file)
|
||||
if os.path.isdir(file_path):
|
||||
continue
|
||||
if file.endswith('.gz'):
|
||||
if expectLogBak:
|
||||
result = True
|
||||
else:
|
||||
raise Exception(f"Error: Found .gz file: {file_path}")
|
||||
if '.' in file:
|
||||
prefix, num_part = file.split('.', 1)
|
||||
logNum=0
|
||||
if num_part.isdigit():
|
||||
logNum = int(num_part)
|
||||
if logNum > 100:
|
||||
if not expectLogBak:
|
||||
raise Exception(f"Error: Found log file number >= 100: {file_path}")
|
||||
except Exception as e:
|
||||
raise Exception(f"Error: error occurred. Reason: {e}")
|
||||
return result
|
||||
|
||||
def checkTargetStrInFiles(self, filePaths, targetStr):
|
||||
result = False
|
||||
for filePath in filePaths:
|
||||
if not os.path.exists(filePath):
|
||||
continue
|
||||
try:
|
||||
with open(filePath, 'r', encoding='utf-8') as file:
|
||||
for line in file:
|
||||
if targetStr in line:
|
||||
result = True
|
||||
break
|
||||
except Exception as e:
|
||||
continue
|
||||
return result
|
||||
|
||||
def logRotateOccurred(self, logFiles, targetStr, maxRetry=15):
|
||||
result = False
|
||||
for i in range(maxRetry):
|
||||
if self.checkTargetStrInFiles(logFiles, targetStr):
|
||||
result = True
|
||||
break
|
||||
tdLog.info(f"wait {i+1} second(s) for log rotate")
|
||||
time.sleep(1)
|
||||
return result
|
||||
|
||||
def checkLogCompress(self):
|
||||
tdLog.info("Running check log compress")
|
||||
dnodePath = self.buildPath + "/../sim/dnode1"
|
||||
logPath = f"{dnodePath}/log"
|
||||
taosdLogFiles = [f"{logPath}/taosdlog.0", f"{logPath}/taosdlog.1"]
|
||||
logRotateStr="process log rotation"
|
||||
logRotateResult = self.logRotateOccurred(taosdLogFiles, logRotateStr)
|
||||
tdSql.checkEqual(True, logRotateResult)
|
||||
tdSql.checkEqual(False, self.checkLogBak(logPath, False))
|
||||
tdSql.execute("alter all dnodes 'logKeepDays 3'")
|
||||
tdSql.execute("alter all dnodes 'numOfLogLines 1000'")
|
||||
tdSql.execute("alter all dnodes 'debugFlag 143'")
|
||||
logCompress=False
|
||||
for i in range(30):
|
||||
logCompress=self.checkLogBak(logPath, True)
|
||||
if logCompress:
|
||||
break
|
||||
tdLog.info(f"wait {i+1} second(s) for log compress")
|
||||
time.sleep(1)
|
||||
tdSql.checkEqual(True, logCompress)
|
||||
tdSql.execute("alter all dnodes 'numOfLogLines 1000000'")
|
||||
tdSql.execute("alter all dnodes 'debugFlag 131'")
|
||||
|
||||
def prepareCfg(self, cfgPath, cfgDict):
|
||||
tdLog.info("make dir %s" % cfgPath)
|
||||
os.makedirs(cfgPath, exist_ok=True)
|
||||
|
@ -338,6 +412,7 @@ class TDTestCase:
|
|||
tdSql.checkEqual(True, os.path.exists(f"{dnodePath}/log/taoslog0.0"))
|
||||
|
||||
def run(self):
|
||||
self.checkLogCompress()
|
||||
self.checkLogOutput()
|
||||
self.checkLogRotate()
|
||||
self.closeTaosd()
|
||||
|
|
|
@ -47,7 +47,7 @@ class TDTestCase:
|
|||
|
||||
def case2(self):
|
||||
tdSql.query("show variables")
|
||||
tdSql.checkRows(88)
|
||||
tdSql.checkRows(87)
|
||||
|
||||
for i in range(self.replicaVar):
|
||||
tdSql.query("show dnode %d variables like 'debugFlag'" % (i + 1))
|
||||
|
|
|
@ -255,7 +255,31 @@ class TDTestCase:
|
|||
tdSql.error(f"select distinct t1, t0 from (select t1,t0 from {dbname}.stb1 where t0 > 2 group by ts) where t1 < 3")
|
||||
tdSql.query(f"select distinct stb1.t1, stb1.t2 from {dbname}.stb1, {dbname}.stb2 where stb1.ts=stb2.ts and stb1.t2=stb2.t4")
|
||||
tdSql.query(f"select distinct t1.t1, t1.t2 from {dbname}.t1, {dbname}.t2 where t1.ts=t2.ts ")
|
||||
|
||||
self.ts5971()
|
||||
|
||||
def ts5971(self):
|
||||
dbname = "db"
|
||||
|
||||
tdSql.execute(f"DROP TABLE IF EXISTS {dbname}.t5971")
|
||||
tdSql.execute(f"create table {dbname}.t5971 (time TIMESTAMP, c1 INT)")
|
||||
tdSql.execute(f"INSERT INTO {dbname}.t5971(time, c1) VALUES (1641024000000, 1), (1641024005000, 2)")
|
||||
tdSql.query(f"SELECT DISTINCT CSUM(c1), time FROM {dbname}.t5971 ORDER BY time")
|
||||
tdSql.checkRows(2)
|
||||
tdSql.checkData(0, 0, 1)
|
||||
tdSql.checkData(0, 1, 1641024000000)
|
||||
tdSql.checkData(1, 0, 3)
|
||||
tdSql.checkData(1, 1, 1641024005000)
|
||||
|
||||
tdSql.query(f"SELECT DISTINCT CSUM(c1), time AS ref FROM {dbname}.t5971 ORDER BY ref")
|
||||
tdSql.checkRows(2)
|
||||
tdSql.checkData(0, 0, 1)
|
||||
tdSql.checkData(0, 1, 1641024000000)
|
||||
tdSql.checkData(1, 0, 3)
|
||||
tdSql.checkData(1, 1, 1641024005000)
|
||||
|
||||
tdSql.query(f"SELECT DISTINCT CSUM(c1), time FROM {dbname}.t5971")
|
||||
tdSql.checkRows(2)
|
||||
|
||||
|
||||
def stop(self):
|
||||
|
|
|
@ -1,267 +1,123 @@
|
|||
# TaosKeeper
|
||||
<!-- omit in toc -->
|
||||
# taosKeeper
|
||||
|
||||
taosKeeper 是 TDengine 各项监控指标的导出工具,通过简单的几项配置即可获取 TDengine 的运行状态。并且 taosKeeper 企业版支持多种收集器,可以方便进行监控数据的展示。
|
||||
[](https://github.com/taosdata/TDengine/actions/workflows/taoskeeper-ci-build.yml)
|
||||

|
||||

|
||||

|
||||
<br />
|
||||
[](https://twitter.com/tdenginedb)
|
||||
[](https://www.youtube.com/@tdengine)
|
||||
[](https://discord.com/invite/VZdSuUg4pS)
|
||||
[](https://www.linkedin.com/company/tdengine)
|
||||
[](https://stackoverflow.com/questions/tagged/tdengine)
|
||||
|
||||
taosKeeper 使用 TDengine RESTful 接口,所以不需要安装 TDengine 客户端即可使用。
|
||||
简体中文 | [English](./README.md)
|
||||
|
||||
## 构建
|
||||
<!-- omit in toc -->
|
||||
## 目录
|
||||
|
||||
### 获取源码
|
||||
- [1. 简介](#1-简介)
|
||||
- [2. 文档](#2-文档)
|
||||
- [3. 前置条件](#3-前置条件)
|
||||
- [4. 构建](#4-构建)
|
||||
- [5. 测试](#5-测试)
|
||||
- [5.1 运行测试](#51-运行测试)
|
||||
- [5.2 添加用例](#52-添加用例)
|
||||
- [5.3 性能测试](#53-性能测试)
|
||||
- [6. CI/CD](#6-cicd)
|
||||
- [7. 提交 Issues](#7-提交-issues)
|
||||
- [8. 提交 PR](#8-提交-pr)
|
||||
- [9. 引用](#9-引用)
|
||||
- [10. 许可证](#10-许可证)
|
||||
|
||||
从 GitHub 克隆源码:
|
||||
## 1. 简介
|
||||
|
||||
```sh
|
||||
git clone https://github.com/taosdata/TDengine
|
||||
cd TDengine/tools/keeper
|
||||
```
|
||||
taosKeeper 是 TDengine 3.0 版本全新引入的监控指标导出工具,旨在方便用户对 TDengine 的运行状态和性能指标进行实时监控。只需进行简单配置,TDengine 就能将自身的运行状态和各项指标等信息上报给 taosKeeper。taosKeeper 在接收到监控数据后,会利用 taosAdapter 提供的 RESTful 接口,将这些数据存储到 TDengine 中。
|
||||
|
||||
### 编译
|
||||
taosKeeper 的一个重要价值在于,它能够将多个甚至一批 TDengine 集群的监控数据集中存储到一个统一的平台。如此一来,监控软件便能轻松获取这些数据,进而实现对 TDengine 集群的全面监控与实时分析。通过 taosKeeper,用户可以更加便捷地了解 TDengine 的运行状况,及时发现并解决潜在问题,确保系统的稳定性和高效性。
|
||||
|
||||
taosKeeper 使用 `GO` 语言编写,在构建前需要配置好 `GO` 语言开发环境。
|
||||
## 2. 文档
|
||||
|
||||
```sh
|
||||
go mod tidy
|
||||
- 使用 taosKeeper,请参考 [taosKeeper 参考手册](https://docs.taosdata.com/reference/components/taoskeeper/),其中包括安装、配置、启动、数据收集与监控,以及集成 Prometheus 等方面的内容。
|
||||
- 本 README 主要面向希望自行贡献代码、编译和测试 taosKeeper 的开发者。如果想要学习 TDengine,可以浏览 [官方文档](https://docs.taosdata.com/)。
|
||||
|
||||
## 3. 前置条件
|
||||
|
||||
1. 已安装 Go 1.18 及以上版本。
|
||||
2. 本地已部署 TDengine,具体步骤请参考 [部署服务端](https://docs.taosdata.com/get-started/package/),且已启动 taosd 与 taosAdapter。
|
||||
|
||||
## 4. 构建
|
||||
|
||||
在 `TDengine/tools/keeper` 目录下运行以下命令以构建项目:
|
||||
|
||||
```bash
|
||||
go build
|
||||
```
|
||||
|
||||
## 安装
|
||||
## 5. 测试
|
||||
|
||||
如果是自行构建的项目,仅需要拷贝 `taoskeeper` 文件到你的 `PATH` 中。
|
||||
### 5.1 运行测试
|
||||
|
||||
```sh
|
||||
sudo install taoskeeper /usr/bin/
|
||||
在 `TDengine/tools/keeper` 目录下执行以下命令运行测试:
|
||||
|
||||
```bash
|
||||
sudo go test ./...
|
||||
```
|
||||
|
||||
## 启动
|
||||
测试用例将连接到本地的 TDengine 服务器和 taosAdapter 进行测试。测试完成后,你将看到类似如下的结果摘要。如果所有测试用例均通过,输出中将不会出现 `FAIL` 字样。
|
||||
|
||||
在启动前,应该做好如下配置:
|
||||
在 `/etc/taos/taoskeeper.toml` 配置 TDengine 连接参数以及监控指标前缀等其他信息。
|
||||
|
||||
```toml
|
||||
# gin 框架是否启用 debug
|
||||
debug = false
|
||||
|
||||
# 服务监听端口, 默认为 6043
|
||||
port = 6043
|
||||
|
||||
# 日志级别,包含 panic、error、info、debug、trace等
|
||||
loglevel = "info"
|
||||
|
||||
# 程序中使用协程池的大小
|
||||
gopoolsize = 50000
|
||||
|
||||
# 查询 TDengine 监控数据轮询间隔
|
||||
RotationInterval = "15s"
|
||||
|
||||
[tdengine]
|
||||
host = "127.0.0.1"
|
||||
port = 6041
|
||||
username = "root"
|
||||
password = "taosdata"
|
||||
|
||||
# 需要被监控的 taosAdapter
|
||||
[taosAdapter]
|
||||
address = ["127.0.0.1:6041"]
|
||||
|
||||
[metrics]
|
||||
# 监控指标前缀
|
||||
prefix = "taos"
|
||||
|
||||
# 存放监控数据的数据库
|
||||
database = "log"
|
||||
|
||||
# 指定需要监控的普通表
|
||||
tables = []
|
||||
|
||||
[environment]
|
||||
# 是否在容器中运行,影响 taosKeeper 自身的监控数据
|
||||
incgroup = false
|
||||
```text
|
||||
ok github.com/taosdata/taoskeeper/api 17.405s
|
||||
ok github.com/taosdata/taoskeeper/cmd 1.819s
|
||||
ok github.com/taosdata/taoskeeper/db 0.484s
|
||||
ok github.com/taosdata/taoskeeper/infrastructure/config 0.417s
|
||||
ok github.com/taosdata/taoskeeper/infrastructure/log 0.785s
|
||||
ok github.com/taosdata/taoskeeper/monitor 4.623s
|
||||
ok github.com/taosdata/taoskeeper/process 0.606s
|
||||
ok github.com/taosdata/taoskeeper/system 3.420s
|
||||
ok github.com/taosdata/taoskeeper/util 0.097s
|
||||
ok github.com/taosdata/taoskeeper/util/pool 0.146s
|
||||
```
|
||||
|
||||
现在可以启动服务,输入:
|
||||
### 5.2 添加用例
|
||||
|
||||
```sh
|
||||
taoskeeper
|
||||
```
|
||||
在以 `_test.go` 结尾的文件中添加测试用例,并且确保新增代码都有对应的测试用例覆盖。
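
添加用例后,可以先只运行新增的测试以快速验证(其中的用例名 `TestMyNewCase` 与包路径 `./api/` 仅为示例):

```bash
# 仅运行指定包中新增的测试用例,-v 输出详细日志
go test -run TestMyNewCase -v ./api/

# 再运行全部测试,确认没有引入回归
go test ./...
```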
|
||||
|
||||
如果你使用 `systemd`,复制 `taoskeeper.service` 到 `/lib/systemd/system/`,并启动服务。
|
||||
### 5.3 性能测试
|
||||
|
||||
```sh
|
||||
sudo cp taoskeeper.service /lib/systemd/system/
|
||||
sudo systemctl daemon-reload
|
||||
sudo systemctl start taoskeeper
|
||||
```
|
||||
性能测试正在开发中。
|
||||
|
||||
让 taosKeeper 随系统开机自启动。
|
||||
## 6. CI/CD
|
||||
|
||||
```sh
|
||||
sudo systemctl enable taoskeeper
|
||||
```
|
||||
- [Build Workflow](https://github.com/taosdata/TDengine/actions/workflows/taoskeeper-ci-build.yml)
|
||||
- Code Coverage - TODO
|
||||
|
||||
如果使用 `systemd`,你可以使用如下命令完成安装。
|
||||
## 7. 提交 Issues
|
||||
|
||||
```sh
|
||||
go mod tidy
|
||||
go build
|
||||
sudo install taoskeeper /usr/bin/
|
||||
sudo cp taoskeeper.service /lib/systemd/system/
|
||||
sudo systemctl daemon-reload
|
||||
sudo systemctl start taoskeeper
|
||||
sudo systemctl enable taoskeeper
|
||||
```
|
||||
我们欢迎提交 [GitHub Issue](https://github.com/taosdata/TDengine/issues)。提交时请尽量提供以下信息,以便快速定位问题:
|
||||
|
||||
## Docker
|
||||
- 问题描述:具体问题表现及是否必现,建议附上详细调用堆栈或日志信息。
|
||||
- taosKeeper 版本:可通过 `taoskeeper -V` 获取版本信息。
|
||||
- TDengine 服务端版本:可通过 `taos -V` 获取版本信息。
|
||||
|
||||
如下介绍了如何在 docker 中构建 taosKeeper:
|
||||
如有其它相关信息(如环境配置、操作系统版本等),请一并补充,以便我们更全面地了解问题。
|
||||
|
||||
在构建前请配置好 `./config/taoskeeper.toml` 中合适的参数,并编辑 Dockerfile ,示例如下。
|
||||
## 8. 提交 PR
|
||||
|
||||
```dockerfile
|
||||
FROM golang:1.18.6-alpine as builder
|
||||
我们欢迎开发者共同参与本项目开发,提交 PR 时请按照以下步骤操作:
|
||||
|
||||
WORKDIR /usr/src/taoskeeper
|
||||
COPY ./ /usr/src/taoskeeper/
|
||||
ENV GO111MODULE=on \
|
||||
GOPROXY=https://goproxy.cn,direct
|
||||
RUN go mod tidy && go build
|
||||
1. Fork 仓库:请先 Fork 本仓库,具体步骤请参考 [如何 Fork 仓库](https://docs.github.com/en/get-started/quickstart/fork-a-repo)。
|
||||
2. 创建新分支:基于 `main` 分支创建一个新分支,并使用有意义的分支名称(例如:`git checkout -b feature/my_feature`)。请勿直接在 main 分支上进行修改。
|
||||
3. 开发与测试:完成代码修改后,确保所有单元测试都能通过,并为新增功能或修复的 Bug 添加相应的测试用例。
|
||||
4. 提交代码:将修改提交到远程分支(例如:`git push origin feature/my_feature`)。
|
||||
5. 创建 Pull Request:在 GitHub 上发起 [Pull Request](https://github.com/taosdata/TDengine/pulls),具体步骤请参考 [如何创建 Pull Request](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request)。
|
||||
6. 检查 CI:提交 PR 后,可在 Pull Request 中找到自己提交的 PR,点击对应的链接,即可查看该 PR 的 CI 是否通过。若通过,会显示 `All checks have passed`。无论 CI 是否通过,均可点击 `Show all checks -> Details` 查看详细的测试用例日志。
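
上述分支操作涉及的 git 命令示意如下(分支名 `feature/my_feature` 仅为示例):

```bash
# 基于 main 创建并切换到新分支
git checkout -b feature/my_feature

# 完成修改并通过测试后,提交到远程分支
git push origin feature/my_feature
```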
|
||||
|
||||
FROM alpine:3
|
||||
RUN mkdir -p /etc/taos
|
||||
COPY --from=builder /usr/src/taoskeeper/taoskeeper /usr/bin/
|
||||
COPY ./config/taoskeeper.toml /etc/taos/taoskeeper.toml
|
||||
EXPOSE 6043
|
||||
CMD ["taoskeeper"]
|
||||
```
|
||||
## 9. 引用
|
||||
|
||||
如果已经有 taosKeeper 可执行文件,在配置好 `taoskeeper.toml` 后你可以使用如下方式构建:
|
||||
[TDengine 官网](https://www.taosdata.com/)
|
||||
|
||||
```dockerfile
|
||||
FROM ubuntu:18.04
|
||||
RUN mkdir -p /etc/taos
|
||||
COPY ./taoskeeper /usr/bin/
|
||||
COPY ./taoskeeper.toml /etc/taos/taoskeeper.toml
|
||||
EXPOSE 6043
|
||||
CMD ["taoskeeper"]
|
||||
```
|
||||
## 10. 许可证
|
||||
|
||||
## Usage (**Enterprise Edition**)

### Prometheus (by scrape)

taosKeeper can expose monitoring metrics to Prometheus just like `node-exporter`.

Add the following to `/etc/prometheus/prometheus.yml`:

```yml
global:
  scrape_interval: 5s

scrape_configs:
  - job_name: "taoskeeper"
    static_configs:
      - targets: ["taoskeeper:6043"]
```

Now PromQL queries will return results. For example, to show the disk usage percentage of the hosts selected by an FQDN regular-expression match:

```promql
taos_dn_disk_used / taos_dn_disk_total {fqdn=~ "tdengine.*"}
```
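If you prefer to check such an expression from code rather than from the Prometheus UI, the standard Prometheus HTTP API can be queried directly. The sketch below is only an illustration, not part of taosKeeper, and assumes Prometheus is reachable on `localhost:9090` as in the docker-compose example that follows.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	// Same expression as above, URL-encoded and sent to the Prometheus HTTP API.
	expr := `taos_dn_disk_used / taos_dn_disk_total {fqdn=~ "tdengine.*"}`
	resp, err := http.Get("http://localhost:9090/api/v1/query?query=" + url.QueryEscape(expr))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body)) // raw JSON result vector
}
```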
You can use `docker-compose` to test the complete pipeline.

Example `docker-compose.yml`:

```yml
version: "3.7"

services:
  tdengine:
    image: tdengine/tdengine
    environment:
      TAOS_FQDN: tdengine
    volumes:
      - taosdata:/var/lib/taos
  taoskeeper:
    build: ./
    depends_on:
      - tdengine
    environment:
      TDENGINE_HOST: tdengine
      TDENGINE_PORT: 6041
    volumes:
      - ./config/taoskeeper.toml:/etc/taos/taoskeeper.toml
    ports:
      - 6043:6043
  prometheus:
    image: prom/prometheus
    volumes:
      - ./prometheus/:/etc/prometheus/
    ports:
      - 9090:9090
volumes:
  taosdata:
```
Start the stack:

```sh
docker-compose up -d
```

Now visit <http://localhost:9090> to query the results. See this [simple dashboard](https://grafana.com/grafana/dashboards/15164) for a quick-start example of monitoring TDengine with taosKeeper + Prometheus + Grafana.

### Telegraf

If you use Telegraf to collect the metrics, just add the following input to its configuration:

```toml
[[inputs.prometheus]]
  ## An array of urls to scrape metrics from.
  urls = ["http://taoskeeper:6043/metrics"]
```

You can test it with `docker-compose`:

```sh
docker-compose -f docker-compose.yml -f telegraf.yml up -d telegraf taoskeeper
```

Because `telegraf.conf` sets the log output to standard output:

```toml
[[outputs.file]]
  files = ["stdout"]
```

you can follow the TDengine metrics on standard output with `docker-compose logs`:

```sh
docker-compose -f docker-compose.yml -f telegraf.yml logs -f telegraf
```

### Zabbix

1. Import the Zabbix template file `zbx_taos_keeper_templates.xml`.
2. Create the host from the `TDengine` template and modify the macros `{$TAOSKEEPER_HOST}` and `{$COLLECTION_INTERVAL}`.
3. Wait for the monitoring items to be created automatically.

### FAQ

* taosKeeper fails to start and reports "connection refused".

**Answer**: taosKeeper relies on the RESTful interface to query data. Check whether taosAdapter is running normally and whether the taosAdapter address in taoskeeper.toml is correct.

* Why do different TDengine instances monitored by taosKeeper show different numbers of metrics?

**Answer**: If a metric has not been created in TDengine, taosKeeper cannot collect the corresponding result.

* Cannot receive monitoring logs from TDengine.

**Answer**: Modify `/etc/taos/taos.cfg` and add the following parameters:

```cfg
monitor 1             // enable monitoring
monitorInterval 30    // send interval (s)
monitorFqdn localhost // FQDN that receives the messages, empty by default
monitorPort 6043      // port that receives the messages
monitorMaxLogs 100    // maximum number of logs cached per monitoring interval
```

[AGPL-3.0 License](../../LICENSE)
@ -1,273 +1,123 @@
# TaosKeeper

<!-- omit in toc -->
# taosKeeper

taosKeeper is a TDengine metrics exporter for a variety of collectors: with a few simple configurations you can obtain the running status of TDengine.

[CI](https://github.com/taosdata/TDengine/actions/workflows/taoskeeper-ci-build.yml)
<br />
[Twitter](https://twitter.com/tdenginedb)
[YouTube](https://www.youtube.com/@tdengine)
[Discord](https://discord.com/invite/VZdSuUg4pS)
[LinkedIn](https://www.linkedin.com/company/tdengine)
[StackOverflow](https://stackoverflow.com/questions/tagged/tdengine)

This tool uses the TDengine RESTful API, so you can build it without the TDengine client.

English | [简体中文](./README-CN.md)

## Build

<!-- omit in toc -->
## Table of Contents

### Get the source code

- [1. Introduction](#1-introduction)
- [2. Documentation](#2-documentation)
- [3. Prerequisites](#3-prerequisites)
- [4. Build](#4-build)
- [5. Testing](#5-testing)
  - [5.1 Test Execution](#51-test-execution)
  - [5.2 Test Case Addition](#52-test-case-addition)
  - [5.3 Performance Testing](#53-performance-testing)
- [6. CI/CD](#6-cicd)
- [7. Submitting Issues](#7-submitting-issues)
- [8. Submitting PR](#8-submitting-pr)
- [9. References](#9-references)
- [10. License](#10-license)
```sh
git clone https://github.com/taosdata/TDengine
cd TDengine/tools/keeper
```

## 1. Introduction

### compile

taosKeeper is a new monitoring indicator export tool introduced in TDengine 3.0, which is designed to facilitate users to monitor the operating status and performance indicators of TDengine in real time. With simple configuration, TDengine can report its own operating status and various indicators to taosKeeper. After receiving the monitoring data, taosKeeper will use the RESTful interface provided by taosAdapter to store the data in TDengine.

```sh
go mod tidy
```

An important value of taosKeeper is that it can store the monitoring data of multiple or even a batch of TDengine clusters in a unified platform. In this way, the monitoring software can easily obtain this data, and then realize comprehensive monitoring and real-time analysis of the TDengine cluster. Through taosKeeper, users can more easily understand the operation status of TDengine, discover and solve potential problems in a timely manner, and ensure the stability and efficiency of the system.

## 2. Documentation

- To use taosKeeper, please refer to the [taosKeeper Reference](https://docs.tdengine.com/tdengine-reference/components/taoskeeper/), which includes installation, configuration, startup, data collection and monitoring, and Prometheus integration.
- This README is mainly for developers who want to contribute code, compile and test taosKeeper. If you want to learn TDengine, you can browse the [official documentation](https://docs.tdengine.com/).

## 3. Prerequisites

1. Go 1.18 or above has been installed.
2. TDengine has been deployed locally. For specific steps, please refer to [Deploy TDengine](https://docs.tdengine.com/get-started/deploy-from-package/), and taosd and taosAdapter have been started.

## 4. Build

Run the following command in the `TDengine/tools/keeper` directory to build the project:

```bash
go build
```
## Install

If you build the tool yourself, just copy the `taoskeeper` binary to your `PATH`.

```sh
sudo install taoskeeper /usr/bin/
```

## 5. Testing

### 5.1 Test Execution

Run the test by executing the following command in the `TDengine/tools/keeper` directory:

```bash
sudo go test ./...
```

The test cases will connect to the local TDengine server and taosAdapter for testing. After the test is completed, you will see a result summary similar to the following. If all test cases pass, there will be no `FAIL` in the output.

## Start

Before starting, you should configure some options, such as the database IP and port, or the prefix for exported metrics, in `/etc/taos/taoskeeper.toml`.
```toml
# Start with debug middleware for gin
debug = false

# Listen port, default is 6043
port = 6043

# log level
loglevel = "info"

# go pool size
gopoolsize = 50000

# interval for TDengine metrics
RotationInterval = "15s"

[tdengine]
host = "127.0.0.1"
port = 6041
username = "root"
password = "taosdata"

# list of taosAdapter that need to be monitored
[taosAdapter]
address = ["127.0.0.1:6041"]

[metrics]
# metrics prefix in metrics names.
prefix = "taos"

# database for storing metrics data
database = "log"

# export some tables that are not super table
tables = []

[environment]
# Whether running in cgroup.
incgroup = false
```

```text
ok github.com/taosdata/taoskeeper/api 17.405s
ok github.com/taosdata/taoskeeper/cmd 1.819s
ok github.com/taosdata/taoskeeper/db 0.484s
ok github.com/taosdata/taoskeeper/infrastructure/config 0.417s
ok github.com/taosdata/taoskeeper/infrastructure/log 0.785s
ok github.com/taosdata/taoskeeper/monitor 4.623s
ok github.com/taosdata/taoskeeper/process 0.606s
ok github.com/taosdata/taoskeeper/system 3.420s
ok github.com/taosdata/taoskeeper/util 0.097s
ok github.com/taosdata/taoskeeper/util/pool 0.146s
```
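To sanity-check edits to the sample `taoskeeper.toml` shown earlier from Go, a throwaway loader can decode it into a struct. This is only a sketch, not taosKeeper's real configuration code; it assumes the third-party `github.com/BurntSushi/toml` package and mirrors just a few of the fields from the sample.

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// keeperConfig mirrors a subset of /etc/taos/taoskeeper.toml.
type keeperConfig struct {
	Debug    bool   `toml:"debug"`
	Port     int    `toml:"port"`
	Loglevel string `toml:"loglevel"`
	TDengine struct {
		Host     string `toml:"host"`
		Port     int    `toml:"port"`
		Username string `toml:"username"`
		Password string `toml:"password"`
	} `toml:"tdengine"`
}

func main() {
	var cfg keeperConfig
	if _, err := toml.DecodeFile("/etc/taos/taoskeeper.toml", &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("listen on :%d, backend %s:%d\n", cfg.Port, cfg.TDengine.Host, cfg.TDengine.Port)
}
```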
Now you can run the tool:

```sh
taoskeeper
```

### 5.2 Test Case Addition

Add test cases in files ending with `_test.go` and make sure the new code is covered by the corresponding test cases.
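As a rough illustration of the shape such a case usually takes in this code base (table-driven, using testify's `assert`), here is a hypothetical test; the `escapeTableName` helper does not exist in taosKeeper and is defined inline only so the sketch compiles on its own.

```go
package util

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// escapeTableName is a stand-in helper defined here only so the example compiles.
func escapeTableName(name string) string {
	return "`" + name + "`"
}

func Test_escapeTableName(t *testing.T) {
	cases := []struct {
		name     string
		input    string
		expected string
	}{
		{"plain", "meters", "`meters`"},
		{"keyword-like", "table", "`table`"},
	}
	for _, tt := range cases {
		t.Run(tt.name, func(t *testing.T) {
			assert.Equal(t, tt.expected, escapeTableName(tt.input))
		})
	}
}
```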
If you use `systemd`, copy the `taoskeeper.service` to `/lib/systemd/system/` and start the service:

```sh
sudo cp taoskeeper.service /lib/systemd/system/
sudo systemctl daemon-reload
sudo systemctl start taoskeeper
```

### 5.3 Performance Testing

Performance testing is under development.

To start taoskeeper whenever the OS reboots, enable the systemd service:

```sh
sudo systemctl enable taoskeeper
```

## 6. CI/CD

- [Build Workflow](https://github.com/taosdata/TDengine/actions/workflows/taoskeeper-ci-build.yml)
- Code Coverage - TODO

So if you use `systemd`, it is easiest to install it with these lines all in one go:

```sh
go mod tidy
go build
sudo install taoskeeper /usr/bin/
sudo cp taoskeeper.service /lib/systemd/system/
sudo systemctl daemon-reload
sudo systemctl start taoskeeper
sudo systemctl enable taoskeeper
```

## 7. Submitting Issues

We welcome submissions of [GitHub Issues](https://github.com/taosdata/TDengine/issues). Please provide the following information when submitting so that the problem can be quickly located:
- Problem description: The specific problem manifestation and whether it must occur. It is recommended to attach detailed call stack or log information.
- taosKeeper version: You can get the version information through `taoskeeper -V`.
- TDengine server version: You can get the version information through `taos -V`.

If you have other relevant information (such as environment configuration, operating system version, etc.), please add it so that we can understand the problem more comprehensively.

## 8. Submitting PR

We welcome developers to participate in the development of this project. Please follow the steps below when submitting a PR:

1. Fork the repository: Please fork this repository first. For specific steps, please refer to [How to Fork a Repository](https://docs.github.com/en/get-started/quickstart/fork-a-repo).
2. Create a new branch: Create a new branch based on the `main` branch and use a meaningful branch name (for example: `git checkout -b feature/my_feature`). Do not modify it directly on the main branch.
3. Development and testing: After completing the code modification, make sure that all unit tests pass, and add corresponding test cases for new features or fixed bugs.
4. Submit code: Push the changes to the remote branch (for example: `git push origin feature/my_feature`).
5. Create a Pull Request: Initiate a [Pull Request](https://github.com/taosdata/TDengine/pulls) on GitHub. For specific steps, please refer to [How to Create a Pull Request](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request).
6. Check CI: After submitting the PR, you can find it in the Pull Request list and click the corresponding link to check whether its CI has passed. If it has passed, it will show `All checks have passed`. Regardless of whether CI has passed or not, you can click `Show all checks -> Details` to view the detailed test case logs.

## 9. References

[TDengine Official Website](https://www.tdengine.com/)

## 10. License

## Docker

Here is an example that shows how to build this tool in Docker.

Before building, you should configure `./config/taoskeeper.toml` with proper parameters and edit the Dockerfile. Take the following as an example:

```dockerfile
FROM golang:1.18.2 as builder
WORKDIR /usr/src/taoskeeper
COPY ./ /usr/src/taoskeeper/
ENV GO111MODULE=on \
    GOPROXY=https://goproxy.cn,direct
RUN go mod tidy && go build

FROM alpine:3
RUN mkdir -p /etc/taos
COPY --from=builder /usr/src/taoskeeper/taoskeeper /usr/bin/
COPY ./config/taoskeeper.toml /etc/taos/taoskeeper.toml
EXPOSE 6043
CMD ["taoskeeper"]
```

If you already have the taosKeeper binary, you can build the image like this:

```dockerfile
FROM ubuntu:18.04
RUN mkdir -p /etc/taos
COPY ./taoskeeper /usr/bin/
COPY ./taoskeeper.toml /etc/taos/taoskeeper.toml
EXPOSE 6043
CMD ["taoskeeper"]
```
## Usage (**Enterprise Edition**)

### Prometheus (by scrape)

taosKeeper now acts as a Prometheus exporter like `node-exporter`.

Here is how to add it to the scrape configs in `/etc/prometheus/prometheus.yml`:

```yml
global:
  scrape_interval: 5s

scrape_configs:
  - job_name: "taoskeeper"
    static_configs:
      - targets: [ "taoskeeper:6043" ]
```

Now a PromQL query will show the right result. For example, to show the disk usage percentage of a specific host selected with an FQDN regex match expression:

```promql
taos_dn_disk_used / taos_dn_disk_total {fqdn=~ "tdengine.*"}
```

You can use `docker-compose` with the current `docker-compose.yml` to test the whole stack.

Here is the `docker-compose.yml`:

```yml
version: "3.7"

services:
  tdengine:
    image: tdengine/tdengine
    environment:
      TAOS_FQDN: tdengine
    volumes:
      - taosdata:/var/lib/taos
  taoskeeper:
    build: ./
    depends_on:
      - tdengine
    environment:
      TDENGINE_HOST: tdengine
      TDENGINE_PORT: 6041
    volumes:
      - ./config/taoskeeper.toml:/etc/taos/taoskeeper.toml
    ports:
      - 6043:6043
  prometheus:
    image: prom/prometheus
    volumes:
      - ./prometheus/:/etc/prometheus/
    ports:
      - 9090:9090
volumes:
  taosdata:
```
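Before wiring up the scrape config shown above, it can help to confirm that the exporter endpoint itself responds. The sketch below is only an illustrative check, not part of taosKeeper; it assumes taosKeeper is reachable on `localhost:6043` and prints the metric lines whose names start with the configured `taos` prefix.

```go
package main

import (
	"bufio"
	"fmt"
	"net/http"
	"strings"
)

func main() {
	resp, err := http.Get("http://localhost:6043/metrics")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	sc := bufio.NewScanner(resp.Body)
	for sc.Scan() {
		line := sc.Text()
		// Skip HELP/TYPE comments; keep samples exported with the "taos" prefix.
		if strings.HasPrefix(line, "taos") {
			fmt.Println(line)
		}
	}
	if err := sc.Err(); err != nil {
		panic(err)
	}
}
```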
Start the stack:

```sh
docker-compose up -d
```

Now point your browser to <http://localhost:9090> (if you have not started a Prometheus server yourself) and run queries.

For a quick demo with TaosKeeper + Prometheus + Grafana, we provide a [simple dashboard](https://grafana.com/grafana/dashboards/15164) to monitor TDengine.

### Telegraf

If you are using Telegraf to collect metrics, just add an input like this:

```toml
[[inputs.prometheus]]
  ## An array of urls to scrape metrics from.
  urls = ["http://taoskeeper:6043/metrics"]
```

You can test it with `docker-compose`:

```sh
docker-compose -f docker-compose.yml -f telegraf.yml up -d telegraf taoskeeper
```

Since we have set a stdout file output in `telegraf.conf`:

```toml
[[outputs.file]]
  files = ["stdout"]
```

you can track the TDengine metrics on standard output with `docker-compose logs`:

```sh
docker-compose -f docker-compose.yml -f telegraf.yml logs -f telegraf
```

### Zabbix

1. Import the Zabbix template file `zbx_taos_keeper_templates.xml`.
2. Use the `TDengine` template to create the host and modify the macros `{$TAOSKEEPER_HOST}` and `{$COLLECTION_INTERVAL}`.
3. Wait for the monitoring items to be created automatically.

### FAQ

* taosKeeper fails to start and reports "connection refused".

**Answer**: taosKeeper relies on the RESTful interface to query data. Check whether taosAdapter is running and whether the taosAdapter address in taoskeeper.toml is correct.

* Why are the metrics reported for different TDengine instances inconsistent in taosKeeper monitoring?

**Answer**: If a metric is not created in TDengine, taosKeeper cannot get the corresponding results.

* Cannot receive logs from the TDengine server.

**Answer**: Modify the `/etc/taos/taos.cfg` file and add parameters like:

```cfg
monitor 1                  // start monitor
monitorInterval 30         // send log interval (s)
monitorFqdn localhost
monitorPort 6043           // taosKeeper port
monitorMaxLogs 100
```
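For the first two FAQ entries, a ten-line reachability probe often answers the question faster than reading logs. The sketch below is only a local diagnostic, not taosKeeper code; it assumes the default ports from this README, 6041 for taosAdapter's RESTful interface and 6043 for taosKeeper.

```go
package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	for _, addr := range []string{"127.0.0.1:6041", "127.0.0.1:6043"} {
		conn, err := net.DialTimeout("tcp", addr, 2*time.Second)
		if err != nil {
			fmt.Printf("%-16s unreachable: %v\n", addr, err)
			continue
		}
		conn.Close()
		fmt.Printf("%-16s reachable\n", addr)
	}
}
```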
[AGPL-3.0 License](../../LICENSE)
@ -411,9 +411,11 @@ func (p *Processor) Prepare() {
func (p *Processor) withDBName(tableName string) string {
    b := pool.BytesPoolGet()
    b.WriteByte('`')
    b.WriteString(p.db)
    b.WriteByte('.')
    b.WriteString("`.`")
    b.WriteString(tableName)
    b.WriteByte('`')
    return b.String()
}
@ -119,3 +119,9 @@ func Test_getStatusStr(t *testing.T) {
            assert.Equal(t, tt.expected, res)
        }
}

func Test_withDBName(t *testing.T) {
    processor := &Processor{db: "db"}
    res := processor.withDBName("test")
    assert.Equal(t, res, "`db`.`test`")
}
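For readers skimming the diff, the behavioural change is that both the database and the table name are now wrapped in backticks. Below is a standalone sketch of that quoting rule, an illustrative re-implementation rather than the taosKeeper code itself.

```go
package main

import (
	"bytes"
	"fmt"
)

// quote joins a database and table name as `db`.`table`, which keeps names
// containing keywords or unusual characters safe to interpolate into SQL.
func quote(db, table string) string {
	var b bytes.Buffer
	b.WriteByte('`')
	b.WriteString(db)
	b.WriteString("`.`")
	b.WriteString(table)
	b.WriteByte('`')
	return b.String()
}

func main() {
	fmt.Println(quote("db", "test")) // prints `db`.`test`
}
```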
@ -1,8 +1,6 @@
package util

import (
    "testing"
)
import "testing"

func TestEmpty(t *testing.T) {
}
@ -17,16 +17,10 @@
#include <pwd.h>
#endif

#include "cus_name.h"
#include "shellInt.h"
#include "version.h"

#if defined(CUS_NAME) || defined(CUS_PROMPT) || defined(CUS_EMAIL)
#include "cus_name.h"
#else
#ifndef CUS_PROMPT
#define CUS_PROMPT "taos"
#endif
#endif

#define TAOS_CONSOLE_PROMPT_CONTINUE " -> "
@ -49,34 +49,6 @@
// ---------------- define ----------------
//

#if defined(CUS_NAME) || defined(CUS_PROMPT) || defined(CUS_EMAIL)
#include <cus_name.h>
#else
#ifndef CUS_NAME
#define CUS_NAME "TDengine"
#endif

#ifndef CUS_PROMPT
#define CUS_PROMPT "taos"
#endif

#ifndef CUS_EMAIL
#define CUS_EMAIL "<support@taosdata.com>"
#endif
#endif

// get taosdump commit number version
#ifndef TAOSDUMP_COMMIT_SHA1
#define TAOSDUMP_COMMIT_SHA1 "unknown"
#endif

#ifndef TAOSDUMP_TAG
#define TAOSDUMP_TAG "0.1.0"
#endif

#ifndef TAOSDUMP_STATUS
#define TAOSDUMP_STATUS "unknown"
#endif

// use 256 as normal buffer length
#define BUFFER_LEN 256
@ -36,158 +36,52 @@ ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "loongarch64")
MESSAGE(STATUS "Set CPUTYPE to loongarch64")
ENDIF ()

#
# collect --version information
#
MESSAGE("collect --version show info:")
# version
IF (DEFINED TD_VER_NUMBER)
    ADD_DEFINITIONS(-DTD_VER_NUMBER="${TD_VER_NUMBER}")
    MESSAGE(STATUS "version:${TD_VER_NUMBER}")
ELSE ()
    # abort build
    MESSAGE(FATAL_ERROR "build taos-tools not found TD_VER_NUMBER define.")
ENDIF ()

# commit id
FIND_PACKAGE(Git)
IF(GIT_FOUND)
    IF (EXISTS "${CMAKE_CURRENT_LIST_DIR}/../VERSION")
        MESSAGE("Found VERSION file")
        EXECUTE_PROCESS(
            COMMAND grep "^taosdump" "${CMAKE_CURRENT_LIST_DIR}/../VERSION"
            RESULT_VARIABLE RESULT
            OUTPUT_VARIABLE TAOSDUMP_FULLTAG
        )
        EXECUTE_PROCESS(
            COMMAND sh -c "git --git-dir=${CMAKE_CURRENT_LIST_DIR}/../.git --work-tree=${CMAKE_CURRENT_LIST_DIR}/.. log --pretty=oneline -n 1 HEAD"
            WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
            RESULT_VARIABLE RESULT
            OUTPUT_VARIABLE TAOSDUMP_COMMIT_SHA1
        )
        EXECUTE_PROCESS(
            COMMAND grep "^taosbenchmark" "${CMAKE_CURRENT_LIST_DIR}/../VERSION"
            RESULT_VARIABLE RESULT
            OUTPUT_VARIABLE TAOSBENCHMARK_FULLTAG
        )
        EXECUTE_PROCESS(
            COMMAND sh -c "git --git-dir=${CMAKE_CURRENT_LIST_DIR}/../.git --work-tree=${CMAKE_CURRENT_LIST_DIR}/.. log --pretty=oneline -n 1 HEAD"
            WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
            RESULT_VARIABLE RESULT
            OUTPUT_VARIABLE TAOSBENCHMARK_COMMIT_SHA1
        )
    ELSE ()
        MESSAGE("Use git tag")
        EXECUTE_PROCESS(
            COMMAND sh -c "git for-each-ref --sort=taggerdate --format '%(tag)' refs/tags|grep taosdump|tail -1"
            WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
            RESULT_VARIABLE TAG_RESULT
            OUTPUT_VARIABLE TAOSDUMP_FULLTAG
        )
        EXECUTE_PROCESS(
            COMMAND sh -c "git --git-dir=${CMAKE_CURRENT_LIST_DIR}/../.git --work-tree=${CMAKE_CURRENT_LIST_DIR}/.. log --pretty=oneline -n 1 ${TAOSDUMP_FULLTAG}"
            WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
            RESULT_VARIABLE RESULT
            OUTPUT_VARIABLE TAOSDUMP_COMMIT_SHA1
        )
        EXECUTE_PROCESS(
            COMMAND sh -c "git for-each-ref --sort=taggerdate --format '%(tag)' refs/tags|grep taosbenchmark|tail -1"
            WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
            RESULT_VARIABLE TAG_RESULT
            OUTPUT_VARIABLE TAOSBENCHMARK_FULLTAG
        )
        EXECUTE_PROCESS(
            COMMAND sh -c "git --git-dir=${CMAKE_CURRENT_LIST_DIR}/../.git --work-tree=${CMAKE_CURRENT_LIST_DIR}/.. log --pretty=oneline -n 1 ${TAOSBENCHMARK_FULLTAG}"
            WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
            RESULT_VARIABLE RESULT
            OUTPUT_VARIABLE TAOSBENCHMARK_COMMIT_SHA1
        )
    ENDIF ()
    # get
    EXECUTE_PROCESS(
        COMMAND sh -c "git --git-dir=${CMAKE_CURRENT_LIST_DIR}/../.git --work-tree=${CMAKE_CURRENT_LIST_DIR}/.. status -z -s ${CMAKE_CURRENT_LIST_DIR}/taosdump.c"
        RESULT_VARIABLE RESULT
        OUTPUT_VARIABLE TAOSDUMP_STATUS
        ERROR_QUIET
    )
        COMMAND git log -1 --format=%H
        WORKING_DIRECTORY ${TD_COMMUNITY_DIR}
        OUTPUT_VARIABLE GIT_COMMIT_ID
    )

    # version
    IF (DEFINED TD_VER_NUMBER)
        # use tdengine version
        SET(TAOSBENCHMARK_TAG ${TD_VER_NUMBER})
        SET(TAOSDUMP_TAG ${TD_VER_NUMBER})
        MESSAGE(STATUS "use TD_VER_NUMBER version: " ${TD_VER_NUMBER})
    ELSE ()
        # use internal version
        EXECUTE_PROCESS(
            COMMAND sh "-c" "echo '${TAOSDUMP_FULLTAG}' | awk -F '-' '{print $2}'"
            RESULT_VARIABLE RESULT
            OUTPUT_VARIABLE TAOSDUMP_TAG
        )
        MESSAGE(STATUS "taosdump use origin version: " ${TAOSDUMP_TAG})
        EXECUTE_PROCESS(
            COMMAND sh "-c" "echo '${TAOSBENCHMARK_FULLTAG}' | awk -F '-' '{print $2}'"
            RESULT_VARIABLE RESULT
            OUTPUT_VARIABLE TAOSBENCHMARK_TAG
        )
        MESSAGE(STATUS "taosBenchmark use origin version: " ${TAOSBENCHMARK_TAG})
    ENDIF ()
    STRING(SUBSTRING "${GIT_COMMIT_ID}" 0 40 TAOSBENCHMARK_COMMIT_ID)
    SET(TAOSDUMP_COMMIT_ID "${TAOSBENCHMARK_COMMIT_ID}")

    EXECUTE_PROCESS(
        COMMAND sh -c "git --git-dir=${CMAKE_CURRENT_LIST_DIR}/../.git --work-tree=${CMAKE_CURRENT_LIST_DIR}/.. status -z -s ${CMAKE_CURRENT_LIST_DIR}/bench*.c"
        RESULT_VARIABLE RESULT
        OUTPUT_VARIABLE TAOSBENCHMARK_STATUS
        ERROR_QUIET
    )
    IF ("${TAOSDUMP_COMMIT_SHA1}" STREQUAL "")
        SET(TAOSDUMP_COMMIT_SHA1 "unknown")
    ELSE ()
        STRING(SUBSTRING "${TAOSDUMP_COMMIT_SHA1}" 0 40 TAOSDUMP_COMMIT_SHA1)
        STRING(STRIP "${TAOSDUMP_COMMIT_SHA1}" TAOSDUMP_COMMIT_SHA1)
    ENDIF ()
    IF ("${TAOSDUMP_TAG}" STREQUAL "")
        SET(TAOSDUMP_TAG "0.1.0")
    ELSE ()
        STRING(STRIP "${TAOSDUMP_TAG}" TAOSDUMP_TAG)
    ENDIF ()

    IF ("${TAOSBENCHMARK_COMMIT_SHA1}" STREQUAL "")
        SET(TAOSBENCHMARK_COMMIT_SHA1 "unknown")
    ELSE ()
        STRING(SUBSTRING "${TAOSBENCHMARK_COMMIT_SHA1}" 0 40 TAOSBENCHMARK_COMMIT_SHA1)
        STRING(STRIP "${TAOSBENCHMARK_COMMIT_SHA1}" TAOSBENCHMARK_COMMIT_SHA1)
    ENDIF ()
    IF ("${TAOSBENCHMARK_TAG}" STREQUAL "")
        SET(TAOSBENCHMARK_TAG "0.1.0")
    ELSE ()
        STRING(STRIP "${TAOSBENCHMARK_TAG}" TAOSBENCHMARK_TAG)
    ENDIF ()
    # show
    MESSAGE(STATUS "taosdump commit id: ${TAOSDUMP_COMMIT_ID}")
    MESSAGE(STATUS "taosBenchmark commit id: ${TAOSBENCHMARK_COMMIT_ID}")
    # define
    ADD_DEFINITIONS(-DTAOSDUMP_COMMIT_ID="${TAOSDUMP_COMMIT_ID}")
    ADD_DEFINITIONS(-DTAOSBENCHMARK_COMMIT_ID="${TAOSBENCHMARK_COMMIT_ID}")
ELSE()
    MESSAGE("Git not found")
    SET(TAOSDUMP_COMMIT_SHA1 "unknown")
    SET(TAOSBENCHMARK_COMMIT_SHA1 "unknown")
    SET(TAOSDUMP_TAG "0.1.0")
    SET(TAOSDUMP_STATUS "unknown")
    SET(TAOSBENCHMARK_STATUS "unknown")
    MESSAGE(FATAL_ERROR "build taos-tools FIND_PACKAGE(Git) failed.")
ENDIF (GIT_FOUND)

STRING(STRIP "${TAOSDUMP_STATUS}" TAOSDUMP_STATUS)
STRING(STRIP "${TAOSBENCHMARK_STATUS}" TAOSBENCHMARK_STATUS)

IF (TAOSDUMP_STATUS MATCHES "M")
    SET(TAOSDUMP_STATUS "modified")
ELSE()
    SET(TAOSDUMP_STATUS "")
ENDIF ()

IF (TAOSBENCHMARK_STATUS MATCHES "M")
    SET(TAOSBENCHMARK_STATUS "modified")
ELSE()
    SET(TAOSBENCHMARK_STATUS "")
ENDIF ()

MESSAGE("")
MESSAGE("taosdump last tag: ${TAOSDUMP_TAG}")
MESSAGE("taosdump commit: ${TAOSDUMP_COMMIT_SHA1}")
MESSAGE("taosdump status: ${TAOSDUMP_STATUS}")
MESSAGE("")
MESSAGE("taosBenchmark last tag: ${TAOSBENCHMARK_TAG}")
MESSAGE("taosBenchmark commit: ${TAOSBENCHMARK_COMMIT_SHA1}")
MESSAGE("taosBenchmark status: ${TAOSBENCHMARK_STATUS}")
# build info
SET(BUILD_INFO "${TD_VER_OSTYPE}-${TD_VER_CPUTYPE} ${TD_VER_DATE}")
ADD_DEFINITIONS(-DBUILD_INFO="${BUILD_INFO}")
MESSAGE(STATUS "build:${BUILD_INFO}")
MESSAGE("")

ADD_DEFINITIONS(-DTAOSDUMP_TAG="${TAOSDUMP_TAG}")
ADD_DEFINITIONS(-DTAOSDUMP_COMMIT_SHA1="${TAOSDUMP_COMMIT_SHA1}")
ADD_DEFINITIONS(-DTAOSDUMP_STATUS="${TAOSDUMP_STATUS}")
ADD_DEFINITIONS(-DTAOSBENCHMARK_TAG="${TAOSBENCHMARK_TAG}")
ADD_DEFINITIONS(-DTAOSBENCHMARK_COMMIT_SHA1="${TAOSBENCHMARK_COMMIT_SHA1}")
ADD_DEFINITIONS(-DTAOSBENCHMARK_STATUS="${TAOSBENCHMARK_STATUS}")

#
# build proj
#
LINK_DIRECTORIES(${CMAKE_BINARY_DIR}/build/lib ${CMAKE_BINARY_DIR}/build/lib64)
LINK_DIRECTORIES(/usr/lib /usr/lib64)
INCLUDE_DIRECTORIES(/usr/local/taos/include)
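The block above wires the collected tag, commit id, and build info into the C tools as compile-time definitions. For comparison, the Go components of the repository typically receive the same kind of metadata at link time; the snippet below is a generic sketch of that pattern (the variable and flag values are illustrative, not taosKeeper's actual version plumbing).

```go
package main

import "fmt"

// These defaults are overridden at build time, e.g.:
//   go build -ldflags "-X main.version=3.3.5.0 -X main.gitCommit=$(git rev-parse HEAD)"
var (
	version   = "unknown"
	gitCommit = "unknown"
)

func main() {
	fmt.Printf("version: %s\ngit: %s\n", version, gitCommit)
}
```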
@ -252,6 +146,7 @@ IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux" OR ${CMAKE_SYSTEM_NAME} MATCHES "Darwin
    ENDIF()
ENDIF ()

# websocket
IF (${WEBSOCKET})
    ADD_DEFINITIONS(-DWEBSOCKET)
    INCLUDE_DIRECTORIES(/usr/local/include/)
@ -277,6 +172,7 @@ IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux" OR ${CMAKE_SYSTEM_NAME} MATCHES "Darwin
    SET(GCC_COVERAGE_LINK_FLAGS "-lgcov --coverage")
ENDIF ()

# sanitizer
IF (${BUILD_SANITIZER})
    MESSAGE("${Yellow} Enable memory sanitize by BUILD_SANITIZER ${ColourReset}")
    IF (${OS_ID} MATCHES "Darwin")
@ -288,6 +184,7 @@ IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux" OR ${CMAKE_SYSTEM_NAME} MATCHES "Darwin
    SET(TOOLS_SANITIZE_FLAG "")
ENDIF ()

# TOOLS_BUILD_TYPE
IF (${TOOLS_BUILD_TYPE} MATCHES "Debug")
    IF ((${TOOLS_SANITIZE} MATCHES "true") OR (${BUILD_SANITIZER}))
        MESSAGE("${Yellow} Enable memory sanitize by TOOLS_SANITIZE ${ColourReset}")
@ -10,26 +10,13 @@
 * FITNESS FOR A PARTICULAR PURPOSE.
 */

#include <bench.h>
#include "cus_name.h"   // include/util/
#include <bench.h>
#include "benchLog.h"
#include <toolsdef.h>

extern char g_configDir[MAX_PATH_LEN];

// get taosBenchmark commit number version
#ifndef TAOSBENCHMARK_COMMIT_SHA1
#define TAOSBENCHMARK_COMMIT_SHA1 "unknown"
#endif

#ifndef TAOSBENCHMARK_TAG
#define TAOSBENCHMARK_TAG "0.1.0"
#endif

#ifndef TAOSBENCHMARK_STATUS
#define TAOSBENCHMARK_STATUS "unknown"
#endif


char *g_aggreFuncDemo[] = {"*",
                           "count(*)",
                           "avg(current)",
@ -42,16 +29,10 @@ char *g_aggreFunc[] = {"*", "count(*)", "avg(C0)", "sum(C0)",
                       "max(C0)", "min(C0)", "first(C0)", "last(C0)"};

void printVersion() {
    char taosBenchmark_ver[] = TAOSBENCHMARK_TAG;
    char taosBenchmark_commit[] = TAOSBENCHMARK_COMMIT_SHA1;
    char taosBenchmark_status[] = TAOSBENCHMARK_STATUS;

    // version
    printf("taosBenchmark version: %s\ngit: %s\n", taosBenchmark_ver, taosBenchmark_commit);
    printf("build: %s\n", getBuildInfo());
    if (strlen(taosBenchmark_status) > 0) {
        printf("status: %s\n", taosBenchmark_status);
    }
    // version, macro define in src/CMakeLists.txt
    printf("%s\n%sBenchmark version: %s\n", TD_PRODUCT_NAME, CUS_PROMPT, TD_VER_NUMBER);
    printf("git: %s\n", TAOSBENCHMARK_COMMIT_ID);
    printf("build: %s\n", BUILD_INFO);
}

void parseFieldDatatype(char *dataType, BArray *fields, bool isTag) {
@ -12,6 +12,7 @@
#include <stdlib.h>
#include <bench.h>
#include "benchLog.h"
#include "cus_name.h"

#ifdef LINUX
#include <argp.h>
@ -21,23 +22,7 @@
#endif
#endif

extern char version[];

#if defined(CUS_NAME) || defined(CUS_PROMPT) || defined(CUS_EMAIL)
#include "cus_name.h"
#else
#ifndef CUS_NAME
#define CUS_NAME "TDengine"
#endif

#ifndef CUS_PROMPT
#define CUS_PROMPT "taos"
#endif

#ifndef CUS_EMAIL
#define CUS_EMAIL "<support@taosdata.com>"
#endif
#endif

#ifdef WINDOWS
char g_configDir[MAX_PATH_LEN] = {0};  // "C:\\TDengine\\cfg"};
@ -11,6 +11,7 @@
#define _GNU_SOURCE

#include "cus_name.h"   // include/util/
#include "dump.h"
#include "dumpUtil.h"
#ifdef WEBSOCKET
@ -236,8 +237,6 @@ struct arguments g_args = {
    1000           // retrySleepMs
};

static uint64_t getUniqueIDFromEpoch() {
    struct timeval tv;
@ -256,24 +255,17 @@ static uint64_t getUniqueIDFromEpoch() {
    return id;
}

// --version -V
static void printVersion(FILE *file) {
    char taostools_longver[] = TAOSDUMP_TAG;
    char taosdump_status[] = TAOSDUMP_STATUS;

    char *dupSeq = strdup(taostools_longver);
    char *running = dupSeq;
    char *taostools_ver = strsep(&running, "-");

    char taosdump_commit[] = TAOSDUMP_COMMIT_SHA1;

    fprintf(file, "taosdump version: %s\ngit: %s\n", taostools_ver, taosdump_commit);
    printf("build: %s\n", getBuildInfo());
    if (strlen(taosdump_status) > 0) {
        fprintf(file, "status:%s\n", taosdump_status);
    if (file == NULL) {
        printf("fail, printVersion file is null.\n");
        return;
    }

    free(dupSeq);
    // version, macro define in src/CMakeLists.txt
    fprintf(file, "%s\n%sdump version: %s\n", TD_PRODUCT_NAME, CUS_PROMPT, TD_VER_NUMBER);
    fprintf(file, "git: %s\n", TAOSDUMP_COMMIT_ID);
    fprintf(file, "build: %s\n", BUILD_INFO);
}

static char *typeToStr(int type) {
@ -8928,8 +8920,8 @@ static int dumpExtraInfoHead(void *taos, FILE *fp) {
                   errno, strerror(errno));
    }

    char taostools_ver[] = TAOSDUMP_TAG;
    char taosdump_commit[] = TAOSDUMP_COMMIT_SHA1;
    char taostools_ver[] = TD_VER_NUMBER;
    char taosdump_commit[] = TAOSDUMP_COMMIT_ID;

    snprintf(buffer, BUFFER_LEN, "#!"CUS_PROMPT"dump_ver: %s_%s\n",
             taostools_ver, taosdump_commit);
@ -9448,7 +9440,7 @@ static int dumpInDbs(const char *dbPath) {
    }
#endif

    int taosToolsMajorVer = atoi(TAOSDUMP_TAG);
    int taosToolsMajorVer = atoi(TD_VER_NUMBER);
    if ((g_dumpInDataMajorVer > 1) && (1 == taosToolsMajorVer)) {
        errorPrint("\tThe data file was generated by version %d\n"
                   "\tCannot be restored by current version: %d\n\n"