fix: conflicts from 3.0
@@ -53,6 +53,7 @@ def check_docs() {
 }
 sh '''
 cd ${WKC}
 git remote prune origin
 git pull >/dev/null
+git fetch origin +refs/pull/${CHANGE_ID}/merge
 git checkout -qf FETCH_HEAD
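For readers unfamiliar with this checkout pattern, a minimal sketch of what the added fetch does (the PR number 1234 below is hypothetical; CI substitutes `${CHANGE_ID}`, and GitHub exposes the pre-merged result of each PR under `refs/pull/<id>/merge`):

```shell
# Hypothetical PR number; in the pipeline this comes from ${CHANGE_ID}.
git fetch origin +refs/pull/1234/merge   # fetch the merge commit GitHub builds for the PR
git checkout -qf FETCH_HEAD              # quietly force-checkout the fetched commit (detached HEAD)
```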
@@ -210,14 +210,14 @@ cmake .. -G "NMake Makefiles"
 nmake
 ```

-### macOS 系统
+<!-- ### macOS 系统

 安装 Xcode 命令行工具和 cmake. 在 Catalina 和 Big Sur 操作系统上,需要安装 XCode 11.4+ 版本。

 ```bash
 mkdir debug && cd debug
 cmake .. && cmake --build .
-```
+``` -->

 # 安装

@@ -211,14 +211,14 @@ cmake .. -G "NMake Makefiles"
 nmake
 ```

-### On macOS platform
+<!-- ### On macOS platform

 Please install XCode command line tools and cmake. Verified with XCode 11.4+ on Catalina and Big Sur.

 ```shell
 mkdir debug && cd debug
 cmake .. && cmake --build .
-```
+``` -->

 # Installing
@@ -2,7 +2,7 @@
 IF (DEFINED VERNUMBER)
   SET(TD_VER_NUMBER ${VERNUMBER})
 ELSE ()
-  SET(TD_VER_NUMBER "3.0.0.0")
+  SET(TD_VER_NUMBER "3.0.0.1")
 ENDIF ()

 IF (DEFINED VERCOMPATIBLE)
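The ELSE branch above is only a fallback; the version can also be pinned at configure time. A sketch (the flag name follows the `IF (DEFINED VERNUMBER)` guard above):

```shell
mkdir -p debug && cd debug
cmake .. -DVERNUMBER=3.0.0.1   # overrides the default TD_VER_NUMBER set in the ELSE () branch
```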
@@ -2,7 +2,7 @@
 # taos-tools
 ExternalProject_Add(taos-tools
   GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
-  GIT_TAG d237772
+  GIT_TAG 2af2222
   SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
   BINARY_DIR ""
   #BUILD_IN_SOURCE TRUE

@@ -5,7 +5,7 @@ slug: /
 ---

-TDengine is an open source, cloud native time-series database optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. This document is the TDengine user manual. It introduces the basic, as well as novel concepts, in TDengine, and also talks in detail about installation, features, SQL, APIs, operation, maintenance, kernel design and other topics. It’s written mainly for architects, developers and system administrators.
+TDengine is an [open source](https://tdengine.com/tdengine/open-source-time-series-database/), [cloud native](https://tdengine.com/tdengine/cloud-native-time-series-database/) time-series database optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. This document is the TDengine user manual. It introduces the basic, as well as novel concepts, in TDengine, and also talks in detail about installation, features, SQL, APIs, operation, maintenance, kernel design and other topics. It’s written mainly for architects, developers and system administrators.

 To get an overview of TDengine, such as a feature list, benchmarks, and competitive advantages, please browse through the [Introduction](./intro) section.

@@ -3,7 +3,7 @@ title: Introduction
 toc_max_heading_level: 2
 ---

-TDengine is an open source, high-performance, cloud native time-series database optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. Its code, including its cluster feature is open source under GNU AGPL v3.0. Besides the database engine, it provides [caching](../develop/cache), [stream processing](../develop/stream), [data subscription](../develop/tmq) and other functionalities to reduce the system complexity and cost of development and operation.
+TDengine is an open source, high-performance, cloud native [time-series database](https://tdengine.com/tsdb/) optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. Its code, including its cluster feature is open source under GNU AGPL v3.0. Besides the database engine, it provides [caching](../develop/cache), [stream processing](../develop/stream), [data subscription](../develop/tmq) and other functionalities to reduce the system complexity and cost of development and operation.

 This section introduces the major features, competitive advantages, typical use-cases and benchmarks to help you get a high level overview of TDengine.
@@ -33,17 +33,17 @@ For more details on features, please read through the entire documentation.

 By making full use of [characteristics of time series data](https://tdengine.com/tsdb/characteristics-of-time-series-data/), TDengine differentiates itself from other time series databases, with the following advantages.

-- **High-Performance**: TDengine is the only time-series database to solve the high cardinality issue to support billions of data collection points while out performing other time-series databases for data ingestion, querying and data compression.
+- **[High-Performance](https://tdengine.com/tdengine/high-performance-time-series-database/)**: TDengine is the only time-series database to solve the high cardinality issue to support billions of data collection points while outperforming other time-series databases for data ingestion, querying and data compression.

-- **Simplified Solution**: Through built-in caching, stream processing and data subscription features, TDengine provides a simplified solution for time-series data processing. It reduces system design complexity and operation costs significantly.
+- **[Simplified Solution](https://tdengine.com/tdengine/simplified-time-series-data-solution/)**: Through built-in caching, stream processing and data subscription features, TDengine provides a simplified solution for time-series data processing. It reduces system design complexity and operation costs significantly.

-- **Cloud Native**: Through native distributed design, sharding and partitioning, separation of compute and storage, RAFT, support for kubernetes deployment and full observability, TDengine is a cloud native Time-Series Database and can be deployed on public, private or hybrid clouds.
+- **[Cloud Native](https://tdengine.com/tdengine/cloud-native-time-series-database/)**: Through native distributed design, sharding and partitioning, separation of compute and storage, RAFT, support for kubernetes deployment and full observability, TDengine is a cloud native Time-Series Database and can be deployed on public, private or hybrid clouds.

-- **Ease of Use**: For administrators, TDengine significantly reduces the effort to deploy and maintain. For developers, it provides a simple interface, simplified solution and seamless integrations for third party tools. For data users, it gives easy data access.
+- **[Ease of Use](https://tdengine.com/tdengine/easy-time-series-data-platform/)**: For administrators, TDengine significantly reduces the effort to deploy and maintain. For developers, it provides a simple interface, simplified solution and seamless integrations for third party tools. For data users, it gives easy data access.

-- **Easy Data Analytics**: Through super tables, storage and compute separation, data partitioning by time interval, pre-computation and other means, TDengine makes it easy to explore, format, and get access to data in a highly efficient way.
+- **[Easy Data Analytics](https://tdengine.com/tdengine/time-series-data-analytics-made-easy/)**: Through super tables, storage and compute separation, data partitioning by time interval, pre-computation and other means, TDengine makes it easy to explore, format, and get access to data in a highly efficient way.

-- **Open Source**: TDengine’s core modules, including cluster feature, are all available under open source licenses. It has gathered over 19k stars on GitHub. There is an active developer community, and over 140k running instances worldwide.
+- **[Open Source](https://tdengine.com/tdengine/open-source-time-series-database/)**: TDengine’s core modules, including cluster feature, are all available under open source licenses. It has gathered over 19k stars on GitHub. There is an active developer community, and over 140k running instances worldwide.

 With TDengine, the total cost of ownership of your time-series data platform can be greatly reduced. 1: With its superior performance, the computing and storage resources are reduced significantly;2: With SQL support, it can be seamlessly integrated with many third party tools, and learning costs/migration costs are reduced significantly;3: With its simplified solution and nearly zero management, the operation and maintenance costs are reduced significantly.

@@ -96,14 +97,13 @@ As a high-performance, scalable and SQL supported time-series database, TDengine
 | **System Maintenance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
 | ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ |
 | Native high-reliability | | | √ | TDengine has a very robust, reliable and easily configurable system architecture to simplify routine operation. Human errors and accidents are eliminated to the greatest extent, with a streamlined experience for operators. |
-| Minimize learning and maintenance costs | | | √ | In addition to being easily configurable, standard SQL support and the Taos shell for ad hoc queries makes maintenance simpler, allows reuse and reduces learning costs.|
+| Minimize learning and maintenance costs | | | √ | In addition to being easily configurable, standard SQL support and the TDengine CLI for ad hoc queries makes maintenance simpler, allows reuse and reduces learning costs.|
 | Abundant talent supply | √ | | | Given the above, and given the extensive training and professional services provided by TDengine, it is easy to migrate from existing solutions or create a new and lasting solution based on TDengine.|

 ## Comparison with other databases

 - [Writing Performance Comparison of TDengine and InfluxDB ](https://tdengine.com/2022/02/23/4975.html)
 - [Query Performance Comparison of TDengine and InfluxDB](https://tdengine.com/2022/02/24/5120.html)
 - [TDengine vs InfluxDB、OpenTSDB、Cassandra、MySQL、ClickHouse](https://www.tdengine.com/downloads/TDengine_Testing_Report_en.pdf)
 - [TDengine vs OpenTSDB](https://tdengine.com/2019/09/12/710.html)
 - [TDengine vs Cassandra](https://tdengine.com/2019/09/12/708.html)
 - [TDengine vs InfluxDB](https://tdengine.com/2019/09/12/706.html)
@@ -42,7 +42,7 @@ To do so, run the following command:
 ```

-This command creates the `meters` supertable in the `test` database. In the `meters` supertable, it then creates 10,000 subtables named `d0` to `d9999`. Each table has 10,000 rows and each row has four columns: `ts`, `current`, `voltage`, and `phase`. The timestamps of the data in these columns range from 2017-07-14 10:40:00 000 to 2017-07-14 10:40:09 999. Each table is randomly assigned a `groupId` tag from 1 to ten and a `location` tag of either `California.SanFrancisco` or `California.SanDiego`.
+This command creates the `meters` supertable in the `test` database. In the `meters` supertable, it then creates 10,000 subtables named `d0` to `d9999`. Each table has 10,000 rows and each row has four columns: `ts`, `current`, `voltage`, and `phase`. The timestamps of the data in these columns range from 2017-07-14 10:40:00 000 to 2017-07-14 10:40:09 999. Each table is randomly assigned a `groupId` tag from 1 to 10 and a `location` tag of either `Campbell`, `Cupertino`, `Los Angeles`, `Mountain View`, `Palo Alto`, `San Diego`, `San Francisco`, `San Jose`, `Santa Clara` or `Sunnyvale`.

 The `taosBenchmark` command creates a deployment with 100 million data points that you can use for testing purposes. The time required depends on the hardware specifications of the local system.
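A quick way to sanity-check the generated dataset from the TDengine CLI (a sketch; the `test` database and `meters` supertable follow the description above):

```shell
taos -s "SELECT COUNT(*) FROM test.meters;"                    # expect 100,000,000 rows
taos -s "SELECT AVG(current), MAX(voltage) FROM test.meters;"  # aggregate across all 10,000 subtables
```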
@@ -67,16 +67,6 @@ Users will be prompted to enter some configuration information when install.sh i

 </TabItem>
-
-<TabItem label="Windows" value="windows">
-
-1. Download the Windows installation package.
-   <PkgListV3 type={3}/>
-2. Run the downloaded package to install TDengine.
-:::info
-TDengine only supports Windows Server 2016/2019 and windows 10/11 system versions on the windows platform.
-:::
-
 </TabItem>
 <TabItem value="apt-get" label="apt-get">
 You can use `apt-get` to install TDengine from the official package repository.

@@ -105,6 +95,15 @@ sudo apt-get install tdengine
 :::tip
 This installation method is supported only for Debian and Ubuntu.
 ::::
 </TabItem>
+<TabItem label="Windows" value="windows">
+
+Note: TDengine only supports Windows Server 2016/2019 and Windows 10/11 system versions on the Windows platform.
+
+1. Download the Windows installation package.
+   <PkgListV3 type={3}/>
+2. Run the downloaded package to install TDengine.
+
+</TabItem>
 </Tabs>
@@ -846,7 +846,7 @@ SELECT FIRST(field_name) FROM { tb_name | stb_name } [WHERE clause];
 ### INTERP

 ```sql
-SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [ RANGE(timestamp1,timestamp2) ] [EVERY(interval)] [FILL ({ VALUE | PREV | NULL | LINEAR | NEXT})];
+SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] RANGE(timestamp1,timestamp2) EVERY(interval) FILL({ VALUE | PREV | NULL | LINEAR | NEXT});
 ```

 **Description**: The value that matches the specified timestamp range is returned, if existing; or an interpolation value is returned.

@@ -861,11 +861,10 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [

 - `INTERP` is used to get the value that matches the specified time slice from a column. If no such value exists an interpolation value will be returned based on `FILL` parameter.
 - The input data of `INTERP` is the value of the specified column and a `where` clause can be used to filter the original data. If no `where` condition is specified then all original data is the input.
-- The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1<=timestamp2. timestamp1 is the starting point of the output time range and must be specified. timestamp2 is the ending point of the output time range and must be specified. If `RANGE` is not specified, then the timestamp of the first row that matches the filter condition is treated as timestamp1, the timestamp of the last row that matches the filter condition is treated as timestamp2.
-- The number of rows in the result set of `INTERP` is determined by the parameter `EVERY`. Starting from timestamp1, one interpolation is performed for every time interval specified `EVERY` parameter. If `EVERY` parameter is not used, the time windows will be considered as no ending timestamp, i.e. there is only one time window from timestamp1.
-- Interpolation is performed based on `FILL` parameter. No interpolation is performed if `FILL` is not used, that means either the original data that matches is returned or nothing is returned.
-- `INTERP` can only be used to interpolate in single timeline. So it must be used with `group by tbname` when it's used on a STable. It can't be used with `GROUP BY` when it's used in the inner query of a nested query.
-- The result of `INTERP` is not influenced by `ORDER BY TIMESTAMP`, which impacts the output order only..
+- The output time range of `INTERP` is specified by the `RANGE(timestamp1,timestamp2)` parameter, with timestamp1 <= timestamp2. timestamp1 is the starting point of the output time range and must be specified. timestamp2 is the ending point of the output time range and must be specified.
+- The number of rows in the result set of `INTERP` is determined by the parameter `EVERY`. Starting from timestamp1, one interpolation is performed for every time interval specified by the `EVERY` parameter.
+- Interpolation is performed based on the `FILL` parameter.
+- `INTERP` can only be used to interpolate within a single timeline, so it must be used with `partition by tbname` when it is used on a STable.

 ### LAST
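A hedged example of the tightened syntax, run against the `test.meters` data generated by taosBenchmark earlier (a sketch; column names and timestamps assume that dataset):

```shell
taos -s "SELECT INTERP(current) FROM test.meters \
         PARTITION BY tbname \
         RANGE('2017-07-14 10:40:00.000', '2017-07-14 10:40:05.000') \
         EVERY(1s) FILL(LINEAR);"
```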
@@ -1,6 +1,6 @@
 ---
 sidebar_position: 1
-sidebar_label: PHP
+sidebar_label: PHP (community contribution)
 title: PHP Connector
 ---
@@ -150,10 +150,19 @@ If the test is successful, it will output the server version information, e.g.

 ```json
 {
-  "status": "succ",
-  "head": ["server_version()"],
-  "column_meta": [["server_version()", 8, 8]],
-  "data": [["2.4.0.16"]],
+  "code": 0,
+  "column_meta": [
+    [
+      "server_version()",
+      "VARCHAR",
+      7
+    ]
+  ],
+  "data": [
+    [
+      "3.0.0.0"
+    ]
+  ],
   "rows": 1
 }
 ```
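For reference, the request that produces a response of this shape (it appears verbatim later in this changeset; `<FQDN>` and `<PORT>` are placeholders to substitute):

```shell
curl -u root:taosdata http://<FQDN>:<PORT>/rest/sql -d "select server_version()"
```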
@@ -115,7 +115,7 @@ If you want to start your application in a container, you need to add the corres
 ```docker
 FROM ubuntu:20.04
 RUN apt-get update && apt-get install -y wget
-ENV TDENGINE_VERSION=2.4.0.0
+ENV TDENGINE_VERSION=3.0.0.0
 RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
    && tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
    && cd TDengine-client-${TDENGINE_VERSION} \

@@ -216,7 +216,7 @@ Here is the full Dockerfile:

 ```docker
 FROM golang:1.17.6-buster as builder
-ENV TDENGINE_VERSION=2.4.0.0
+ENV TDENGINE_VERSION=3.0.0.0
 RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
    && tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
    && cd TDengine-client-${TDENGINE_VERSION} \

@@ -232,7 +232,7 @@ RUN go build

 FROM ubuntu:20.04
 RUN apt-get update && apt-get install -y wget
-ENV TDENGINE_VERSION=2.4.0.0
+ENV TDENGINE_VERSION=3.0.0.0
 RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
    && tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
    && cd TDengine-client-${TDENGINE_VERSION} \
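After this version bump, application images must be rebuilt so the bundled client library matches the 3.0 server. A sketch (the image tag `myapp:3.0` is hypothetical):

```shell
docker build -t myapp:3.0 .   # bakes TDENGINE_VERSION=3.0.0.0 into the image
docker run --rm myapp:3.0     # client and server major versions must match
```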
@@ -320,7 +320,7 @@ password: taosdata
 2. Start the cluster

 ```shell
-$ VERSION=2.4.0.0 docker-compose up -d
+$ VERSION=3.0.0.0 docker-compose up -d
 Creating network "test_default" with the default driver
 Creating volume "test_taosdata-td1" with default driver
 Creating volume "test_taoslog-td1" with default driver

@@ -457,7 +457,7 @@ If you want to deploy a container-based TDengine cluster on multiple hosts, you
 The docker-compose file can refer to the previous section. Here is the command to start TDengine with docker swarm:

 ```shell
-$ VERSION=2.4.0 docker stack deploy -c docker-compose.yml taos
+$ VERSION=3.0.0.0 docker stack deploy -c docker-compose.yml taos
 Creating network taos_inter
 Creating network taos_api
 Creating service taos_arbitrator
@@ -473,20 +473,20 @@ Checking status:
 $ docker stack ps taos
 ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS
 79ni8temw59n taos_nginx.1 nginx:latest TM1701 Running Running about a minute ago
-3e94u72msiyg taos_adapter.1 tdengine/tdengine:2.4.0 TM1702 Running Running 56 seconds ago
-100amjkwzsc6 taos_td-2.1 tdengine/tdengine:2.4.0 TM1703 Running Running about a minute ago
-pkjehr2vvaaa taos_td-1.1 tdengine/tdengine:2.4.0 TM1704 Running Running 2 minutes ago
-tpzvgpsr1qkt taos_arbitrator.1 tdengine/tdengine:2.4.0 TM1705 Running Running 2 minutes ago
-rvss3g5yg6fa taos_adapter.2 tdengine/tdengine:2.4.0 TM1706 Running Running 56 seconds ago
-i2augxamfllf taos_adapter.3 tdengine/tdengine:2.4.0 TM1707 Running Running 56 seconds ago
-lmjyhzccpvpg taos_adapter.4 tdengine/tdengine:2.4.0 TM1708 Running Running 56 seconds ago
+3e94u72msiyg taos_adapter.1 tdengine/tdengine:3.0.0.0 TM1702 Running Running 56 seconds ago
+100amjkwzsc6 taos_td-2.1 tdengine/tdengine:3.0.0.0 TM1703 Running Running about a minute ago
+pkjehr2vvaaa taos_td-1.1 tdengine/tdengine:3.0.0.0 TM1704 Running Running 2 minutes ago
+tpzvgpsr1qkt taos_arbitrator.1 tdengine/tdengine:3.0.0.0 TM1705 Running Running 2 minutes ago
+rvss3g5yg6fa taos_adapter.2 tdengine/tdengine:3.0.0.0 TM1706 Running Running 56 seconds ago
+i2augxamfllf taos_adapter.3 tdengine/tdengine:3.0.0.0 TM1707 Running Running 56 seconds ago
+lmjyhzccpvpg taos_adapter.4 tdengine/tdengine:3.0.0.0 TM1708 Running Running 56 seconds ago
 $ docker service ls
 ID NAME MODE REPLICAS IMAGE PORTS
-561t4lu6nfw6 taos_adapter replicated 4/4 tdengine/tdengine:2.4.0
-3hk5ct3q90sm taos_arbitrator replicated 1/1 tdengine/tdengine:2.4.0
+561t4lu6nfw6 taos_adapter replicated 4/4 tdengine/tdengine:3.0.0.0
+3hk5ct3q90sm taos_arbitrator replicated 1/1 tdengine/tdengine:3.0.0.0
 d8qr52envqzu taos_nginx replicated 1/1 nginx:latest *:6041->6041/tcp, *:6044->6044/udp
-2isssfvjk747 taos_td-1 replicated 1/1 tdengine/tdengine:2.4.0
-9pzw7u02ichv taos_td-2 replicated 1/1 tdengine/tdengine:2.4.0
+2isssfvjk747 taos_td-1 replicated 1/1 tdengine/tdengine:3.0.0.0
+9pzw7u02ichv taos_td-2 replicated 1/1 tdengine/tdengine:3.0.0.0
 ```

 From the above output, you can see two dnodes, two taosAdapters, and one Nginx reverse proxy service.
@@ -502,5 +502,5 @@ verify: Service converged

 $ docker service ls -f name=taos_adapter
 ID NAME MODE REPLICAS IMAGE PORTS
-561t4lu6nfw6 taos_adapter replicated 1/1 tdengine/tdengine:2.4.0
+561t4lu6nfw6 taos_adapter replicated 1/1 tdengine/tdengine:3.0.0.0
 ```
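The scale-down that produces the `1/1` line above (a sketch; the service name follows the `taos` stack deployed earlier):

```shell
docker service scale taos_adapter=1      # shrink taosAdapter from 4 replicas to 1
docker service ls -f name=taos_adapter   # confirm the new replica count
```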
@@ -39,15 +39,8 @@ $ echo "foo:1|c" | nc -u -w0 127.0.0.1 8125
 Use the TDengine CLI to verify that StatsD data is written to TDengine and can read out correctly.

 ```
-Welcome to the TDengine shell from Linux, Client Version:2.4.0.0
-Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
-
-taos> show databases;
-name | created_time | ntables | vgroups | replica | quorum | days | keep | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | cachelast | precision | update | status |
-====================================================================================================================================================================================================================================================================================
-log | 2022-04-20 07:19:50.260 | 11 | 1 | 1 | 1 | 10 | 3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | 0 | ms | 0 | ready |
-statsd | 2022-04-20 09:54:51.220 | 1 | 1 | 1 | 1 | 10 | 3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | 0 | ns | 2 | ready |
-Query OK, 2 row(s) in set (0.003142s)
+Welcome to the TDengine shell from Linux, Client Version:3.0.0.0
+Copyright (c) 2022 by TAOS Data, Inc. All rights reserved.

+taos> use statsd;
+Database changed.
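A follow-up query in the same session (a sketch; the table name `foo` is an assumption based on StatsD's default mapping of the `foo:1|c` counter sent above):

```shell
taos -s "SELECT * FROM statsd.foo;"   # should return the counter value written via StatsD
```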
@@ -34,7 +34,7 @@ Please refer to the [official documentation](https://grafana.com/grafana/downloa

 ### TDengine

-Download the latest TDengine-server 2.4.0.x or above from the [Downloads](http://taosdata.com/cn/all-downloads/) page on the TAOSData website and install it.
+Download the latest TDengine-server from the [Downloads](http://tdengine.com/en/all-downloads/) page on the TAOSData website and install it.

 ## Data Connection Setup

@@ -79,5 +79,5 @@ Click on the plus icon on the left and select `Import` to get the data from `htt

 ## Wrap-up

-The above demonstrates how to quickly build a IT DevOps visualization system. Thanks to the new schemaless protocol parsing feature in TDengine version 2.4.0.0 and ability to integrate easily with a large software ecosystem, users can build an efficient and easy-to-use IT DevOps visualization system in just a few minutes.
+The above demonstrates how to quickly build an IT DevOps visualization system. Thanks to the schemaless protocol parsing feature in TDengine and its ability to integrate easily with a large software ecosystem, users can build an efficient and easy-to-use IT DevOps visualization system in just a few minutes.
 Please refer to the official documentation and product implementation cases for other features.

@@ -37,7 +37,7 @@ Please refer to the [official documentation](https://grafana.com/grafana/downloa

 ### Install TDengine

-Download the latest TDengine-server 2.4.0.x or above from the [Downloads](http://taosdata.com/cn/all-downloads/) page on the TAOSData website and install it.
+Download the latest TDengine-server from the [Downloads](http://tdengine.com/en/all-downloads/) page on the TAOSData website and install it.

 ## Data Connection Setup

@@ -99,6 +99,6 @@ Download the dashboard json from `https://github.com/taosdata/grafanaplugin/blob

 ## Wrap-up

-TDengine, as an emerging time-series big data platform, has the advantages of high performance, high reliability, easy management and easy maintenance. Thanks to the new schemaless protocol parsing feature in TDengine version 2.4.0.0 and ability to integrate easily with a large software ecosystem, users can build an efficient and easy-to-use IT DevOps visualization system, or adapt an existing system, in just a few minutes.
+TDengine, as an emerging time-series big data platform, has the advantages of high performance, high reliability, easy management and easy maintenance. Thanks to the schemaless protocol parsing feature in TDengine and its ability to integrate easily with a large software ecosystem, users can build an efficient and easy-to-use IT DevOps visualization system, or adapt an existing system, in just a few minutes.

 For TDengine's powerful data writing and querying performance and other features, please refer to the official documentation and successful product implementation cases.
@@ -11,10 +11,10 @@ conn: taos.TaosConnection = taos.connect(host="localhost",
 server_version = conn.server_info
 print("server_version", server_version)
 client_version = conn.client_info
-print("client_version", client_version)  # 2.4.0.16
+print("client_version", client_version)  # 3.0.0.0

 conn.close()

 # possible output:
-# 2.4.0.16
-# 2.4.0.16
+# 3.0.0.0
+# 3.0.0.0
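The same check as a shell one-liner (a sketch; assumes `taospy` is installed and a local server with default credentials):

```shell
python3 -c "import taos; conn = taos.connect(); print(conn.server_info, conn.client_info); conn.close()"
```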
@@ -22,8 +22,8 @@ TDengine的主要功能如下:
 9. 提供[命令行程序](../reference/taos-shell),便于管理集群,检查系统状态,做即席查询
 10. 提供多种数据的[导入](../operation/import)、[导出](../operation/export)
 11. 支持对[TDengine 集群本身的监控](../operation/monitor)
-12. 提供 [C/C++](../reference/connector/cpp), [Java](../reference/connector/java), [Python](../reference/connector/python), [Go](../reference/connector/go), [Rust](../reference/connector/rust), [Node.js](../reference/connector/node) 等多种编程语言的[连接器](../reference/connector/)
-13. 支持 [REST 接口](../reference/rest-api/)
+12. 提供各种语言的[连接器](../connector): 如 C/C++, Java, Go, Node.JS, Rust, Python, C# 等
+13. 支持 [REST 接口](../connector/rest-api/)
 14. 支持与[ Grafana 无缝集成](../third-party/grafana)
 15. 支持与 Google Data Studio 无缝集成
 16. 支持 [Kubernetes 部署](../deployment/k8s)

@@ -9,7 +9,7 @@ import PkgListV3 from "/components/PkgListV3";

 您可以[用 Docker 立即体验](../../get-started/docker/) TDengine。如果您希望对 TDengine 贡献代码或对内部实现感兴趣,请参考我们的 [TDengine GitHub 主页](https://github.com/taosdata/TDengine) 下载源码构建和安装.

-TDengine 完整的软件包包括服务端(taosd)、用于与第三方系统对接并提供 RESTful 接口的 taosAdapter、应用驱动(taosc)、命令行程序 (CLI,taos) 和一些工具软件。目前 taosAdapter 仅在 Linux 系统上安装和运行,后续将支持 Windows、macOS 等系统。TDengine 除了提供多种语言的连接器之外,还通过 [taosAdapter](../../reference/taosadapter/) 提供 [RESTful 接口](../../reference/rest-api/)。
+TDengine 完整的软件包包括服务端(taosd)、用于与第三方系统对接并提供 RESTful 接口的 taosAdapter、应用驱动(taosc)、命令行程序 (CLI,taos) 和一些工具软件。目前 taosAdapter 仅在 Linux 系统上安装和运行,后续将支持 Windows、macOS 等系统。TDengine 除了提供多种语言的连接器之外,还通过 [taosAdapter](../../reference/taosadapter/) 提供 [RESTful 接口](../../connector/rest-api/)。

 为方便使用,标准的服务端安装包包含了 taosd、taosAdapter、taosc、taos、taosdump、taosBenchmark、TDinsight 安装脚本和示例代码;如果您只需要用到服务端程序和客户端连接的 C/C++ 语言支持,也可以仅下载 lite 版本的安装包。
@@ -67,16 +67,6 @@ install.sh 安装脚本在执行过程中,会通过命令行交互界面询问

 </TabItem>
-
-<TabItem label="Windows 安装" value="windows">
-
-1. 从列表中下载获得 exe 安装程序;
-   <PkgListV3 type={3}/>
-2. 运行可执行程序来安装 TDengine。
-:::info
-目前 TDengine 在 Windows 平台上只支持 Windows server 2016/2019 和 Windows 10/11 系统版本。
-:::
-
 </TabItem>
 <TabItem value="apt-get" label="apt-get">
 可以使用 apt-get 工具从官方仓库安装。

@@ -105,11 +95,20 @@ sudo apt-get install tdengine
 :::tip
 apt-get 方式只适用于 Debian 或 Ubuntu 系统
 ::::
 </TabItem>
+<TabItem label="Windows 安装" value="windows">
+
+注意:目前 TDengine 在 Windows 平台上只支持 Windows server 2016/2019 和 Windows 10/11 系统版本。
+
+1. 从列表中下载获得 exe 安装程序;
+   <PkgListV3 type={3}/>
+2. 运行可执行程序来安装 TDengine。
+
+</TabItem>
 </Tabs>

 :::info
-下载其他组件、最新 Beta 版及之前版本的安装包,请点击[发布历史页面](../../releases)
+下载其他组件、最新 Beta 版及之前版本的安装包,请点击[发布历史页面](../../releases/tdengine)
 :::

 :::note
@@ -1,17 +0,0 @@
-import PkgList from "/components/PkgList";
-
-TDengine 的安装非常简单,从下载到安装成功仅仅只要几秒钟。
-
-为方便使用,从 2.4.0.10 开始,标准的服务端安装包包含了 taos、taosd、taosAdapter、taosdump、taosBenchmark、TDinsight 安装脚本和示例代码;如果您只需要用到服务端程序和客户端连接的 C/C++ 语言支持,也可以仅下载 lite 版本的安装包。
-
-在安装包格式上,我们提供 tar.gz, rpm 和 deb 格式,为企业客户提供 tar.gz 格式安装包,以方便在特定操作系统上使用。需要注意的是,rpm 和 deb 包不含 taosdump、taosBenchmark 和 TDinsight 安装脚本,这些工具需要通过安装 taosTool 包获得。
-
-发布版本包括稳定版和 Beta 版,Beta 版含有更多新功能。正式上线或测试建议安装稳定版。您可以根据需要选择下载:
-
-<PkgList type={0}/>
-
-具体的安装方法,请参见[安装包的安装和卸载](/operation/pkg-install)。
-
-下载其他组件、最新 Beta 版及之前版本的安装包,请点击[这里](https://www.taosdata.com/all-downloads)
-
-查看 Release Notes, 请点击[这里](https://github.com/taosdata/TDengine/releases)
@@ -3,7 +3,7 @@ title: 立即开始
 description: '快速设置 TDengine 环境并体验其高效写入和查询'
 ---

-TDengine 完整的软件包包括服务端(taosd)、用于与第三方系统对接并提供 RESTful 接口的 taosAdapter、应用驱动(taosc)、命令行程序 (CLI,taos) 和一些工具软件。TDengine 除了提供多种语言的连接器之外,还通过 [taosAdapter](/reference/taosadapter) 提供 [RESTful 接口](/reference/rest-api)。
+TDengine 完整的软件包包括服务端(taosd)、用于与第三方系统对接并提供 RESTful 接口的 taosAdapter、应用驱动(taosc)、命令行程序 (CLI,taos) 和一些工具软件。TDengine 除了提供多种语言的连接器之外,还通过 [taosAdapter](../reference/taosadapter) 提供 [RESTful 接口](../connector/rest-api)。

 本章主要介绍如何利用 Docker 或者安装包快速设置 TDengine 环境并体验其高效写入和查询。

@@ -12,4 +12,4 @@
 {{#include docs/examples/java/src/main/java/com/taos/example/WSConnectExample.java:main}}
 ```

-更多连接参数配置,参考[Java 连接器](/reference/connector/java)
+更多连接参数配置,参考[Java 连接器](../../connector/java)
@@ -14,10 +14,10 @@ import ConnCSNative from "./_connect_cs.mdx";
 import ConnC from "./_connect_c.mdx";
 import ConnR from "./_connect_r.mdx";
 import ConnPHP from "./_connect_php.mdx";
-import InstallOnWindows from "../../14-reference/03-connector/_linux_install.mdx";
-import InstallOnLinux from "../../14-reference/03-connector/_windows_install.mdx";
-import VerifyLinux from "../../14-reference/03-connector/_verify_linux.mdx";
-import VerifyWindows from "../../14-reference/03-connector/_verify_windows.mdx";
+import InstallOnWindows from "../../08-connector/_linux_install.mdx";
+import InstallOnLinux from "../../08-connector/_windows_install.mdx";
+import VerifyLinux from "../../08-connector/_verify_linux.mdx";
+import VerifyWindows from "../../08-connector/_verify_windows.mdx";

 TDengine 提供了丰富的应用程序开发接口,为了便于用户快速开发自己的应用,TDengine 支持了多种编程语言的连接器,其中官方连接器包括支持 C/C++、Java、Python、Go、Node.js、C#、Rust、Lua(社区贡献)和 PHP (社区贡献)的连接器。这些连接器支持使用原生接口(taosc)和 REST 接口(部分语言暂不支持)连接 TDengine 集群。社区开发者也贡献了多个非官方连接器,例如 ADO.NET 连接器、Lua 连接器和 PHP 连接器。

@@ -33,7 +33,7 @@ TDengine 提供了丰富的应用程序开发接口,为了便于用户快速
 关键不同点在于:

 1. 使用 REST 连接,用户无需安装客户端驱动程序 taosc,具有跨平台易用的优势,但性能要下降 30%左右。
-2. 使用原生连接可以体验 TDengine 的全部功能,如[参数绑定接口](/reference/connector/cpp#参数绑定-api)、[订阅](/reference/connector/cpp#订阅和消费-api)等等。
+2. 使用原生连接可以体验 TDengine 的全部功能,如[参数绑定接口](../../connector/cpp/#参数绑定-api)、[订阅](../../connector/cpp/#订阅和消费-api)等等。

 ## 安装客户端驱动 taosc
@@ -223,7 +223,7 @@ phpize && ./configure && make -j && make install
 **手动指定 TDengine 目录:**

 ```shell
-phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/2.4.0.0 && make -j && make install
+phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/3.0.0.0 && make -j && make install
 ```

 > `--with-tdengine-dir=` 后跟上 TDengine 目录。
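After `make install`, a quick load check (a sketch; the extension name `tdengine` is an assumption based on the php-tdengine project, and the extension must also be enabled in php.ini):

```shell
php -m | grep -i tdengine   # prints the extension name if it is registered and loadable
```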
@@ -64,7 +64,7 @@ DLL_EXPORT void tmq_conf_destroy(tmq_conf_t *conf);
 DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param);
 ```

-这些 API 的文档请见 [C/C++ Connector](/reference/connector/cpp),下面介绍一下它们的具体用法(超级表和子表结构请参考“数据建模”一节),完整的示例代码请见下面 C 语言的示例代码。
+这些 API 的文档请见 [C/C++ Connector](../../connector/cpp),下面介绍一下它们的具体用法(超级表和子表结构请参考“数据建模”一节),完整的示例代码请见下面 C 语言的示例代码。

 </TabItem>
 <TabItem value="java" label="Java">

@@ -12,7 +12,7 @@ title: 开发指南
 7. 在很多场景下(如车辆管理),应用需要获取每个数据采集点的最新状态,那么建议你采用TDengine的cache功能,而不用单独部署Redis等缓存软件。
 8. 如果你发现TDengine的函数无法满足你的要求,那么你可以使用用户自定义函数来解决问题。

-本部分内容就是按照上述的顺序组织的。为便于理解,TDengine为每个功能为每个支持的编程语言都提供了示例代码。如果你希望深入了解SQL的使用,需要查看[SQL手册](/taos-sql/)。如果想更深入地了解各连接器的使用,请阅读[连接器参考指南](/reference/connector/)。如果还希望想将TDengine与第三方系统集成起来,比如Grafana, 请参考[第三方工具](/third-party/)。
+本部分内容就是按照上述的顺序组织的。为便于理解,TDengine为每个功能为每个支持的编程语言都提供了示例代码。如果你希望深入了解SQL的使用,需要查看[SQL手册](/taos-sql/)。如果想更深入地了解各连接器的使用,请阅读[连接器参考指南](../connector/)。如果还希望想将TDengine与第三方系统集成起来,比如Grafana, 请参考[第三方工具](../third-party/)。

 如果在开发过程中遇到任何问题,请点击每个页面下方的["反馈问题"](https://github.com/taosdata/TDengine/issues/new/choose), 在GitHub上直接递交issue。
@@ -22,7 +22,7 @@ TDengine 客户端驱动的动态库位于:

 ## 支持的平台

-请参考[支持的平台列表](/reference/connector#支持的平台)
+请参考[支持的平台列表](../#支持的平台)

 ## 支持的版本

@@ -30,7 +30,7 @@ TDengine 客户端驱动的版本号与 TDengine 服务端的版本号是一一

 ## 安装步骤

-TDengine 客户端驱动的安装请参考 [安装指南](/reference/connector#安装步骤)
+TDengine 客户端驱动的安装请参考 [安装指南](../#安装步骤)

 ## 建立连接

@@ -35,7 +35,7 @@ REST 连接支持所有能运行 Java 的平台。

 ## 版本支持

-请参考[版本支持列表](/reference/connector#版本支持)
+请参考[版本支持列表](../#版本支持)

 ## TDengine DataType 和 Java DataType

@@ -64,7 +64,7 @@ TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对
 使用 Java Connector 连接数据库前,需要具备以下条件:

 - 已安装 Java 1.8 或以上版本运行时环境和 Maven 3.6 或以上版本
-- 已安装 TDengine 客户端驱动(使用原生连接必须安装,使用 REST 连接无需安装),具体步骤请参考[安装客户端驱动](/reference/connector#安装客户端驱动)
+- 已安装 TDengine 客户端驱动(使用原生连接必须安装,使用 REST 连接无需安装),具体步骤请参考[安装客户端驱动](../#安装客户端驱动)

 ### 安装连接器

@@ -630,7 +630,7 @@ public void setNString(int columnIndex, ArrayList<String> list, int size) throws

 ### 无模式写入

-TDengine 支持无模式写入功能。无模式写入兼容 InfluxDB 的 行协议(Line Protocol)、OpenTSDB 的 telnet 行协议和 OpenTSDB 的 JSON 格式协议。详情请参见[无模式写入](../../schemaless)。
+TDengine 支持无模式写入功能。无模式写入兼容 InfluxDB 的 行协议(Line Protocol)、OpenTSDB 的 telnet 行协议和 OpenTSDB 的 JSON 格式协议。详情请参见[无模式写入](../../reference/schemaless/)。

 **注意**:
@@ -9,11 +9,11 @@ import Tabs from '@theme/Tabs';
 import TabItem from '@theme/TabItem';

 import Preparition from "./_preparition.mdx"
-import GoInsert from "../../07-develop/03-insert-data/_go_sql.mdx"
-import GoInfluxLine from "../../07-develop/03-insert-data/_go_line.mdx"
-import GoOpenTSDBTelnet from "../../07-develop/03-insert-data/_go_opts_telnet.mdx"
-import GoOpenTSDBJson from "../../07-develop/03-insert-data/_go_opts_json.mdx"
-import GoQuery from "../../07-develop/04-query-data/_go.mdx"
+import GoInsert from "../07-develop/03-insert-data/_go_sql.mdx"
+import GoInfluxLine from "../07-develop/03-insert-data/_go_line.mdx"
+import GoOpenTSDBTelnet from "../07-develop/03-insert-data/_go_opts_telnet.mdx"
+import GoOpenTSDBJson from "../07-develop/03-insert-data/_go_opts_json.mdx"
+import GoQuery from "../07-develop/04-query-data/_go.mdx"

 `driver-go` 是 TDengine 的官方 Go 语言连接器,实现了 Go 语言[ database/sql ](https://golang.org/pkg/database/sql/) 包的接口。Go 开发人员可以通过它开发存取 TDengine 集群数据的应用软件。

@@ -30,7 +30,7 @@ REST 连接支持所有能运行 Go 的平台。

 ## 版本支持

-请参考[版本支持列表](/reference/connector#版本支持)
+请参考[版本支持列表](../#版本支持)

 ## 支持的功能特性

@@ -56,7 +56,7 @@ REST 连接支持所有能运行 Go 的平台。
 ### 安装前准备

 * 安装 Go 开发环境(Go 1.14 及以上,GCC 4.8.5 及以上)
-* 如果使用原生连接器,请安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](/reference/connector#安装客户端驱动)
+* 如果使用原生连接器,请安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](../#安装客户端驱动)

 配置好环境变量,检查命令:
@@ -9,9 +9,9 @@ import Tabs from '@theme/Tabs';
 import TabItem from '@theme/TabItem';

 import Preparition from "./_preparition.mdx"
-import RustInsert from "../../07-develop/03-insert-data/_rust_sql.mdx"
-import RustBind from "../../07-develop/03-insert-data/_rust_stmt.mdx"
-import RustQuery from "../../07-develop/04-query-data/_rust.mdx"
+import RustInsert from "../07-develop/03-insert-data/_rust_sql.mdx"
+import RustBind from "../07-develop/03-insert-data/_rust_stmt.mdx"
+import RustQuery from "../07-develop/04-query-data/_rust.mdx"

 [![Crates.io](https://img.shields.io/crates/v/taos)](https://crates.io/crates/taos) ![Crates.io](https://img.shields.io/crates/d/taos) [![docs.rs](https://img.shields.io/docsrs/taos)](https://docs.rs/taos)

@@ -28,7 +28,7 @@ Websocket 连接支持所有能运行 Rust 的平台。

 ## 版本支持

-请参考[版本支持列表](/reference/connector#版本支持)
+请参考[版本支持列表](../#版本支持)

 Rust 连接器仍然在快速开发中,1.0 之前无法保证其向后兼容。建议使用 3.0 版本以上的 TDengine,以避免已知问题。

@@ -37,7 +37,7 @@ Rust 连接器仍然在快速开发中,1.0 之前无法保证其向后兼容
 ### 安装前准备

 * 安装 Rust 开发工具链
-* 如果使用原生连接,请安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](/reference/connector#安装客户端驱动)
+* 如果使用原生连接,请安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](../#安装客户端驱动)

 ### 添加 taos 依赖
@@ -8,7 +8,7 @@ description: "taospy 是 TDengine 的官方 Python 连接器。taospy 提供了
 import Tabs from "@theme/Tabs";
 import TabItem from "@theme/TabItem";

-`taospy` 是 TDengine 的官方 Python 连接器。`taospy` 提供了丰富的 API, 使得 Python 应用可以很方便地使用 TDengine。`taospy` 对 TDengine 的[原生接口](/reference/connector/cpp)和 [REST 接口](/reference/rest-api)都进行了封装, 分别对应 `taospy` 包的 `taos` 模块 和 `taosrest` 模块。
+`taospy` 是 TDengine 的官方 Python 连接器。`taospy` 提供了丰富的 API, 使得 Python 应用可以很方便地使用 TDengine。`taospy` 对 TDengine 的[原生接口](../cpp)和 [REST 接口](../rest-api)都进行了封装, 分别对应 `taospy` 包的 `taos` 模块 和 `taosrest` 模块。
 除了对原生接口和 REST 接口的封装,`taospy` 还提供了符合 [Python 数据访问规范(PEP 249)](https://peps.python.org/pep-0249/) 的编程接口。这使得 `taospy` 和很多第三方工具集成变得简单,比如 [SQLAlchemy](https://www.sqlalchemy.org/) 和 [pandas](https://pandas.pydata.org/)。

 使用客户端驱动提供的原生接口直接与服务端建立的连接的方式下文中称为“原生连接”;使用 taosAdapter 提供的 REST 接口与服务端建立的连接的方式下文中称为“REST 连接”。

@@ -17,7 +17,7 @@ Python 连接器的源码托管在 [GitHub](https://github.com/taosdata/taos-con

 ## 支持的平台

-- 原生连接[支持的平台](/reference/connector/#支持的平台)和 TDengine 客户端支持的平台一致。
+- 原生连接[支持的平台](../#支持的平台)和 TDengine 客户端支持的平台一致。
 - REST 连接支持所有能运行 Python 的平台。

 ## 版本选择
@@ -150,10 +150,19 @@ curl -u root:taosdata http://<FQDN>:<PORT>/rest/sql -d "select server_version()"

 ```json
 {
-  "status": "succ",
-  "head": ["server_version()"],
-  "column_meta": [["server_version()", 8, 8]],
-  "data": [["2.4.0.16"]],
+  "code": 0,
+  "column_meta": [
+    [
+      "server_version()",
+      "VARCHAR",
+      7
+    ]
+  ],
+  "data": [
+    [
+      "3.0.0.0"
+    ]
+  ],
   "rows": 1
 }
 ```
@@ -266,7 +275,7 @@ TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线

 ##### RestClient 类的使用

-`RestClient` 类是对于 [REST API](/reference/rest-api) 的直接封装。它只包含一个 `sql()` 方法用于执行任意 SQL 语句, 并返回执行结果。
+`RestClient` 类是对于 [REST API](../rest-api) 的直接封装。它只包含一个 `sql()` 方法用于执行任意 SQL 语句, 并返回执行结果。

 ```python title="RestClient 的使用"
 {{#include docs/examples/python/rest_client_example.py}}
@@ -9,11 +9,11 @@ import Tabs from "@theme/Tabs";
 import TabItem from "@theme/TabItem";

 import Preparition from "./_preparition.mdx";
-import NodeInsert from "../../07-develop/03-insert-data/_js_sql.mdx";
-import NodeInfluxLine from "../../07-develop/03-insert-data/_js_line.mdx";
-import NodeOpenTSDBTelnet from "../../07-develop/03-insert-data/_js_opts_telnet.mdx";
-import NodeOpenTSDBJson from "../../07-develop/03-insert-data/_js_opts_json.mdx";
-import NodeQuery from "../../07-develop/04-query-data/_js.mdx";
+import NodeInsert from "../07-develop/03-insert-data/_js_sql.mdx";
+import NodeInfluxLine from "../07-develop/03-insert-data/_js_line.mdx";
+import NodeOpenTSDBTelnet from "../07-develop/03-insert-data/_js_opts_telnet.mdx";
+import NodeOpenTSDBJson from "../07-develop/03-insert-data/_js_opts_json.mdx";
+import NodeQuery from "../07-develop/04-query-data/_js.mdx";

 `@tdengine/client` 和 `@tdengine/rest` 是 TDengine 的官方 Node.js 语言连接器。 Node.js 开发人员可以通过它开发可以存取 TDengine 集群数据的应用软件。注意:从 TDengine 3.0 开始 Node.js 原生连接器的包名由 `td2.0-connector` 改名为 `@tdengine/client` 而 rest 连接器的包名由 `td2.0-rest-connector` 改为 `@tdengine/rest`。并且不与 TDengine 2.x 兼容。

@@ -28,7 +28,7 @@ REST 连接器支持所有能运行 Node.js 的平台。

 ## 版本支持

-请参考[版本支持列表](/reference/connector#版本支持)
+请参考[版本支持列表](../#版本支持)

 ## 支持的功能特性

@@ -52,7 +52,7 @@ REST 连接器支持所有能运行 Node.js 的平台。
 ### 安装前准备

 - 安装 Node.js 开发环境
-- 如果使用 REST 连接器,跳过此步。但如果使用原生连接器,请安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](/reference/connector#安装客户端驱动)。我们使用 [node-gyp](https://github.com/nodejs/node-gyp) 和 TDengine 实例进行交互,还需要根据具体操作系统来安装下文提到的一些依赖工具。
+- 如果使用 REST 连接器,跳过此步。但如果使用原生连接器,请安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](../#安装客户端驱动)。我们使用 [node-gyp](https://github.com/nodejs/node-gyp) 和 TDengine 实例进行交互,还需要根据具体操作系统来安装下文提到的一些依赖工具。

 <Tabs defaultValue="Linux">
 <TabItem value="Linux" label="Linux 系统安装依赖工具">
@@ -9,16 +9,16 @@ import Tabs from '@theme/Tabs';
 import TabItem from '@theme/TabItem';

 import Preparition from "./_preparition.mdx"
-import CSInsert from "../../07-develop/03-insert-data/_cs_sql.mdx"
-import CSInfluxLine from "../../07-develop/03-insert-data/_cs_line.mdx"
-import CSOpenTSDBTelnet from "../../07-develop/03-insert-data/_cs_opts_telnet.mdx"
-import CSOpenTSDBJson from "../../07-develop/03-insert-data/_cs_opts_json.mdx"
-import CSQuery from "../../07-develop/04-query-data/_cs.mdx"
-import CSAsyncQuery from "../../07-develop/04-query-data/_cs_async.mdx"
+import CSInsert from "../07-develop/03-insert-data/_cs_sql.mdx"
+import CSInfluxLine from "../07-develop/03-insert-data/_cs_line.mdx"
+import CSOpenTSDBTelnet from "../07-develop/03-insert-data/_cs_opts_telnet.mdx"
+import CSOpenTSDBJson from "../07-develop/03-insert-data/_cs_opts_json.mdx"
+import CSQuery from "../07-develop/04-query-data/_cs.mdx"
+import CSAsyncQuery from "../07-develop/04-query-data/_cs_async.mdx"

 `TDengine.Connector` 是 TDengine 提供的 C# 语言连接器。C# 开发人员可以通过它开发存取 TDengine 集群数据的 C# 应用软件。

-`TDengine.Connector` 连接器支持通过 TDengine 客户端驱动(taosc)建立与 TDengine 运行实例的连接,提供数据写入、查询、订阅、schemaless 数据写入、参数绑定接口数据写入等功能 `TDengine.Connector` 目前暂未提供 REST 连接方式,用户可以参考 [REST API](/reference/rest-api/) 文档自行编写。
+`TDengine.Connector` 连接器支持通过 TDengine 客户端驱动(taosc)建立与 TDengine 运行实例的连接,提供数据写入、查询、订阅、schemaless 数据写入、参数绑定接口数据写入等功能。`TDengine.Connector` 目前暂未提供 REST 连接方式,用户可以参考 [REST API](../rest-api/) 文档自行编写。

 本文介绍如何在 Linux 或 Windows 环境中安装 `TDengine.Connector`,并通过 `TDengine.Connector` 连接 TDengine 集群,进行数据写入、查询等基本操作。

@@ -32,7 +32,7 @@ import CSAsyncQuery from "../../07-develop/04-query-data/_cs_async.mdx"

 ## 版本支持

-请参考[版本支持列表](/reference/connector#版本支持)
+请参考[版本支持列表](../#版本支持)

 ## 支持的功能特性

@@ -49,7 +49,7 @@ import CSAsyncQuery from "../../07-develop/04-query-data/_cs_async.mdx"

 * 安装 [.NET SDK](https://dotnet.microsoft.com/download)
 * [Nuget 客户端](https://docs.microsoft.com/en-us/nuget/install-nuget-client-tools) (可选安装)
-* 安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](/reference/connector#安装客户端驱动)
+* 安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](../#安装客户端驱动)

 ### 使用 dotnet CLI 安装
@@ -1,6 +1,6 @@
 ---
 sidebar_position: 1
-sidebar_label: PHP
+sidebar_label: PHP(社区贡献)
 title: PHP Connector
 ---

@@ -38,7 +38,7 @@ TDengine 客户端驱动的版本号与 TDengine 服务端的版本号是一一

 ### 安装 TDengine 客户端驱动

-TDengine 客户端驱动的安装请参考 [安装指南](/reference/connector#安装步骤)
+TDengine 客户端驱动的安装请参考 [安装指南](../#安装步骤)

 ### 编译安装 php-tdengine

@@ -61,7 +61,7 @@ phpize && ./configure && make -j && make install
 **手动指定 tdengine 目录:**

 ```shell
-phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/2.4.0.0 && make -j && make install
+phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/3.0.0.0 && make -j && make install
 ```

 > `--with-tdengine-dir=` 后跟上 tdengine 目录。
@@ -4,7 +4,7 @@ import PkgListV3 from "/components/PkgListV3";

 <PkgListV3 type={1} sys="Linux" />

-[所有下载](../../releases)
+[所有下载](../../releases/tdengine)

 2. 解压缩软件包

@@ -4,7 +4,8 @@ import PkgListV3 from "/components/PkgListV3";

 <PkgListV3 type={4} sys="Windows" />

-[所有下载](../../releases)
+[所有下载](../../releases/tdengine)

 2. 执行安装程序,按提示选择默认值,完成安装
+3. 安装路径
@@ -1,5 +1,7 @@
 ---
+sidebar_label: 连接器
 title: 连接器
+description: 详细介绍各种语言的连接器及 REST API
 ---

 TDengine 提供了丰富的应用程序开发接口,为了便于用户快速开发自己的应用,TDengine 支持了多种编程语言的连接器,其中官方连接器包括支持 C/C++、Java、Python、Go、Node.js、C# 和 Rust 的连接器。这些连接器支持使用原生接口(taosc)和 REST 接口(部分语言暂不支持)连接 TDengine 集群。社区开发者也贡献了多个非官方连接器,例如 ADO.NET 连接器、Lua 连接器和 PHP 连接器。
@@ -846,7 +846,7 @@ SELECT FIRST(field_name) FROM { tb_name | stb_name } [WHERE clause];
 ### INTERP

 ```sql
-SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [ RANGE(timestamp1,timestamp2) ] [EVERY(interval)] [FILL ({ VALUE | PREV | NULL | LINEAR | NEXT})];
+SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] RANGE(timestamp1,timestamp2) EVERY(interval) FILL({ VALUE | PREV | NULL | LINEAR | NEXT});
 ```

 **功能说明**:返回指定时间截面指定列的记录值或插值。

@@ -855,17 +855,16 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [

 **适用数据类型**:数值类型。

-**适用于**:表、超级表。
+**适用于**:表和超级表。

 **使用说明**

 - INTERP 用于在指定时间断面获取指定列的记录值,如果该时间断面不存在符合条件的行数据,那么会根据 FILL 参数的设定进行插值。
 - INTERP 的输入数据为指定列的数据,可以通过条件语句(where 子句)来对原始列数据进行过滤,如果没有指定过滤条件则输入为全部数据。
-- INTERP 的输出时间范围根据 RANGE(timestamp1,timestamp2)字段来指定,需满足 timestamp1<=timestamp2。其中 timestamp1(必选值)为输出时间范围的起始值,即如果 timestamp1 时刻符合插值条件则 timestamp1 为输出的第一条记录,timestamp2(必选值)为输出时间范围的结束值,即输出的最后一条记录的 timestamp 不能大于 timestamp2。如果没有指定 RANGE,那么满足过滤条件的输入数据中第一条记录的 timestamp 即为 timestamp1,最后一条记录的 timestamp 即为 timestamp2,同样也满足 timestamp1 <= timestamp2。
+- INTERP 的输出时间范围根据 RANGE(timestamp1,timestamp2)字段来指定,需满足 timestamp1<=timestamp2。其中 timestamp1(必选值)为输出时间范围的起始值,即如果 timestamp1 时刻符合插值条件则 timestamp1 为输出的第一条记录,timestamp2(必选值)为输出时间范围的结束值,即输出的最后一条记录的 timestamp 不能大于 timestamp2。
 - INTERP 根据 EVERY 字段来确定输出时间范围内的结果条数,即从 timestamp1 开始每隔固定长度的时间(EVERY 值)进行插值。如果没有指定 EVERY,则默认窗口大小为无穷大,即从 timestamp1 开始只有一个窗口。
-- INTERP 根据 FILL 字段来决定在每个符合输出条件的时刻如何进行插值,如果没有 FILL 字段则默认不插值,即输出为原始记录值或不输出(原始记录不存在)。
-- INTERP 只能在一个时间序列内进行插值,因此当作用于超级表时必须跟 group by tbname 一起使用,当作用嵌套查询外层时内层子查询不能含 GROUP BY 信息。
-- INTERP 的插值结果不受 ORDER BY timestamp 的影响,ORDER BY timestamp 只影响输出结果的排序。
+- INTERP 根据 FILL 字段来决定在每个符合输出条件的时刻如何进行插值。
+- INTERP 只能在一个时间序列内进行插值,因此当作用于超级表时必须跟 partition by tbname 一起使用。

 ### LAST
@@ -18,7 +18,7 @@ stream_options: {
 其中 subquery 是 select 普通查询语法的子集:

 ```sql
-subquery: SELECT [DISTINCT] select_list
+subquery: SELECT select_list
     from_clause
     [WHERE condition]
     [PARTITION BY tag_list]

@@ -37,7 +37,7 @@ window_clause: {

 其中,SESSION 是会话窗口,tol_val 是时间间隔的最大范围。在 tol_val 时间间隔范围内的数据都属于同一个窗口,如果连续的两条数据的时间超过 tol_val,则自动开启下一个窗口。

-窗口的定义与时序数据特色查询中的定义完全相同。
+窗口的定义与时序数据特色查询中的定义完全相同,详见 [TDengine 特色查询](../distinguished)。

 例如,如下语句创建流式计算,同时自动创建名为 avg_vol 的超级表,此流计算以一分钟为时间窗口、30 秒为前向增量统计这些电表的平均电压,并将来自 meters 表的数据的计算结果写入 avg_vol 表,不同 partition 的数据会分别创建子表并写入不同子表。
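The statement described in that paragraph roughly looks as follows (a sketch; the stream name `avg_vol_s` is hypothetical, while the 1-minute window and 30-second slide follow the text):

```shell
taos -s "CREATE STREAM avg_vol_s INTO avg_vol AS \
         SELECT _wstart, AVG(voltage) FROM meters \
         PARTITION BY tbname INTERVAL(1m) SLIDING(30s);"
```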
@@ -15,9 +15,6 @@ title: 转义字符说明
 | `\%` | % 规则见下 |
 | `\_` | \_ 规则见下 |

-:::note
-转义符的功能从 2.4.0.4 版本开始
-:::

 ## 转义字符使用规则

@@ -1 +0,0 @@
-label: REST API
@@ -30,7 +30,7 @@ taosAdapter 提供以下功能:

 ### 安装 taosAdapter

-taosAdapter 从 TDengine v2.4.0.0 版本开始成为 TDengine 服务端软件 的一部分,如果您使用 TDengine server 您不需要任何额外的步骤来安装 taosAdapter。您可以从[涛思数据官方网站](https://taosdata.com/cn/all-downloads/)下载 TDengine server(taosAdapter 包含在 v2.4.0.0 及以上版本)安装包。如果需要将 taosAdapter 分离部署在 TDengine server 之外的服务器上,则应该在该服务器上安装完整的 TDengine 来安装 taosAdapter。如果您需要使用源代码编译生成 taosAdapter,您可以参考[构建 taosAdapter](https://github.com/taosdata/taosadapter/blob/3.0/BUILD-CN.md)文档。
+taosAdapter 是 TDengine 服务端软件的一部分,如果您使用 TDengine server 您不需要任何额外的步骤来安装 taosAdapter。您可以从[涛思数据官方网站](https://taosdata.com/cn/all-downloads/)下载 TDengine server 安装包。如果需要将 taosAdapter 分离部署在 TDengine server 之外的服务器上,则应该在该服务器上安装完整的 TDengine 来安装 taosAdapter。如果您需要使用源代码编译生成 taosAdapter,您可以参考[构建 taosAdapter](https://github.com/taosdata/taosadapter/blob/3.0/BUILD-CN.md)文档。

 ### start/stop taosAdapter

@@ -156,7 +156,7 @@ AllowWebSockets
 ## 功能列表

 - RESTful 接口
-  [https://docs.taosdata.com/reference/rest-api/](https://docs.taosdata.com/reference/rest-api/)
+  [RESTful API](../../connector/rest-api)
 - 兼容 InfluxDB v1 写接口
   [https://docs.influxdata.com/influxdb/v2.0/reference/api/influxdb-1x/write/](https://docs.influxdata.com/influxdb/v2.0/reference/api/influxdb-1x/write/)
 - 兼容 OpenTSDB JSON 和 telnet 格式写入
@@ -179,7 +179,7 @@ AllowWebSockets

 ### TDengine RESTful 接口

-您可以使用任何支持 http 协议的客户端通过访问 RESTful 接口地址 `http://<fqdn>:6041/rest/sql` 来写入数据到 TDengine 或从 TDengine 中查询数据。细节请参考[官方文档](/reference/rest-api/)。
+您可以使用任何支持 http 协议的客户端通过访问 RESTful 接口地址 `http://<fqdn>:6041/rest/sql` 来写入数据到 TDengine 或从 TDengine 中查询数据。细节请参考[官方文档](../../connector/rest-api/)。

 ### InfluxDB
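A minimal round trip against this endpoint (a sketch; assumes a local taosAdapter with the default root/taosdata credentials):

```shell
curl -u root:taosdata -d "CREATE DATABASE IF NOT EXISTS demo" http://localhost:6041/rest/sql
curl -u root:taosdata -d "SHOW DATABASES" http://localhost:6041/rest/sql
```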
@@ -3,19 +3,28 @@ title: TDinsight - 基于Grafana的TDengine零依赖监控解决方案
 sidebar_label: TDinsight
 ---

-TDinsight 是使用内置监控数据库和 [Grafana] 对 TDengine 进行监控的解决方案。
+TDinsight 是使用监控数据库和 [Grafana] 对 TDengine 进行监控的解决方案。

-TDengine 启动后,会自动创建一个监测数据库 `log`,并自动将服务器的 CPU、内存、硬盘空间、带宽、请求数、磁盘读写速度、慢查询等信息定时写入该数据库,并对重要的系统操作(比如登录、创建、删除数据库等)以及各种错误报警信息进行记录。通过 [Grafana] 和 [TDengine 数据源插件](https://github.com/taosdata/grafanaplugin/releases),TDinsight 将集群状态、节点信息、插入及查询请求、资源使用情况等进行可视化展示,同时还支持 vnode、dnode、mnode 节点状态异常告警,为开发者实时监控 TDengine 集群运行状态提供了便利。本文将指导用户安装 Grafana 服务器并通过 `TDinsight.sh` 安装脚本自动安装 TDengine 数据源插件及部署 TDinsight 可视化面板。
+TDengine 通过 [taosKeeper](../taosKeeper) 将服务器的 CPU、内存、硬盘空间、带宽、请求数、磁盘读写速度、慢查询等信息定时写入指定数据库,并对重要的系统操作(比如登录、创建、删除数据库等)以及各种错误报警信息进行记录。通过 [Grafana] 和 [TDengine 数据源插件](https://github.com/taosdata/grafanaplugin/releases),TDinsight 将集群状态、节点信息、插入及查询请求、资源使用情况等进行可视化展示,同时还支持 vnode、dnode、mnode 节点状态异常告警,为开发者实时监控 TDengine 集群运行状态提供了便利。本文将指导用户安装 Grafana 服务器并通过 `TDinsight.sh` 安装脚本自动安装 TDengine 数据源插件及部署 TDinsight 可视化面板。

 ## 系统要求

-要部署 TDinsight,需要一个单节点的 TDengine 服务器或一个多节点的 [TDengine] 集群,以及一个[Grafana]服务器。此仪表盘需要 TDengine 2.3.3.0 及以上,并启用 `log` 数据库(`monitor = 1`)。
+- 单节点的 TDengine 服务器或多节点的 [TDengine] 集群,以及一个[Grafana]服务器。此仪表盘需要 TDengine 3.0.0.0 及以上,并开启监控服务,具体配置请参考:[TDengine 监控配置](../config/#监控相关)。
+- taosAdapter 已经安装并正常运行。具体细节请参考:[taosAdapter 使用手册](../taosadapter)
+- taosKeeper 已安装并正常运行。具体细节请参考:[taosKeeper 使用手册](../taosKeeper)
+
+记录以下信息:
+
+- taosAdapter 集群 REST API 地址,如:`http://tdengine.local:6041`。
+- taosAdapter 集群认证信息,可使用用户名及密码。
+- taosKeeper 记录监控指标的数据库名称。

 ## 安装 Grafana

-我们建议在此处使用最新的[Grafana] 7 或 8 版本。您可以在任何[支持的操作系统](https://grafana.com/docs/grafana/latest/installation/requirements/#supported-operating-systems)中,按照 [Grafana 官方文档安装说明](https://grafana.com/docs/grafana/latest/installation/) 安装 [Grafana]。
+我们建议在此处使用最新的[Grafana] 8 或 9 版本。您可以在任何[支持的操作系统](https://grafana.com/docs/grafana/latest/installation/requirements/#supported-operating-systems)中,按照 [Grafana 官方文档安装说明](https://grafana.com/docs/grafana/latest/installation/) 安装 [Grafana]。

-### 在 Debian 或 Ubuntu 上安装 Grafana
+<Tabs defaultValue="debian" groupId="install">
+<TabItem value="debian" label="基于 Debian 或 Ubuntu 系统">

 对于 Debian 或 Ubuntu 操作系统,建议使用 Grafana 镜像仓库。使用如下命令从零开始安装:
@@ -31,6 +40,8 @@ sudo apt-get install grafana
 ```

-### 在 CentOS / RHEL 上安装 Grafana
+</TabItem>
+<TabItem value="redhat" label="基于 CentOS / RHEL 系统">

 您可以从官方 YUM 镜像仓库安装。

@@ -59,7 +70,12 @@ sudo yum install \
 https://dl.grafana.com/oss/release/grafana-7.5.11-1.x86_64.rpm
 ```

-## 自动部署 TDinsight
+</TabItem>
+
+</Tabs>
+
+<Tabs defaultValue="auto" groupId="deploy">
+<TabItem value="auto" label="自动部署 TDinsight">

 我们提供了一个自动化安装脚本 [`TDinsight.sh`](https://github.com/taosdata/grafanaplugin/releases/latest/download/TDinsight.sh) 脚本以便用户快速进行安装配置。
@ -71,7 +87,7 @@ chmod +x TDinsight.sh
|
|||
./TDinsight.sh
|
||||
```
|
||||
|
||||
这个脚本会自动下载最新的[Grafana TDengine 数据源插件](https://github.com/taosdata/grafanaplugin/releases/latest) 和 [TDinsight 仪表盘](https://grafana.com/grafana/dashboards/15167) ,将命令行选项中的可配置参数转为 [Grafana Provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) 配置文件,以进行自动化部署及更新等操作。利用该脚本提供的告警设置选项,你还可以获得内置的阿里云短信告警通知支持。
|
||||
这个脚本会自动下载最新的[Grafana TDengine 数据源插件](https://github.com/taosdata/grafanaplugin/releases/latest) 和 [TDinsight 仪表盘](https://github.com/taosdata/grafanaplugin/blob/master/dashboards/TDinsightV3.json) ,将命令行选项中的可配置参数转为 [Grafana Provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) 配置文件,以进行自动化部署及更新等操作。利用该脚本提供的告警设置选项,你还可以获得内置的阿里云短信告警通知支持。
|
||||
|
||||
假设您在同一台主机上使用 TDengine 和 Grafana 的默认服务。运行 `./TDinsight.sh` 并打开 Grafana 浏览器窗口就可以看到 TDinsight 仪表盘了。
|
||||
|
||||
|
@ -106,18 +122,6 @@ Install and configure TDinsight dashboard in Grafana on Ubuntu 18.04/20.04 syste
|
|||
|
||||
-E, --external-notifier <string> Apply external notifier uid to TDinsight dashboard.
|
||||
|
||||
Aliyun SMS as Notifier:
|
||||
-s, --sms-enabled To enable tdengine-datasource plugin builtin Aliyun SMS webhook.
|
||||
-N, --sms-notifier-name <string> Provisioning notifier name.[default: TDinsight Builtin SMS]
|
||||
-U, --sms-notifier-uid <string> Provisioning notifier uid, use lowercase notifier name by default.
|
||||
-D, --sms-notifier-is-default Set notifier as default.
|
||||
-I, --sms-access-key-id <string> Aliyun SMS access key id
|
||||
-K, --sms-access-key-secret <string> Aliyun SMS access key secret
|
||||
-S, --sms-sign-name <string> Sign name
|
||||
-C, --sms-template-code <string> Template code
|
||||
-T, --sms-template-param <string> Template param, a escaped JSON string like '{"alarm_level":"%s","time":"%s","name":"%s","content":"%s"}'
|
||||
-B, --sms-phone-numbers <string> Comma-separated numbers list, eg "189xxxxxxxx,132xxxxxxxx"
|
||||
-L, --sms-listen-addr <string> [default: 127.0.0.1:9100]
|
||||
```
|
||||
|
||||
大多数命令行选项都可以通过环境变量获得同样的效果。
|
||||
|
@ -136,17 +140,6 @@ Aliyun SMS as Notifier:
|
|||
| -t | --tdinsight-title | TDINSIGHT_DASHBOARD_TITLE | TDinsight 仪表盘标题。 [默认:TDinsight] |
|
||||
| -e | --tdinsight-可编辑 | TDINSIGHT_DASHBOARD_EDITABLE | 如果配置仪表盘可以编辑。 [默认值:false] |
|
||||
| -E | --external-notifier | EXTERNAL_NOTIFIER | 将外部通知程序 uid 应用于 TDinsight 仪表盘。 |
|
||||
| -s | --sms-enabled | SMS_ENABLED | 启用阿里云短信 webhook 内置的 tdengine-datasource 插件。 |
|
||||
| -N | --sms-notifier-name | SMS_NOTIFIER_NAME | 供应通知程序名称。[默认:`TDinsight Builtin SMS`] |
|
||||
| -U | --sms-notifier-uid | SMS_NOTIFIER_UID | "Notification Channel" `uid`,默认使用程序名称的小写,其他字符用 “-” 代替。 |
|
||||
| -D | --sms-notifier-is-default | SMS_NOTIFIER_IS_DEFAULT | 将内置短信通知设置为默认值。 |
|
||||
| -I | --sms-access-key-id | SMS_ACCESS_KEY_ID | 阿里云短信访问密钥 id |
|
||||
| -K | --sms-access-key-secret | SMS_ACCESS_KEY_SECRET | 阿里云短信访问秘钥 |
|
||||
| -S | --sms-sign-name | SMS_SIGN_NAME | 签名 |
|
||||
| -C | --sms-template-code | SMS_TEMPLATE_CODE | 模板代码 |
|
||||
| -T | --sms-template-param | SMS_TEMPLATE_PARAM | 模板参数的 JSON 模板 |
|
||||
| -B | --sms-phone-numbers | SMS_PHONE_NUMBERS | 逗号分隔的手机号列表,例如`"189xxxxxxxx,132xxxxxxxx"` |
|
||||
| -L | --sms-listen-addr | SMS_LISTEN_ADDR | 内置 SMS webhook 监听地址,默认为`127.0.0.1:9100` |
|
||||
|
||||
假设您在主机 `tdengine` 上启动 TDengine 数据库,HTTP API 端口为 `6041`,用户为 `root1`,密码为 `pass5ord`。执行脚本:
|
||||
|
||||
|
@ -166,31 +159,18 @@ curl --no-progress-meter -u admin:admin http://localhost:3000/api/alert-notifica
|
|||
sudo ./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord -E existing-notifier
|
||||
```
|
||||
|
||||
如果你想使用[阿里云短信](https://www.aliyun.com/product/sms)服务作为通知渠道,你应该使用`-s`标志启用并添加以下参数:
|
||||
|
||||
- `-N`:Notification Channel 名,默认为`TDinsight Builtin SMS`。
|
||||
- `-U`:Channel uid,默认是 `name` 的小写,任何其他字符都替换为 - ,对于默认的 `-N`,其 uid 为 `tdinsight-builtin-sms`。
|
||||
- `-I`:阿里云短信访问密钥 id。
|
||||
- `-K`:阿里云短信访问秘钥。
|
||||
- `-S`:阿里云短信签名。
|
||||
- `-C`:阿里云短信模板 ID。
|
||||
- `-T`:阿里云短信模板参数,为 JSON 格式模板,示例如下 `'{"alarm_level":"%s","time":"%s","name":"%s","content":"%s "}'`。有四个参数:告警级别、时间、名称和告警内容。
|
||||
- `-B`:电话号码列表,以逗号`,`分隔。
|
||||
|
||||
如果要监控多个 TDengine 集群,则需要设置多个 TDinsight 仪表盘。设置非默认 TDinsight 需要进行一些更改: `-n` `-i` `-t` 选项需要更改为非默认名称,如果使用 内置短信告警功能,`-N` 和 `-L` 也应该改变。
|
||||
|
||||
```bash
|
||||
sudo ./TDengine.sh -n TDengine-Env1 -a http://another:6041 -u root -p taosdata -i tdinsight-env1 -t 'TDinsight Env1'
|
||||
# 如果使用内置短信通知
|
||||
sudo ./TDengine.sh -n TDengine-Env1 -a http://another:6041 -u root -p taosdata -i tdinsight-env1 -t 'TDinsight Env1' \
|
||||
-s -N 'Env1 SMS' -I xx -K xx -S xx -C SMS_XX -T '' -B 00000000000 -L 127.0.0.01:10611
|
||||
```
|
||||
|
||||
请注意,配置数据源、通知 Channel 和仪表盘在前端是不可更改的。您应该再次通过此脚本更新配置或手动更改 `/etc/grafana/provisioning` 目录(这是 Grafana 的默认目录,根据需要使用`-P`选项更改)中的配置文件。
|
||||
|
||||
特别地,当您使用 Grafana Cloud 或其他组织时,`-O` 可用于设置组织 ID。 `-G` 可指定 Grafana 插件安装目录。 `-e` 参数将仪表盘设置为可编辑。
|
||||
|
||||
## 手动设置 TDinsight
|
||||
</TabItem>
|
||||
<TabItem label="manual" value="手动设置 TDinsight">
|
||||
|
||||
### 安装 TDengine 数据源插件
|
||||
|
||||
|
@ -247,23 +227,30 @@ sudo systemctl enable grafana-server
|
|||
|
||||

|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
### 导入仪表盘
|
||||
|
||||
指向 **+** / **Create** - **import**(或 `/dashboard/import` url)。
|
||||
在配置 TDengine 数据源界面,点击 **Dashboards** tab。
|
||||
|
||||

|
||||
|
||||
在 **Import via grafana.com** 位置键入仪表盘 ID `15167` 并 **Load**。
|
||||
选择 `TDengine for 3.x`,并点击 `import`。
|
||||
|
||||

|
||||
导入完成后,在搜索界面已经出现了 **TDinsight for 3.x** dashboard。
|
||||
|
||||
导入完成后,TDinsight 的完整页面视图如下所示。
|
||||

|
||||
|
||||

|
||||
进入 TDinsight for 3.x dashboard 后,选择 taosKeeper 中设置的记录监控指标的数据库。
|
||||
|
||||

|
||||
|
||||
然后可以看到监控结果。
|
||||
|
||||
## TDinsight 仪表盘详细信息
|
||||
|
||||
TDinsight 仪表盘旨在提供 TDengine 相关资源使用情况[dnodes, mnodes, vnodes](https://www.taosdata.com/cn/documentation/architecture#cluster)或数据库的使用情况和状态。
|
||||
TDinsight 仪表盘旨在提供 TDengine 相关资源的使用情况和状态,比如 dnodes、 mnodes、 vnodes 和数据库等。
|
||||
|
||||
指标详情如下:
|
||||
|
||||
|
@ -285,7 +272,6 @@ TDinsight 仪表盘旨在提供 TDengine 相关资源使用情况[dnodes, mnodes
|
|||
- **Measuring Points Used**:启用告警规则的测点数用量(社区版无数据,默认情况下是健康的)。
|
||||
- **Grants Expire Time**:启用告警规则的企业版过期时间(社区版无数据,默认情况是健康的)。
|
||||
- **Error Rate**:启用警报的集群总合错误率(每秒平均错误数)。
|
||||
- **Variables**:`show variables` 表格展示。
|
||||
|
||||
### DNodes 状态
|
||||
|
||||
|
@ -294,7 +280,6 @@ TDinsight 仪表盘旨在提供 TDengine 相关资源使用情况[dnodes, mnodes
|
|||
- **DNodes Status**:`show dnodes` 的简单表格视图。
|
||||
- **DNodes Lifetime**:从创建 dnode 开始经过的时间。
|
||||
- **DNodes Number**:DNodes 数量变化。
|
||||
- **Offline Reason**:如果有任何 dnode 状态为离线,则以饼图形式展示离线原因。
|
||||
|
||||
### MNode 概述
|
||||
|
||||
|
@ -309,7 +294,6 @@ TDinsight 仪表盘旨在提供 TDengine 相关资源使用情况[dnodes, mnodes
|
|||
|
||||
1. **Requests Rate(Inserts per Second)**:平均每秒插入次数。
|
||||
2. **Requests (Selects)**:查询请求数及变化率(count of second)。
|
||||
3. **Requests (HTTP)**:HTTP 请求数和请求速率(count of second)。
|
||||
|
||||
### 数据库
|
||||
|
||||
|
@ -319,9 +303,8 @@ TDinsight 仪表盘旨在提供 TDengine 相关资源使用情况[dnodes, mnodes
|
|||
|
||||
1. **STables**:超级表数量。
|
||||
2. **Total Tables**:所有表数量。
|
||||
3. **Sub Tables**:所有超级表子表的数量。
|
||||
4. **Tables**:所有普通表数量随时间变化图。
|
||||
5. **Tables Number Foreach VGroups**:每个 VGroups 包含的表数量。
|
||||
3. **Tables**:所有普通表数量随时间变化图。
|
||||
4. **Tables Number Foreach VGroups**:每个 VGroups 包含的表数量。
|
||||
|
||||
### DNode 资源使用情况
|
||||
|
||||
|
@ -356,12 +339,11 @@ TDinsight 仪表盘旨在提供 TDengine 相关资源使用情况[dnodes, mnodes
|
|||
|
||||
支持监控 taosAdapter 请求统计和状态详情。包括:
|
||||
|
||||
1. **http_request**: 包含总请求数,请求失败数以及正在处理的请求数
|
||||
2. **top 3 request endpoint**: 按终端分组,请求排名前三的数据
|
||||
3. **Memory Used**: taosAdapter 内存使用情况
|
||||
4. **latency_quantile(ms)**: (1, 2, 5, 9, 99)阶段的分位数
|
||||
5. **top 3 failed request endpoint**: 按终端分组,请求失败排名前三的数据
|
||||
6. **CPU Used**: taosAdapter CPU 使用情况
|
||||
1. **http_request_inflight**: 即时处理请求数
|
||||
2. **http_request_total**: 请求总数。
|
||||
3. **http_request_fail**: 请求总数。
|
||||
4. **CPU Used**: taosAdapter CPU 使用情况。
|
||||
5. **Memory Used**: taosAdapter 内存使用情况。
|
||||
|
||||
## 升级
|
||||
|
||||
|
@ -403,13 +385,6 @@ services:
|
|||
TDENGINE_API: ${TDENGINE_API}
|
||||
TDENGINE_USER: ${TDENGINE_USER}
|
||||
TDENGINE_PASS: ${TDENGINE_PASS}
|
||||
SMS_ACCESS_KEY_ID: ${SMS_ACCESS_KEY_ID}
|
||||
SMS_ACCESS_KEY_SECRET: ${SMS_ACCESS_KEY_SECRET}
|
||||
SMS_SIGN_NAME: ${SMS_SIGN_NAME}
|
||||
SMS_TEMPLATE_CODE: ${SMS_TEMPLATE_CODE}
|
||||
SMS_TEMPLATE_PARAM: '${SMS_TEMPLATE_PARAM}'
|
||||
SMS_PHONE_NUMBERS: $SMS_PHONE_NUMBERS
|
||||
SMS_LISTEN_ADDR: ${SMS_LISTEN_ADDR}
|
||||
ports:
|
||||
- 3000:3000
|
||||
volumes:
|
|
@@ -8,7 +8,7 @@ The TDengine command-line program (hereinafter the TDengine CLI) is the simplest way for users to operate TDengine

## Installation

If you run it on the TDengine server side, no installation is needed: the TDengine CLI is installed automatically. To run it on a host other than the TDengine server, install the TDengine client driver package; for the installation, see [Connectors](/reference/connector/).
If you run it on the TDengine server side, no installation is needed: the TDengine CLI is installed automatically. To run it on a host other than the TDengine server, install the TDengine client driver package; for the installation, see [Connectors](../../connector/).

## Execution

@@ -18,7 +18,7 @@ The TDengine command-line program (hereinafter the TDengine CLI) is the simplest way for users to operate TDengine
taos
```

If the connection to the service succeeds, a welcome message and version information are printed; if it fails, an error message is printed. (See the [FAQ](/train-faq/faq) to troubleshoot failures connecting from the terminal to the server.) The TDengine CLI prompt is as follows:
If the connection to the service succeeds, a welcome message and version information are printed; if it fails, an error message is printed. (See the [FAQ](../../train-faq/faq) to troubleshoot failures connecting from the terminal to the server.) The TDengine CLI prompt is as follows:

```cmd
taos>

@@ -30,7 +30,7 @@ All TDengine executables are stored in the _/usr/local/taos/bin_ directory by default
- _taosd-dump-cfg.gdb_: a gdb script that makes it easier to debug taosd.

:::note
Since version 2.4.0.0, taosBenchmark and taosdump require the separate taosTools installation package.
taosdump requires the separate taosTools installation package.

:::
@@ -2,13 +2,13 @@
title: System monitoring
---

After TDengine starts, it automatically creates a monitoring database named log and periodically writes the server's CPU, memory, disk space, bandwidth, request count, disk read/write speed, slow queries, and other information into that database. TDengine also records logs of important system operations (such as logging in and creating or deleting databases) and error alarms in the log database. System administrators can inspect this database directly from the CLI, or view the monitoring information through a graphical web interface.
TDengine periodically writes the server's CPU, memory, disk space, bandwidth, request count, disk read/write speed, and other information into a designated database through [taosKeeper](/reference/taosKeeper/). TDengine also records logs of important system operations (such as logging in and creating or deleting databases) and error alarms. System administrators can inspect this database directly from the CLI, or view the monitoring information through a graphical web interface.

Collection of this monitoring information is enabled by default; it can be turned off or on with the monitor option in the configuration file.

## TDinsight - a solution that monitors TDengine with the monitoring database plus Grafana

Starting from 2.3.3.0, the monitoring database provides more monitoring items; see the [TDinsight Grafana Dashboard](https://grafana.com/grafana/dashboards/15167) to learn how to monitor TDengine with the TDinsight solution.
The monitoring database provides more monitoring items; see the [TDinsight Grafana Dashboard](/reference/tdinsight/) to learn how to monitor TDengine with the TDinsight solution.

We provide an automated script, `TDinsight.sh`, for deploying TDinsight.

@@ -34,21 +34,6 @@ chmod +x TDinsight.sh
sudo ./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -E <notifier uid>
```

- To use the Alibaba Cloud SMS alert notifications built into the TDengine data source plugin, enable them with `-s` and set the following parameters:

1. Alibaba Cloud SMS service Key ID, parameter `-I`
2. Alibaba Cloud SMS service Key Secret, parameter `-K`
3. Alibaba Cloud SMS service sign name, parameter `-S`
4. SMS notification template code, parameter `-C`
5. SMS notification template input parameters in JSON format, parameter `-T`, e.g. `{"alarm_level":"%s","time":"%s","name":"%s","content":"%s"}`
6. Comma-separated list of phone numbers to notify, parameter `-B`

```bash
sudo ./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -s \
  -I XXXXXXX -K XXXXXXXX -S taosdata -C SMS_1111111 -B 18900000000 \
  -T '{"alarm_level":"%s","time":"%s","name":"%s","content":"%s"}'
```

Run the program and restart the Grafana service, then open the dashboard: `http://localhost:3000/d/tdinsight`.

For more usage scenarios and restrictions, see the [TDinsight](/reference/tdinsight/) documentation.
@@ -90,7 +90,7 @@ http://127.0.0.1:6041/rest/sql
```
Basic cm9vdDp0YW9zZGF0YQ==
```

For the related documentation, see the [TDengine REST API documentation](/reference/rest-api/).
For the related documentation, see the [TDengine REST API documentation](../../connector/rest-api/).

Enter the rule-engine substitution template into the message body:
@@ -184,7 +184,7 @@ echo `cat /tmp/confluent.current`/connect/connect.stdout

The TDengine Sink Connector synchronizes the data of a specified topic into TDengine. Users do not need to create the database and supertables in advance. The target database name can be specified manually (see the configuration parameter connection.database) or generated by rule (see the configuration parameter connection.database.prefix).

The TDengine Sink Connector internally writes data into TDengine with the TDengine [schemaless write API](/reference/connector/cpp#无模式写入-api), and currently supports data in three formats: [InfluxDB line protocol](/develop/insert-data/influxdb-line), [OpenTSDB Telnet protocol](/develop/insert-data/opentsdb-telnet), and [OpenTSDB JSON protocol](/develop/insert-data/opentsdb-json).
The TDengine Sink Connector internally writes data into TDengine with the TDengine [schemaless write API](../../connector/cpp#无模式写入-api), and currently supports data in three formats: [InfluxDB line protocol](/develop/insert-data/influxdb-line), [OpenTSDB Telnet protocol](/develop/insert-data/opentsdb-telnet), and [OpenTSDB JSON protocol](/develop/insert-data/opentsdb-json).

The following example synchronizes the data of the topic meters into the target database power, with the data in the InfluxDB line protocol format.
@@ -34,7 +34,7 @@ IT operations monitoring data is usually sensitive to time characteristics, for example

### TDengine

Download the latest TDengine-server 2.4.0.x or later from the TAOS Data official [download](http://taosdata.com/cn/all-downloads/) page and install it.
Download the latest TDengine-server release from the TAOS Data official [download](http://taosdata.com/cn/all-downloads/) page and install it.

## Data-link setup

@@ -79,4 +79,4 @@ sudo systemctl start telegraf

## Summary

The above demonstrates how to quickly set up a complete IT operations display system. Thanks to the schemaless protocol parsing capability added in TDengine 2.4.0.0, plus its strong ecosystem adaptability, users can set up an efficient, easy-to-use IT operations system in just a few minutes. For TDengine's powerful data write and query performance and other rich features, see the official documentation and production case studies.
The above demonstrates how to quickly set up a complete IT operations display system. Thanks to TDengine's schemaless protocol parsing capability, plus its strong ecosystem adaptability, users can set up an efficient, easy-to-use IT operations system in just a few minutes. For TDengine's powerful data write and query performance and other rich features, see the official documentation and production case studies.
@@ -36,7 +36,7 @@ IT operations monitoring data is usually sensitive to time characteristics, for example

### Installing TDengine

Download the latest TDengine-server 2.4.0.x or later from the TAOS Data official [download](http://taosdata.com/cn/all-downloads/) page and install it.
Download the latest TDengine-server release from the TAOS Data official [download](http://taosdata.com/cn/all-downloads/) page and install it.

## Data-link setup

@@ -90,6 +90,6 @@ add { host:'<TDengine server/cluster host>', port: <port for StatsD> } to the repeater section

## Summary

As an emerging time-series big data platform, TDengine has the advantages of high performance, high reliability, easy management, and easy maintenance. Thanks to the schemaless protocol parsing capability added in TDengine 2.4.0.0, plus its strong ecosystem adaptability, users can set up an efficient, easy-to-use IT operations system, or adapt an existing system, in just a few minutes.
As an emerging time-series big data platform, TDengine has the advantages of high performance, high reliability, easy management, and easy maintenance. Thanks to TDengine's schemaless protocol parsing capability, plus its strong ecosystem adaptability, users can set up an efficient, easy-to-use IT operations system, or adapt an existing system, in just a few minutes.

For TDengine's powerful data write and query performance and other rich features, see the official documentation and production success stories.
@@ -1,9 +0,0 @@
---
sidebar_label: Release history
title: Release history
---

import Release from "/components/ReleaseV3";


<Release versionPrefix="3.0" />

@@ -0,0 +1,15 @@
---
sidebar_label: TDengine release history
title: TDengine release history
---

import Release from "/components/ReleaseV3";

## 3.0.0.1

<Release type="tdengine" version="3.0.0.1" />

## 3.0.0.0

<Release type="tdengine" version="3.0.0.0" />

@@ -0,0 +1,10 @@
---
sidebar_label: taosTools release history
title: taosTools release history
---

import Release from "/components/ReleaseV3";

## 2.1.2

<Release type="tools" version="2.1.2" />

@@ -0,0 +1 @@
label: Release history
|
|||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
// clang-format off
|
||||
#include <assert.h>
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
|
@ -94,13 +95,8 @@ int32_t create_stream() {
|
|||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
/*const char* sql = "select min(k), max(k), sum(k) from tu1";*/
|
||||
/*const char* sql = "select min(k), max(k), sum(k) as sum_of_k from st1";*/
|
||||
/*const char* sql = "select sum(k) from tu1 interval(10m)";*/
|
||||
/*pRes = tmq_create_stream(pConn, "stream1", "out1", sql);*/
|
||||
pRes = taos_query(pConn,
|
||||
"create stream stream1 trigger max_delay 10s watermark 10s into outstb as select _wstart start, "
|
||||
"count(k) from st1 partition by tbname interval(20s) ");
|
||||
"create stream stream1 trigger at_once watermark 10s into outstb as select _wstart start, k from st1 partition by tbname state_window(k)");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to create stream stream1, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
|
|
|
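The new statement replaces the 20-second tumbling-window count (with a `max_delay` trigger) by a per-table state window keyed on `k`, firing with an `at_once` trigger. A minimal, hedged sketch of how one might verify the stream output afterwards; the helper name and the assumption that rows have already been written to `st1` are ours, only `outstb` comes from the statement above:

```c
#include <stdio.h>
#include "taos.h"

// Hypothetical check: count the rows the stream has produced so far.
// Assumes `pConn` was obtained from taos_connect() and stream1 was created.
static int32_t check_stream_output(TAOS* pConn) {
  TAOS_RES* pRes = taos_query(pConn, "select * from outstb");  // output table from CREATE STREAM
  if (taos_errno(pRes) != 0) {
    printf("failed to query outstb, reason:%s\n", taos_errstr(pRes));
    taos_free_result(pRes);
    return -1;
  }
  TAOS_ROW row;
  int32_t  numOfRows = 0;
  while ((row = taos_fetch_row(pRes)) != NULL) {
    numOfRows++;  // each row is one closed (or updated) state window
  }
  printf("stream produced %d rows so far\n", numOfRows);
  taos_free_result(pRes);
  return 0;
}
```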
@@ -130,6 +130,7 @@ extern int32_t tsMqRebalanceInterval;
extern int32_t tsTtlUnit;
extern int32_t tsTtlPushInterval;
extern int32_t tsGrantHBInterval;
extern int32_t tsUptimeInterval;

#define NEEDTO_COMPRESSS_MSG(size) (tsCompressMsgSize != -1 && (size) > tsCompressMsgSize)

@@ -170,6 +170,7 @@ enum {
  TD_DEF_MSG_TYPE(TDMT_MND_SPLIT_VGROUP, "split-vgroup", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_SHOW_VARIABLES, "show-variables", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_SERVER_VERSION, "server-version", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_UPTIME_TIMER, "uptime-timer", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_MAX_MSG, "mnd-max", NULL, NULL)

  TD_NEW_MSG_SEG(TDMT_VND_MSG)
@@ -29,7 +29,7 @@ typedef void* DataSinkHandle;
struct SRpcMsg;
struct SSubplan;

typedef struct SReadHandle {
typedef struct {
  void* tqReader;
  void* meta;
  void* config;

@@ -45,6 +45,7 @@
  void* sContext;  // SSnapContext*
  SHashObj* pFilterOutTbUid;

  void* pStateBackend;
} SReadHandle;

// in queue mode, data streams are separated by msg

@@ -167,7 +168,7 @@ int32_t qGetQualifiedTableIdList(void* pTableList, const char* tagCond, int32_t

void qProcessRspMsg(void* parent, struct SRpcMsg* pMsg, struct SEpSet* pEpSet);

int32_t qGetExplainExecInfo(qTaskInfo_t tinfo, SArray* pExecInfoList/*,int32_t* resNum, SExplainExecInfo** pRes*/);
int32_t qGetExplainExecInfo(qTaskInfo_t tinfo, SArray* pExecInfoList /*,int32_t* resNum, SExplainExecInfo** pRes*/);

int32_t qSerializeTaskStatus(qTaskInfo_t tinfo, char** pOutput, int32_t* len);
@@ -263,6 +263,14 @@
  SArray* checkpointVer;
} SStreamRecoveringState;

// incremental state storage
typedef struct {
  SStreamTask* pOwner;
  TDB*         db;
  TTB*         pStateDb;
  TXN          txn;
} SStreamState;

typedef struct SStreamTask {
  int64_t streamId;
  int32_t taskId;

@@ -312,6 +320,10 @@ typedef struct SStreamTask {

  // msg handle
  SMsgCb* pMsgCb;

  // state backend
  SStreamState* pState;

} SStreamTask;

int32_t tEncodeStreamEpInfo(SEncoder* pEncoder, const SStreamChildEpInfo* pInfo);

@@ -507,7 +519,7 @@ typedef struct SStreamMeta {
  char*     path;
  TDB*      db;
  TTB*      pTaskDb;
  TTB*      pStateDb;
  TTB*      pCheckpointDb;
  SHashObj* pTasks;
  SHashObj* pRecoverStatus;
  void*     ahandle;

@@ -528,6 +540,36 @@ int32_t streamMetaCommit(SStreamMeta* pMeta);
int32_t streamMetaRollBack(SStreamMeta* pMeta);
int32_t streamLoadTasks(SStreamMeta* pMeta);

SStreamState* streamStateOpen(char* path, SStreamTask* pTask);
void          streamStateClose(SStreamState* pState);
int32_t       streamStateBegin(SStreamState* pState);
int32_t       streamStateCommit(SStreamState* pState);
int32_t       streamStateAbort(SStreamState* pState);

typedef struct {
  TBC* pCur;
} SStreamStateCur;

#if 1
int32_t streamStatePut(SStreamState* pState, const void* key, int32_t kLen, const void* value, int32_t vLen);
int32_t streamStateGet(SStreamState* pState, const void* key, int32_t kLen, void** pVal, int32_t* pVLen);
int32_t streamStateDel(SStreamState* pState, const void* key, int32_t kLen);

SStreamStateCur* streamStateGetCur(SStreamState* pState, const void* key, int32_t kLen);
SStreamStateCur* streamStateSeekKeyNext(SStreamState* pState, const void* key, int32_t kLen);
SStreamStateCur* streamStateSeekKeyPrev(SStreamState* pState, const void* key, int32_t kLen);
void             streamStateFreeCur(SStreamStateCur* pCur);

int32_t streamGetKVByCur(SStreamStateCur* pCur, void** pKey, int32_t* pKLen, void** pVal, int32_t* pVLen);

int32_t streamStateSeekFirst(SStreamState* pState, SStreamStateCur* pCur);
int32_t streamStateSeekLast(SStreamState* pState, SStreamStateCur* pCur);

int32_t streamStateCurNext(SStreamState* pState, SStreamStateCur* pCur);
int32_t streamStateCurPrev(SStreamState* pState, SStreamStateCur* pCur);

#endif

#ifdef __cplusplus
}
#endif
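The declarations above suggest a per-task key-value workflow: open a TDB-backed state under the stream meta path, wrap mutations in begin/commit (abort on failure), and read back with get or a cursor. A minimal, hedged usage sketch under those assumptions; the helper names and the key layout are illustrative, not the engine's actual usage:

```c
// Illustrative only: exercises the SStreamState API exactly as declared above.
// Assumes pTask->pState was opened earlier via streamStateOpen().
static int32_t saveTaskCounter(SStreamTask* pTask, int64_t groupId, int64_t count) {
  SStreamState* pState = pTask->pState;
  if (streamStateBegin(pState) < 0) return -1;
  if (streamStatePut(pState, &groupId, sizeof(groupId), &count, sizeof(count)) < 0) {
    streamStateAbort(pState);  // roll back the failed transaction
    return -1;
  }
  return streamStateCommit(pState);
}

static int32_t loadTaskCounter(SStreamTask* pTask, int64_t groupId, int64_t* pCount) {
  void*   pVal = NULL;
  int32_t vLen = 0;
  if (streamStateGet(pTask->pState, &groupId, sizeof(groupId), &pVal, &vLen) < 0) {
    return -1;  // key not found, or backend error
  }
  if (vLen == sizeof(int64_t)) *pCount = *(int64_t*)pVal;
  return 0;
}
```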
@@ -96,7 +96,12 @@ typedef struct {

typedef struct SQueryExecMetric {
  int64_t start;        // start timestamp, us
  int64_t parsed;       // start to parse, us
  int64_t syntaxStart;  // start of syntax parsing, us
  int64_t syntaxEnd;    // end of syntax parsing, us
  int64_t ctgStart;     // start of catalog access, us
  int64_t ctgEnd;       // end of catalog access, us
  int64_t semanticEnd;  // end of semantic analysis, us
  int64_t execEnd;      // end of execution, us
  int64_t send;         // start to send to server, us
  int64_t rsp;          // receive response from server, us
} SQueryExecMetric;
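With these fields filled in microseconds along the life of one request, per-phase costs fall out as simple differences, which is exactly what the `tscPerf` logging below computes. A hedged helper illustrating the arithmetic; the function name is ours, not part of the client:

```c
#include <stdio.h>
#include <inttypes.h>

// Illustrative helper: derive per-phase durations (in us) from the metric struct.
static void dumpQueryPhases(const SQueryExecMetric* m) {
  int64_t syntax   = m->syntaxEnd - m->syntaxStart;  // SQL syntax parsing
  int64_t catalog  = m->ctgEnd - m->ctgStart;        // catalog/metadata fetch
  int64_t semantic = m->semanticEnd - m->ctgEnd;     // semantic analysis
  int64_t exec     = m->execEnd - m->semanticEnd;    // scheduling + execution
  printf("syntax:%" PRId64 "us ctg:%" PRId64 "us semantic:%" PRId64 "us exec:%" PRId64 "us\n",
         syntax, catalog, semantic, exec);
}
```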
@@ -29,6 +29,7 @@ extern "C" {
#define tscDebug(...)  do { if (cDebugFlag & DEBUG_DEBUG) { taosPrintLog("TSC ", DEBUG_DEBUG, cDebugFlag, __VA_ARGS__); }} while(0)
#define tscTrace(...)  do { if (cDebugFlag & DEBUG_TRACE) { taosPrintLog("TSC ", DEBUG_TRACE, cDebugFlag, __VA_ARGS__); }} while(0)
#define tscDebugL(...) do { if (cDebugFlag & DEBUG_DEBUG) { taosPrintLongString("TSC ", DEBUG_DEBUG, cDebugFlag, __VA_ARGS__); }} while(0)
#define tscPerf(...)   do { taosPrintLog("TSC ", 0, cDebugFlag, __VA_ARGS__); } while(0)

#ifdef __cplusplus
}
@@ -69,14 +69,25 @@ static void deregisterRequest(SRequestObj *pRequest) {
  int32_t currentInst = atomic_sub_fetch_64((int64_t *)&pActivity->currentRequests, 1);
  int32_t num = atomic_sub_fetch_32(&pTscObj->numOfReqs, 1);

  int64_t duration = taosGetTimestampUs() - pRequest->metric.start;
  int64_t nowUs = taosGetTimestampUs();
  int64_t duration = nowUs - pRequest->metric.start;
  tscDebug("0x%" PRIx64 " free Request from connObj: 0x%" PRIx64 ", reqId:0x%" PRIx64 " elapsed:%" PRIu64
           " ms, current:%d, app current:%d",
           pRequest->self, pTscObj->id, pRequest->requestId, duration / 1000, num, currentInst);

  if (QUERY_NODE_VNODE_MODIF_STMT == pRequest->stmtType) {
    tscPerf("insert duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64 "us, exec:%" PRId64 "us",
            duration, pRequest->metric.syntaxEnd - pRequest->metric.syntaxStart,
            pRequest->metric.ctgEnd - pRequest->metric.ctgStart,
            pRequest->metric.semanticEnd - pRequest->metric.ctgEnd,
            pRequest->metric.execEnd - pRequest->metric.semanticEnd);
    atomic_add_fetch_64((int64_t *)&pActivity->insertElapsedTime, duration);
  } else if (QUERY_NODE_SELECT_STMT == pRequest->stmtType) {
    tscPerf("select duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64 "us, exec:%" PRId64 "us",
            duration, pRequest->metric.syntaxEnd - pRequest->metric.syntaxStart,
            pRequest->metric.ctgEnd - pRequest->metric.ctgStart,
            pRequest->metric.semanticEnd - pRequest->metric.ctgEnd,
            pRequest->metric.execEnd - pRequest->metric.semanticEnd);
    atomic_add_fetch_64((int64_t *)&pActivity->queryElapsedTime, duration);
  }

@@ -330,7 +341,6 @@ void doDestroyRequest(void *p) {
  schedulerFreeJob(&pRequest->body.queryJob, 0);

  taosMemoryFreeClear(pRequest->msgBuf);
  taosMemoryFreeClear(pRequest->sqlstr);
  taosMemoryFreeClear(pRequest->pDb);

  doFreeReqResultInfo(&pRequest->body.resInfo);

@@ -349,6 +359,7 @@
    taosMemoryFree(pRequest->body.param);
  }

  taosMemoryFreeClear(pRequest->sqlstr);
  taosMemoryFree(pRequest);
  tscTrace("end to destroy request %" PRIx64 " p:%p", reqId, pRequest);
}
@@ -842,6 +842,8 @@ void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) {
    }

    schedulerFreeJob(&pRequest->body.queryJob, 0);

    pRequest->metric.execEnd = taosGetTimestampUs();
  }

  taosMemoryFree(pResult);
@@ -685,6 +685,8 @@ void retrieveMetaCallback(SMetaData *pResultMeta, void *param, int32_t code) {
  SQuery      *pQuery = pWrapper->pQuery;
  SRequestObj *pRequest = pWrapper->pRequest;

  pRequest->metric.ctgEnd = taosGetTimestampUs();

  if (code == TSDB_CODE_SUCCESS) {
    code = qAnalyseSqlSemantic(pWrapper->pCtx, &pWrapper->catalogReq, pResultMeta, pQuery);
    pRequest->stableQuery = pQuery->stableQuery;

@@ -693,6 +695,8 @@
    }
  }

  pRequest->metric.semanticEnd = taosGetTimestampUs();

  if (code == TSDB_CODE_SUCCESS) {
    if (pQuery->haveResultSet) {
      setResSchemaInfo(&pRequest->body.resInfo, pQuery->pResSchema, pQuery->numOfResCols);

@@ -784,12 +788,16 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) {

  SQuery *pQuery = NULL;

  pRequest->metric.syntaxStart = taosGetTimestampUs();

  SCatalogReq catalogReq = {.forceUpdate = updateMetaForce, .qNodeRequired = qnodeRequired(pRequest)};
  code = qParseSqlSyntax(pCxt, &pQuery, &catalogReq);
  if (code != TSDB_CODE_SUCCESS) {
    goto _error;
  }

  pRequest->metric.syntaxEnd = taosGetTimestampUs();

  if (!updateMetaForce) {
    STscObj            *pTscObj = pRequest->pTscObj;
    SAppClusterSummary *pActivity = &pTscObj->pAppInfo->summary;

@@ -816,6 +824,8 @@
                           .requestObjRefId = pCxt->requestRid,
                           .mgmtEps = pCxt->mgmtEpSet};

  pRequest->metric.ctgStart = taosGetTimestampUs();

  code = catalogAsyncGetAllMeta(pCxt->pCatalog, &conn, &catalogReq, retrieveMetaCallback, pWrapper,
                                &pRequest->body.queryJob);
  pCxt = NULL;
@@ -66,8 +66,9 @@ static const SSysDbTableSchema bnodesSchema[] = {
};

static const SSysDbTableSchema clusterSchema[] = {
    {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
    {.name = "id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
    {.name = "name", .bytes = TSDB_CLUSTER_ID_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
    {.name = "uptime", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
    {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
};
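Because the cluster `id` column is widened here from INT to BIGINT, client code reading this system table should treat the first column as a 64-bit integer. A hedged sketch; the table name `information_schema.ins_cluster` is our assumption about where this schema is exposed:

```c
#include <stdio.h>
#include <inttypes.h>
#include "taos.h"

// Illustrative: read the widened 64-bit cluster id through the C client.
static void printClusterId(TAOS* pConn) {
  TAOS_RES* pRes = taos_query(pConn, "select id, name, uptime from information_schema.ins_cluster");
  if (taos_errno(pRes) == 0) {
    TAOS_ROW row = taos_fetch_row(pRes);
    if (row != NULL && row[0] != NULL) {
      int64_t clusterId = *(int64_t*)row[0];  // BIGINT now, no longer a 32-bit int
      printf("cluster id: %" PRId64 "\n", clusterId);
    }
  }
  taos_free_result(pRes);
}
```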
@@ -164,6 +164,7 @@ int32_t tsMqRebalanceInterval = 2;
int32_t tsTtlUnit = 86400;
int32_t tsTtlPushInterval = 86400;
int32_t tsGrantHBInterval = 60;
int32_t tsUptimeInterval = 300;  // seconds

#ifndef _STORAGE
int32_t taosSetTfsCfg(SConfig *pCfg) {
@@ -27,6 +27,7 @@ void mndCleanupCluster(SMnode *pMnode);
int32_t mndGetClusterName(SMnode *pMnode, char *clusterName, int32_t len);
int64_t mndGetClusterId(SMnode *pMnode);
int64_t mndGetClusterCreateTime(SMnode *pMnode);
float   mndGetClusterUpTime(SMnode *pMnode);

#ifdef __cplusplus
}
@@ -179,6 +179,7 @@ typedef struct {
  char    name[TSDB_CLUSTER_ID_LEN];
  int64_t createdTime;
  int64_t updateTime;
  int32_t upTime;
} SClusterObj;

typedef struct {
@@ -19,7 +19,7 @@
#include "mndTrans.h"

#define CLUSTER_VER_NUMBER   1
#define CLUSTER_RESERVE_SIZE 64
#define CLUSTER_RESERVE_SIZE 60

static SSdbRaw *mndClusterActionEncode(SClusterObj *pCluster);
static SSdbRow *mndClusterActionDecode(SSdbRaw *pRaw);

@@ -29,6 +29,7 @@ static int32_t mndClusterActionUpdate(SSdb *pSdb, SClusterObj *pOldCluster, SCl
static int32_t mndCreateDefaultCluster(SMnode *pMnode);
static int32_t mndRetrieveClusters(SRpcMsg *pMsg, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows);
static void    mndCancelGetNextCluster(SMnode *pMnode, void *pIter);
static int32_t mndProcessUptimeTimer(SRpcMsg *pReq);

int32_t mndInitCluster(SMnode *pMnode) {
  SSdbTable table = {

@@ -42,8 +43,10 @@ int32_t mndInitCluster(SMnode *pMnode) {
      .deleteFp = (SdbDeleteFp)mndClusterActionDelete,
  };

  mndSetMsgHandle(pMnode, TDMT_MND_UPTIME_TIMER, mndProcessUptimeTimer);
  mndAddShowRetrieveHandle(pMnode, TSDB_MGMT_TABLE_CLUSTER, mndRetrieveClusters);
  mndAddShowFreeIterHandle(pMnode, TSDB_MGMT_TABLE_CLUSTER, mndCancelGetNextCluster);

  return sdbSetTable(pMnode->pSdb, table);
}

@@ -62,40 +65,69 @@ int32_t mndGetClusterName(SMnode *pMnode, char *clusterName, int32_t len) {
  return 0;
}

int64_t mndGetClusterId(SMnode *pMnode) {
  SSdb   *pSdb = pMnode->pSdb;
  void   *pIter = NULL;
  int64_t clusterId = -1;
static SClusterObj *mndAcquireCluster(SMnode *pMnode) {
  SSdb *pSdb = pMnode->pSdb;
  void *pIter = NULL;

  while (1) {
    SClusterObj *pCluster = NULL;
    pIter = sdbFetch(pSdb, SDB_CLUSTER, pIter, (void **)&pCluster);
    if (pIter == NULL) break;

    return pCluster;
  }

  return NULL;
}

static void mndReleaseCluster(SMnode *pMnode, SClusterObj *pCluster) {
  SSdb *pSdb = pMnode->pSdb;
  sdbRelease(pSdb, pCluster);
}

int64_t mndGetClusterId(SMnode *pMnode) {
  int64_t      clusterId = 0;
  SClusterObj *pCluster = mndAcquireCluster(pMnode);
  if (pCluster != NULL) {
    clusterId = pCluster->id;
    sdbRelease(pSdb, pCluster);
    mndReleaseCluster(pMnode, pCluster);
  }

  return clusterId;
}

int64_t mndGetClusterCreateTime(SMnode *pMnode) {
  SSdb   *pSdb = pMnode->pSdb;
  void   *pIter = NULL;
  int64_t createTime = INT64_MAX;

  while (1) {
    SClusterObj *pCluster = NULL;
    pIter = sdbFetch(pSdb, SDB_CLUSTER, pIter, (void **)&pCluster);
    if (pIter == NULL) break;

  int64_t      createTime = 0;
  SClusterObj *pCluster = mndAcquireCluster(pMnode);
  if (pCluster != NULL) {
    createTime = pCluster->createdTime;
    sdbRelease(pSdb, pCluster);
    mndReleaseCluster(pMnode, pCluster);
  }

  return createTime;
}

static int32_t mndGetClusterUpTimeImp(SClusterObj *pCluster) {
#if 0
  int32_t upTime = taosGetTimestampSec() - pCluster->updateTime / 1000;
  upTime = upTime + pCluster->upTime;
  return upTime;
#else
  return pCluster->upTime;
#endif
}

float mndGetClusterUpTime(SMnode *pMnode) {
  int64_t      upTime = 0;
  SClusterObj *pCluster = mndAcquireCluster(pMnode);
  if (pCluster != NULL) {
    upTime = mndGetClusterUpTimeImp(pCluster);
    mndReleaseCluster(pMnode, pCluster);
  }

  return upTime / 86400.0f;
}

static SSdbRaw *mndClusterActionEncode(SClusterObj *pCluster) {
  terrno = TSDB_CODE_OUT_OF_MEMORY;

@@ -107,6 +139,7 @@ static SSdbRaw *mndClusterActionEncode(SClusterObj *pCluster) {
  SDB_SET_INT64(pRaw, dataPos, pCluster->createdTime, _OVER)
  SDB_SET_INT64(pRaw, dataPos, pCluster->updateTime, _OVER)
  SDB_SET_BINARY(pRaw, dataPos, pCluster->name, TSDB_CLUSTER_ID_LEN, _OVER)
  SDB_SET_INT32(pRaw, dataPos, pCluster->upTime, _OVER)
  SDB_SET_RESERVE(pRaw, dataPos, CLUSTER_RESERVE_SIZE, _OVER)

  terrno = 0;

@@ -144,6 +177,7 @@ static SSdbRow *mndClusterActionDecode(SSdbRaw *pRaw) {
  SDB_GET_INT64(pRaw, dataPos, &pCluster->createdTime, _OVER)
  SDB_GET_INT64(pRaw, dataPos, &pCluster->updateTime, _OVER)
  SDB_GET_BINARY(pRaw, dataPos, pCluster->name, TSDB_CLUSTER_ID_LEN, _OVER)
  SDB_GET_INT32(pRaw, dataPos, &pCluster->upTime, _OVER)
  SDB_GET_RESERVE(pRaw, dataPos, CLUSTER_RESERVE_SIZE, _OVER)

  terrno = 0;

@@ -162,6 +196,7 @@ _OVER:
static int32_t mndClusterActionInsert(SSdb *pSdb, SClusterObj *pCluster) {
  mTrace("cluster:%" PRId64 ", perform insert action, row:%p", pCluster->id, pCluster);
  pSdb->pMnode->clusterId = pCluster->id;
  pCluster->updateTime = taosGetTimestampMs();
  return 0;
}

@@ -171,7 +206,10 @@ static int32_t mndClusterActionDelete(SSdb *pSdb, SClusterObj *pCluster) {
}

static int32_t mndClusterActionUpdate(SSdb *pSdb, SClusterObj *pOld, SClusterObj *pNew) {
  mTrace("cluster:%" PRId64 ", perform update action, old row:%p new row:%p", pOld->id, pOld, pNew);
  mTrace("cluster:%" PRId64 ", perform update action, old row:%p new row:%p, uptime from %d to %d", pOld->id, pOld,
         pNew, pOld->upTime, pNew->upTime);
  pOld->upTime = pNew->upTime;
  pOld->updateTime = taosGetTimestampMs();
  return 0;
}

@@ -242,6 +280,10 @@ static int32_t mndRetrieveClusters(SRpcMsg *pMsg, SShowObj *pShow, SSDataBlock *
    pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
    colDataAppend(pColInfo, numOfRows, buf, false);

    int32_t upTime = mndGetClusterUpTimeImp(pCluster);
    pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
    colDataAppend(pColInfo, numOfRows, (const char *)&upTime, false);

    pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
    colDataAppend(pColInfo, numOfRows, (const char *)&pCluster->createdTime, false);

@@ -257,3 +299,40 @@ static void mndCancelGetNextCluster(SMnode *pMnode, void *pIter) {
  SSdb *pSdb = pMnode->pSdb;
  sdbCancelFetch(pSdb, pIter);
}

static int32_t mndProcessUptimeTimer(SRpcMsg *pReq) {
  SMnode      *pMnode = pReq->info.node;
  SClusterObj  clusterObj = {0};
  SClusterObj *pCluster = mndAcquireCluster(pMnode);
  if (pCluster != NULL) {
    memcpy(&clusterObj, pCluster, sizeof(SClusterObj));
    clusterObj.upTime += tsUptimeInterval;
    mndReleaseCluster(pMnode, pCluster);
  }

  if (clusterObj.id <= 0) {
    mError("can't get cluster info while updating uptime");
    return 0;
  }

  mTrace("update cluster uptime to %d", clusterObj.upTime);
  STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pReq);
  if (pTrans == NULL) return -1;

  SSdbRaw *pCommitRaw = mndClusterActionEncode(&clusterObj);
  if (pCommitRaw == NULL || mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) {
    mError("trans:%d, failed to append commit log since %s", pTrans->id, terrstr());
    mndTransDrop(pTrans);
    return -1;
  }
  sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY);

  if (mndTransPrepare(pMnode, pTrans) != 0) {
    mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr());
    mndTransDrop(pTrans);
    return -1;
  }

  mndTransDrop(pTrans);
  return 0;
}
@@ -100,6 +100,16 @@ static void mndGrantHeartBeat(SMnode *pMnode) {
  }
}

static void mndIncreaseUpTime(SMnode *pMnode) {
  int32_t contLen = 0;
  void   *pReq = mndBuildTimerMsg(&contLen);
  if (pReq != NULL) {
    SRpcMsg rpcMsg = {
        .msgType = TDMT_MND_UPTIME_TIMER, .pCont = pReq, .contLen = contLen, .info.ahandle = (void *)0x9528};
    tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg);
  }
}

static void *mndThreadFp(void *param) {
  SMnode *pMnode = param;
  int64_t lastTime = 0;

@@ -122,13 +132,17 @@ static void *mndThreadFp(void *param) {
      mndCalMqRebalance(pMnode);
    }

    if (lastTime % (tsTelemInterval * 10) == 0) {
    if (lastTime % (tsTelemInterval * 10) == 1) {
      mndPullupTelem(pMnode);
    }

    if (lastTime % (tsGrantHBInterval * 10) == 0) {
      mndGrantHeartBeat(pMnode);
    }

    if ((lastTime % (tsUptimeInterval * 10)) == ((tsUptimeInterval - 1) * 10)) {
      mndIncreaseUpTime(pMnode);
    }
  }

  return NULL;
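This loop appears to tick about every 100 ms, so `lastTime` advances 10 steps per second; the uptime condition then fires once per `tsUptimeInterval` seconds, one second before each interval boundary, which keeps it off the `== 0` tick shared by the other timers. A small standalone sketch of that arithmetic; the 100 ms tick is an assumption read off the surrounding code:

```c
#include <stdio.h>

// Simulate the mnode timer loop cadence: one tick per 100 ms.
int main(void) {
  int tsUptimeInterval = 300;  // seconds, as configured above
  for (long lastTime = 0; lastTime <= 6100; ++lastTime) {
    if ((lastTime % (tsUptimeInterval * 10)) == ((tsUptimeInterval - 1) * 10)) {
      // fires at ticks 2990, 5990, ... i.e. every 300 s, 1 s before the boundary
      printf("uptime timer fires at tick %ld (t = %.1f s)\n", lastTime, lastTime / 10.0);
    }
  }
  return 0;
}
```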
@@ -556,7 +570,8 @@ static int32_t mndCheckMnodeState(SRpcMsg *pMsg) {
  }
  if (mndAcquireRpcRef(pMsg->info.node) == 0) return 0;
  if (pMsg->msgType == TDMT_MND_MQ_TIMER || pMsg->msgType == TDMT_MND_TELEM_TIMER ||
      pMsg->msgType == TDMT_MND_TRANS_TIMER || pMsg->msgType == TDMT_MND_TTL_TIMER) {
      pMsg->msgType == TDMT_MND_TRANS_TIMER || pMsg->msgType == TDMT_MND_TTL_TIMER ||
      pMsg->msgType == TDMT_MND_UPTIME_TIMER) {
    return -1;
  }

@@ -705,7 +720,8 @@ int32_t mndGetMonitorInfo(SMnode *pMnode, SMonClusterInfo *pClusterInfo, SMonVgr
    if (pObj->id == pMnode->selfDnodeId) {
      pClusterInfo->first_ep_dnode_id = pObj->id;
      tstrncpy(pClusterInfo->first_ep, pObj->pDnode->ep, sizeof(pClusterInfo->first_ep));
      pClusterInfo->master_uptime = (ms - pObj->stateStartTime) / (86400000.0f);
      pClusterInfo->master_uptime = mndGetClusterUpTime(pMnode);
      // pClusterInfo->master_uptime = (ms - pObj->stateStartTime) / (86400000.0f);
      tstrncpy(desc.role, syncStr(TAOS_SYNC_STATE_LEADER), sizeof(desc.role));
    } else {
      tstrncpy(desc.role, syncStr(pObj->state), sizeof(desc.role));
@@ -68,7 +68,7 @@ void mndSyncCommitMsg(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbM
  if (pMgmt->errCode != 0) {
    mError("trans:%d, failed to propose since %s, post sem", transId, tstrerror(pMgmt->errCode));
  } else {
    mInfo("trans:%d, is proposed and post sem", transId, tstrerror(pMgmt->errCode));
    mDebug("trans:%d, is proposed and post sem", transId, tstrerror(pMgmt->errCode));
  }
  pMgmt->transId = 0;
  taosWUnLockLatch(&pMgmt->lock);

@@ -118,7 +118,7 @@ void mndReConfig(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SReConfigCbMeta cbM
  SSyncMgmt *pMgmt = &pMnode->syncMgmt;

  pMgmt->errCode = cbMeta.code;
  mInfo("trans:-1, sync reconfig is proposed, saved:%d code:0x%x, index:%" PRId64 " term:%" PRId64, pMgmt->transId,
  mDebug("trans:-1, sync reconfig is proposed, saved:%d code:0x%x, index:%" PRId64 " term:%" PRId64, pMgmt->transId,
        cbMeta.code, cbMeta.index, cbMeta.term);

  taosWLockLatch(&pMgmt->lock);

@@ -126,7 +126,7 @@ void mndReConfig(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SReConfigCbMeta cbM
  if (pMgmt->errCode != 0) {
    mError("trans:-1, failed to propose sync reconfig since %s, post sem", tstrerror(pMgmt->errCode));
  } else {
    mInfo("trans:-1, sync reconfig is proposed, saved:%d code:0x%x, index:%" PRId64 " term:%" PRId64 " post sem",
    mDebug("trans:-1, sync reconfig is proposed, saved:%d code:0x%x, index:%" PRId64 " term:%" PRId64 " post sem",
          pMgmt->transId, cbMeta.code, cbMeta.index, cbMeta.term);
  }
  pMgmt->transId = 0;

@@ -228,7 +228,7 @@ int32_t mndInitSync(SMnode *pMnode) {
  syncInfo.isStandBy = pMgmt->standby;
  syncInfo.snapshotStrategy = SYNC_STRATEGY_STANDARD_SNAPSHOT;

  mInfo("start to open mnode sync, standby:%d", pMgmt->standby);
  mDebug("start to open mnode sync, standby:%d", pMgmt->standby);
  if (pMgmt->standby || pMgmt->replica.id > 0) {
    SSyncCfg *pCfg = &syncInfo.syncCfg;
    pCfg->replicaNum = 1;

@@ -236,7 +236,7 @@ int32_t mndInitSync(SMnode *pMnode) {
    SNodeInfo *pNode = &pCfg->nodeInfo[0];
    tstrncpy(pNode->nodeFqdn, pMgmt->replica.fqdn, sizeof(pNode->nodeFqdn));
    pNode->nodePort = pMgmt->replica.port;
    mInfo("mnode ep:%s:%u", pNode->nodeFqdn, pNode->nodePort);
    mDebug("mnode ep:%s:%u", pNode->nodeFqdn, pNode->nodePort);
  }

  tsem_init(&pMgmt->syncSem, 0, 0);
@@ -5,7 +5,9 @@ target_link_libraries(
  PUBLIC sut
)

add_test(
  NAME smaTest
  COMMAND smaTest
)
if(NOT ${TD_WINDOWS})
  add_test(
    NAME smaTest
    COMMAND smaTest
  )
endif(NOT ${TD_WINDOWS})

@@ -5,7 +5,9 @@ target_link_libraries(
  PUBLIC sut
)

add_test(
  NAME stbTest
  COMMAND stbTest
)
if(NOT ${TD_WINDOWS})
  add_test(
    NAME stbTest
    COMMAND stbTest
  )
endif(NOT ${TD_WINDOWS})
@@ -79,6 +79,10 @@ STQ* tqOpen(const char* path, SVnode* pVnode) {
    ASSERT(0);
  }

  if (streamLoadTasks(pTq->pStreamMeta) < 0) {
    ASSERT(0);
  }

  return pTq;
}

@@ -605,6 +609,11 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask) {
    ASSERT(pTask->exec.executor);
  }

  pTask->pState = streamStateOpen(pTq->pStreamMeta->path, pTask);
  if (pTask->pState == NULL) {
    return -1;
  }

  // sink
  /*pTask->ahandle = pTq->pVnode;*/
  if (pTask->outputType == TASK_OUTPUT__SMA) {
@ -178,10 +178,12 @@ static int32_t doAppendRowFromFileBlock(SSDataBlock* pResBlock, STsdbReader* pR
|
|||
static void setComposedBlockFlag(STsdbReader* pReader, bool composed);
|
||||
static bool hasBeenDropped(const SArray* pDelList, int32_t* index, TSDBKEY* pKey, int32_t order);
|
||||
|
||||
static void doMergeMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter, SArray* pDelList, STSRow** pTSRow,
|
||||
static void doMergeMemTableMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter, SArray* pDelList, STSRow** pTSRow,
|
||||
STsdbReader* pReader, bool* freeTSRow);
|
||||
static void doMergeMemIMemRows(TSDBROW* pRow, TSDBROW* piRow, STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader,
|
||||
STSRow** pTSRow);
|
||||
static int32_t mergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pBlockScanInfo, int64_t key, STsdbReader* pReader);
|
||||
|
||||
static int32_t initDelSkylineIterator(STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader, STbData* pMemTbData,
|
||||
STbData* piMemTbData);
|
||||
static STsdb* getTsdbByRetentions(SVnode* pVnode, TSKEY winSKey, SRetention* retentions, const char* idstr,
|
||||
|
@ -1414,7 +1416,7 @@ static int32_t doMergeBufAndFileRows_Rv(STsdbReader* pReader, STableBlockScanInf
|
|||
int64_t minKey = 0;
|
||||
if (pReader->order == TSDB_ORDER_ASC) {
|
||||
minKey = INT64_MAX; // chosen the minimum value
|
||||
if (minKey > tsLast && pLastBlockReader->lastBlockData.nRow > 0) {
|
||||
if (minKey > tsLast && hasDataInLastBlock(pLastBlockReader)) {
|
||||
minKey = tsLast;
|
||||
}
|
||||
|
||||
|
@ -1427,7 +1429,7 @@ static int32_t doMergeBufAndFileRows_Rv(STsdbReader* pReader, STableBlockScanInf
|
|||
}
|
||||
} else {
|
||||
minKey = INT64_MIN;
|
||||
if (minKey < tsLast && pLastBlockReader->lastBlockData.nRow > 0) {
|
||||
if (minKey < tsLast && hasDataInLastBlock(pLastBlockReader)) {
|
||||
minKey = tsLast;
|
||||
}
|
||||
|
||||
|
@ -1510,77 +1512,81 @@ static int32_t doMergeBufAndFileRows_Rv(STsdbReader* pReader, STableBlockScanInf
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo, TSDBROW* pRow,
|
||||
SIterInfo* pIter, int64_t key, SLastBlockReader* pLastBlockReader) {
|
||||
SRowMerger merge = {0};
|
||||
STSRow* pTSRow = NULL;
|
||||
SBlockData* pBlockData = &pReader->status.fileBlockData;
|
||||
static int32_t mergeFileBlockAndLastBlock(STsdbReader* pReader, SLastBlockReader* pLastBlockReader, int64_t key,
|
||||
STableBlockScanInfo* pBlockScanInfo, SBlockData* pBlockData) {
|
||||
SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
|
||||
|
||||
TSDBKEY k = TSDBROW_KEY(pRow);
|
||||
TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
|
||||
SArray* pDelList = pBlockScanInfo->delSkyline;
|
||||
bool freeTSRow = false;
|
||||
uint64_t uid = pBlockScanInfo->uid;
|
||||
if (pBlockData->nRow > 0) {
|
||||
// no last block available, only data block exists
|
||||
if (pLastBlockReader->lastBlockData.nRow == 0 || (!hasDataInLastBlock(pLastBlockReader))) {
|
||||
return mergeRowsInFileBlocks(pBlockData, pBlockScanInfo, key, pReader);
|
||||
}
|
||||
|
||||
// row in last file block
|
||||
TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
|
||||
int64_t ts = getCurrentKeyInLastBlock(pLastBlockReader);
|
||||
ASSERT(ts >= key);
|
||||
|
||||
if (ASCENDING_TRAVERSE(pReader->order)) {
|
||||
if (key < ts) { // imem, mem are all empty, file blocks (data blocks and last block) exist
|
||||
return mergeRowsInFileBlocks(pBlockData, pBlockScanInfo, key, pReader);
|
||||
} else if (key == ts) {
|
||||
STSRow* pTSRow = NULL;
|
||||
SRowMerger merge = {0};
|
||||
|
||||
// ascending order traverse
|
||||
if (ASCENDING_TRAVERSE(pReader->order)) {
|
||||
if (key < k.ts) {
|
||||
// imem & mem are all empty, only file exist
|
||||
if (tryCopyDistinctRowFromFileBlock(pReader, pBlockData, key, pDumpInfo)) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
} else {
|
||||
tRowMergerInit(&merge, &fRow, pReader->pSchema);
|
||||
doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
|
||||
doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, ts, &merge);
|
||||
|
||||
tRowMergerGetRow(&merge, &pTSRow);
|
||||
freeTSRow = true;
|
||||
}
|
||||
} else if (k.ts < key) { // k.ts < key
|
||||
doMergeMultiRows(pRow, pBlockScanInfo->uid, pIter, pDelList, &pTSRow, pReader, &freeTSRow);
|
||||
} else { // k.ts == key, ascending order: file block ----> imem rows -----> mem rows
|
||||
tRowMergerInit(&merge, &fRow, pReader->pSchema);
|
||||
doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
|
||||
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid);
|
||||
|
||||
tRowMerge(&merge, pRow);
|
||||
doMergeRowsInBuf(pIter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, &merge, pReader);
|
||||
|
||||
tRowMergerGetRow(&merge, &pTSRow);
|
||||
freeTSRow = true;
|
||||
}
|
||||
} else { // descending order scan
|
||||
if (key < k.ts) {
|
||||
doMergeMultiRows(pRow, pBlockScanInfo->uid, pIter, pDelList, &pTSRow, pReader, &freeTSRow);
|
||||
} else if (k.ts < key) {
|
||||
if (tryCopyDistinctRowFromFileBlock(pReader, pBlockData, key, pDumpInfo)) {
|
||||
taosMemoryFree(pTSRow);
|
||||
tRowMergerClear(&merge);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
} else {
|
||||
tRowMergerInit(&merge, &fRow, pReader->pSchema);
|
||||
doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
|
||||
tRowMergerGetRow(&merge, &pTSRow);
|
||||
freeTSRow = true;
|
||||
ASSERT(0);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
} else { // descending order: mem rows -----> imem rows ------> file block
|
||||
STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, pBlockScanInfo->uid);
|
||||
} else { // desc order
|
||||
SBlockData* pLastBlockData = &pLastBlockReader->lastBlockData;
|
||||
TSDBROW fRow1 = tsdbRowFromBlockData(pLastBlockData, *pLastBlockReader->rowIndex);
|
||||
|
||||
tRowMergerInit(&merge, pRow, pSchema);
|
||||
doMergeRowsInBuf(pIter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, &merge, pReader);
|
||||
STSRow* pTSRow = NULL;
|
||||
SRowMerger merge = {0};
|
||||
tRowMergerInit(&merge, &fRow1, pReader->pSchema);
|
||||
doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, ts, &merge);
|
||||
|
||||
tRowMerge(&merge, &fRow);
|
||||
doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
|
||||
if (ts == key) {
|
||||
doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
|
||||
}
|
||||
|
||||
tRowMergerGetRow(&merge, &pTSRow);
|
||||
freeTSRow = true;
|
||||
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid);
|
||||
|
||||
taosMemoryFree(pTSRow);
|
||||
tRowMergerClear(&merge);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
}
|
||||
} else { // only last block exists
|
||||
SBlockData* pLastBlockData = &pLastBlockReader->lastBlockData;
|
||||
int64_t tsLastBlock = getCurrentKeyInLastBlock(pLastBlockReader);
|
||||
|
||||
tRowMergerClear(&merge);
|
||||
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, uid);
|
||||
STSRow* pTSRow = NULL;
|
||||
SRowMerger merge = {0};
|
||||
|
||||
TSDBROW fRow = tsdbRowFromBlockData(pLastBlockData, *pLastBlockReader->rowIndex);
|
||||
|
||||
tRowMergerInit(&merge, &fRow, pReader->pSchema);
|
||||
doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLastBlock, &merge);
|
||||
tRowMergerGetRow(&merge, &pTSRow);
|
||||
|
||||
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid);
|
||||
|
||||
if (freeTSRow) {
|
||||
taosMemoryFree(pTSRow);
|
||||
tRowMergerClear(&merge);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static int32_t doMergeMultiLevelRowsRv(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo, SBlockData* pBlockData, SLastBlockReader* pLastBlockReader) {
|
||||
|
@ -1734,6 +1740,7 @@ static int32_t doMergeMultiLevelRowsRv(STsdbReader* pReader, STableBlockScanInfo
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
#if 0
|
||||
static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo, SBlockData* pBlockData) {
|
||||
SRowMerger merge = {0};
|
||||
STSRow* pTSRow = NULL;
|
||||
|
@ -1779,7 +1786,7 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo*
|
|||
// [3] ik.ts < key <= k.ts
|
||||
// [4] ik.ts < k.ts <= key
|
||||
if (ik.ts < k.ts) {
|
||||
doMergeMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, &pTSRow, pReader, &freeTSRow);
|
||||
doMergeMemTableMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, &pTSRow, pReader, &freeTSRow);
|
||||
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, uid);
|
||||
if (freeTSRow) {
|
||||
taosMemoryFree(pTSRow);
|
||||
|
@ -1790,7 +1797,7 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo*
|
|||
// [5] k.ts < key <= ik.ts
|
||||
// [6] k.ts < ik.ts <= key
|
||||
if (k.ts < ik.ts) {
|
||||
doMergeMultiRows(pRow, uid, &pBlockScanInfo->iter, pDelList, &pTSRow, pReader, &freeTSRow);
|
||||
doMergeMemTableMultiRows(pRow, uid, &pBlockScanInfo->iter, pDelList, &pTSRow, pReader, &freeTSRow);
|
||||
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, uid);
|
||||
if (freeTSRow) {
|
||||
taosMemoryFree(pTSRow);
|
||||
|
@ -1836,7 +1843,7 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo*
|
|||
// [3] ik.ts > k.ts >= Key
|
||||
// [4] ik.ts > key >= k.ts
|
||||
if (ik.ts > key) {
|
||||
doMergeMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, &pTSRow, pReader, &freeTSRow);
|
||||
doMergeMemTableMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, &pTSRow, pReader, &freeTSRow);
|
||||
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, uid);
|
||||
if (freeTSRow) {
|
||||
taosMemoryFree(pTSRow);
|
||||
|
@ -1859,7 +1866,7 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo*
|
|||
|
||||
//[7] key = ik.ts > k.ts
|
||||
if (key == ik.ts) {
|
||||
doMergeMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, &pTSRow, pReader, &freeTSRow);
|
||||
doMergeMemTableMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, &pTSRow, pReader, &freeTSRow);
|
||||
|
||||
TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
|
||||
tRowMerge(&merge, &fRow);
|
||||
|
@ -1876,6 +1883,7 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo*
|
|||
ASSERT(0);
|
||||
return -1;
|
||||
}
|
||||
#endif
|
||||
|
||||
static bool isValidFileBlockRow(SBlockData* pBlockData, SFileBlockDumpInfo* pDumpInfo,
|
||||
STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader) {
|
||||
|
@ -1982,10 +1990,35 @@ static bool hasDataInLastBlock(SLastBlockReader* pLastBlockReader) {
|
|||
if (*pLastBlockReader->rowIndex == ALL_ROWS_CHECKED_INDEX) {
|
||||
return false;
|
||||
}
|
||||
|
||||
ASSERT(pLastBlockReader->lastBlockData.nRow > 0);
|
||||
return true;
|
||||
}
|
||||
|
||||
// todo refactor
|
||||
int32_t mergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pBlockScanInfo, int64_t key, STsdbReader* pReader) {
|
||||
SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
|
||||
|
||||
TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
|
||||
|
||||
if (tryCopyDistinctRowFromFileBlock(pReader, pBlockData, key, pDumpInfo)) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
} else {
|
||||
STSRow* pTSRow = NULL;
|
||||
SRowMerger merge = {0};
|
||||
|
||||
tRowMergerInit(&merge, &fRow, pReader->pSchema);
|
||||
doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
|
||||
tRowMergerGetRow(&merge, &pTSRow);
|
||||
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid);
|
||||
|
||||
taosMemoryFree(pTSRow);
|
||||
tRowMergerClear(&merge);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static int32_t buildComposedDataBlockImpl(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo,
|
||||
SBlockData* pBlockData, SLastBlockReader* pLastBlockReader) {
|
||||
SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
|
||||
|
@@ -2002,112 +2035,13 @@ static int32_t buildComposedDataBlockImpl(STsdbReader* pReader, STableBlockScanI
       return doMergeBufAndFileRows_Rv(pReader, pBlockScanInfo, piRow, &pBlockScanInfo->iiter, key, pLastBlockReader);
     }

-    // mem + file
+    // mem + file + last block
     if (pBlockScanInfo->iter.hasVal) {
       return doMergeBufAndFileRows_Rv(pReader, pBlockScanInfo, pRow, &pBlockScanInfo->iter, key, pLastBlockReader);
     }

-    if (pBlockData->nRow > 0) {
-      TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
-
-      // no last block available, only data block exists
-      if (pLastBlockReader->lastBlockData.nRow == 0 || (!hasDataInLastBlock(pLastBlockReader))) {
-        if (tryCopyDistinctRowFromFileBlock(pReader, pBlockData, key, pDumpInfo)) {
-          return TSDB_CODE_SUCCESS;
-        } else {
-          STSRow*    pTSRow = NULL;
-          SRowMerger merge = {0};
-
-          tRowMergerInit(&merge, &fRow, pReader->pSchema);
-          doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
-          tRowMergerGetRow(&merge, &pTSRow);
-          doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid);
-
-          taosMemoryFree(pTSRow);
-          tRowMergerClear(&merge);
-          return TSDB_CODE_SUCCESS;
-        }
-      }
-
-      // row in last file block
-      int64_t ts = getCurrentKeyInLastBlock(pLastBlockReader);
-      ASSERT(ts >= key);
-
-      if (ASCENDING_TRAVERSE(pReader->order)) {
-        if (key < ts) {
-          // imem & mem are all empty, only file exist
-          if (tryCopyDistinctRowFromFileBlock(pReader, pBlockData, key, pDumpInfo)) {
-            return TSDB_CODE_SUCCESS;
-          } else {
-            STSRow*    pTSRow = NULL;
-            SRowMerger merge = {0};
-
-            tRowMergerInit(&merge, &fRow, pReader->pSchema);
-            doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
-            tRowMergerGetRow(&merge, &pTSRow);
-            doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid);
-
-            taosMemoryFree(pTSRow);
-            tRowMergerClear(&merge);
-            return TSDB_CODE_SUCCESS;
-          }
-        } else if (key == ts) {
-          STSRow*    pTSRow = NULL;
-          SRowMerger merge = {0};
-
-          tRowMergerInit(&merge, &fRow, pReader->pSchema);
-          doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
-          doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, ts, &merge);
-
-          tRowMergerGetRow(&merge, &pTSRow);
-          doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid);
-
-          taosMemoryFree(pTSRow);
-          tRowMergerClear(&merge);
-          return TSDB_CODE_SUCCESS;
-        } else {
-          ASSERT(0);
-          return TSDB_CODE_SUCCESS;
-        }
-      } else {  // desc order
-        SBlockData* pLastBlockData = &pLastBlockReader->lastBlockData;
-        TSDBROW     fRow1 = tsdbRowFromBlockData(pLastBlockData, *pLastBlockReader->rowIndex);
-
-        STSRow*    pTSRow = NULL;
-        SRowMerger merge = {0};
-        tRowMergerInit(&merge, &fRow1, pReader->pSchema);
-        doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, ts, &merge);
-
-        if (ts == key) {
-          doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
-        }
-
-        tRowMergerGetRow(&merge, &pTSRow);
-        doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid);
-
-        taosMemoryFree(pTSRow);
-        tRowMergerClear(&merge);
-        return TSDB_CODE_SUCCESS;
-      }
-    } else {  // only last block exists
-      SBlockData* pLastBlockData = &pLastBlockReader->lastBlockData;
-      int64_t     tsLastBlock = getCurrentKeyInLastBlock(pLastBlockReader);
-
-      STSRow*    pTSRow = NULL;
-      SRowMerger merge = {0};
-
-      TSDBROW fRow = tsdbRowFromBlockData(pLastBlockData, *pLastBlockReader->rowIndex);
-
-      tRowMergerInit(&merge, &fRow, pReader->pSchema);
-      doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLastBlock, &merge);
-      tRowMergerGetRow(&merge, &pTSRow);
-
-      doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid);
-
-      taosMemoryFree(pTSRow);
-      tRowMergerClear(&merge);
-      return TSDB_CODE_SUCCESS;
-    }
+    // files data blocks + last block
+    return mergeFileBlockAndLastBlock(pReader, pLastBlockReader, key, pBlockScanInfo, pBlockData);
   }
 }
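Review note on the hunk above: each deleted branch repeated the same merge lifecycle, tRowMergerInit, fold rows in, tRowMergerGetRow, doAppendRowFromTSRow, then free and clear, and the rewrite funnels all of them through mergeFileBlockAndLastBlock(). A minimal standalone sketch of that deduplication, under toy types (nothing below is a TDengine API):

```c
#include <stdio.h>

/* Toy stand-ins; Row, Merger and mergeAndEmit are illustrative names,
 * not TDengine types. */
typedef struct { long long key; int nMerged; } Row;
typedef struct { Row acc; } Merger;

static void mergerInit(Merger* m, const Row* seed) { m->acc = *seed; m->acc.nMerged = 1; }
static void mergerAdd(Merger* m, const Row* r) {
  if (r->key == m->acc.key) m->acc.nMerged++;  // fold duplicate-key rows together
}

/* One helper owns the whole init -> merge -> emit -> cleanup sequence, the way
 * the patch funnels every branch through mergeFileBlockAndLastBlock(). */
static void mergeAndEmit(const Row* seed, const Row* rows, int n) {
  Merger m;
  mergerInit(&m, seed);
  for (int i = 0; i < n; ++i) mergerAdd(&m, &rows[i]);
  printf("key=%lld merged from %d rows\n", m.acc.key, m.acc.nMerged);
}

int main(void) {
  Row seed = {100, 0};
  Row dup[] = {{100, 0}, {100, 0}};
  mergeAndEmit(&seed, dup, 2);  // every call site now shares one code path
  return 0;
}
```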
@@ -2132,9 +2066,8 @@ static int32_t buildComposedDataBlock(STsdbReader* pReader) {

   while (1) {
-    // todo check the validate of row in file block
-    bool hasBlockData = false;
     {
+      bool hasBlockData = false;

       while (pBlockData->nRow > 0) {  // find the first qualified row in data block
         if (isValidFileBlockRow(pBlockData, pDumpInfo, pBlockScanInfo, pReader)) {
           hasBlockData = true;

@@ -2149,13 +2082,13 @@ static int32_t buildComposedDataBlock(STsdbReader* pReader) {
           break;
         }
       }
     }

-    bool hasBlockLData = hasDataInLastBlock(pLastBlockReader);
+    bool hasBlockLData = hasDataInLastBlock(pLastBlockReader);

-    // no data in last block and block, no need to proceed.
-    if ((hasBlockData == false) && (hasBlockLData == false)) {
-      break;
-    }
+    // no data in last block and block, no need to proceed.
+    if ((hasBlockData == false) && (hasBlockLData == false)) {
+      break;
+    }

     buildComposedDataBlockImpl(pReader, pBlockScanInfo, pBlockData, pLastBlockReader);
@@ -3115,7 +3048,7 @@ int32_t doMergeRowsInLastBlock(SLastBlockReader* pLastBlockReader, STableBlockSc
   return TSDB_CODE_SUCCESS;
 }

-void doMergeMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter, SArray* pDelList, STSRow** pTSRow,
+void doMergeMemTableMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter, SArray* pDelList, STSRow** pTSRow,
                       STsdbReader* pReader, bool* freeTSRow) {
   TSDBROW* pNextRow = NULL;
   TSDBROW  current = *pRow;
@@ -3197,6 +3130,7 @@ int32_t tsdbGetNextRowInMem(STableBlockScanInfo* pBlockScanInfo, STsdbReader* pR
   TSDBROW* pRow = getValidRow(&pBlockScanInfo->iter, pBlockScanInfo->delSkyline, pReader);
   TSDBROW* piRow = getValidRow(&pBlockScanInfo->iiter, pBlockScanInfo->delSkyline, pReader);
   SArray*  pDelList = pBlockScanInfo->delSkyline;
+  uint64_t uid = pBlockScanInfo->uid;

   // todo refactor
   bool asc = ASCENDING_TRAVERSE(pReader->order);
@@ -3218,10 +3152,12 @@ int32_t tsdbGetNextRowInMem(STableBlockScanInfo* pBlockScanInfo, STsdbReader* pR
     TSDBKEY k = TSDBROW_KEY(pRow);
     TSDBKEY ik = TSDBROW_KEY(piRow);

-    if (ik.ts < k.ts) {  // ik.ts < k.ts
-      doMergeMultiRows(piRow, pBlockScanInfo->uid, &pBlockScanInfo->iiter, pDelList, pTSRow, pReader, freeTSRow);
-    } else if (k.ts < ik.ts) {
-      doMergeMultiRows(pRow, pBlockScanInfo->uid, &pBlockScanInfo->iter, pDelList, pTSRow, pReader, freeTSRow);
+    if (ik.ts != k.ts) {
+      if (((ik.ts < k.ts) && asc) || ((ik.ts > k.ts) && (!asc))) {  // ik.ts < k.ts
+        doMergeMemTableMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, pTSRow, pReader, freeTSRow);
+      } else if (((k.ts < ik.ts) && asc) || ((k.ts > ik.ts) && (!asc))) {
+        doMergeMemTableMultiRows(pRow, uid, &pBlockScanInfo->iter, pDelList, pTSRow, pReader, freeTSRow);
+      }
     } else {  // ik.ts == k.ts
       doMergeMemIMemRows(pRow, piRow, pBlockScanInfo, pReader, pTSRow);
       *freeTSRow = true;
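The reworked condition above is the substantive fix in this hunk: the old code always treated the smaller of k.ts and ik.ts as the next row, which is only correct for ascending scans; the new predicate consults asc. A self-contained sketch of the order-aware comparison (comesFirst is a hypothetical helper, not from the source):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Order-aware "comes first" test, mirroring the new predicate above:
 * ((a < b) && asc) || ((a > b) && (!asc)). For an ascending scan the smaller
 * timestamp is consumed first; for a descending scan, the larger one. */
static bool comesFirst(int64_t a, int64_t b, bool asc) {
  return asc ? (a < b) : (a > b);
}

int main(void) {
  int64_t k = 10, ik = 7;  // mem-table key vs imem key
  printf("asc:  imem row first? %d\n", comesFirst(ik, k, true));   // 1: 7 precedes 10
  printf("desc: imem row first? %d\n", comesFirst(ik, k, false));  // 0: 10 precedes 7
  return 0;
}
```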
@@ -3231,12 +3167,12 @@ int32_t tsdbGetNextRowInMem(STableBlockScanInfo* pBlockScanInfo, STsdbReader* pR
   }

   if (pBlockScanInfo->iter.hasVal && pRow != NULL) {
-    doMergeMultiRows(pRow, pBlockScanInfo->uid, &pBlockScanInfo->iter, pDelList, pTSRow, pReader, freeTSRow);
+    doMergeMemTableMultiRows(pRow, pBlockScanInfo->uid, &pBlockScanInfo->iter, pDelList, pTSRow, pReader, freeTSRow);
     return TSDB_CODE_SUCCESS;
   }

   if (pBlockScanInfo->iiter.hasVal && piRow != NULL) {
-    doMergeMultiRows(piRow, pBlockScanInfo->uid, &pBlockScanInfo->iiter, pDelList, pTSRow, pReader, freeTSRow);
+    doMergeMemTableMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, pTSRow, pReader, freeTSRow);
     return TSDB_CODE_SUCCESS;
   }

@@ -1570,12 +1570,9 @@ int32_t qExplainGetRspFromCtx(void *ctx, SRetrieveTableRsp **pRsp) {

   SColumnInfoData *pInfoData = taosArrayGet(pBlock->pDataBlock, 0);

-  char buf[1024] = {0};
   for (int32_t i = 0; i < rowNum; ++i) {
     SQueryExplainRowInfo *row = taosArrayGet(pCtx->rows, i);
-    varDataCopy(buf, row->buf);
-    ASSERT(varDataTLen(row->buf) == row->len);
-    colDataAppend(pInfoData, i, buf, false);
+    colDataAppend(pInfoData, i, row->buf, false);
   }

   pBlock->info.rows = rowNum;

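A note on the qExplainGetRspFromCtx hunk: per the deleted ASSERT, row->buf already holds var-data (length prefix plus payload), so it can go to colDataAppend() directly; the removed staging copy through char buf[1024] bought nothing and would presumably overflow for explain rows longer than 1 KB. A standalone sketch of that layout, assuming a uint16_t length prefix (VarData and appendVarData are illustrative names):

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative var-data layout: a length prefix followed by payload bytes. */
typedef struct { uint16_t len; char data[]; } VarData;

static void appendVarData(const VarData* v) {
  printf("append %u bytes: %.*s\n", (unsigned)v->len, (int)v->len, v->data);
}

int main(void) {
  const char* s = "explain output row";
  VarData* v = malloc(sizeof(VarData) + strlen(s));
  v->len = (uint16_t)strlen(s);
  memcpy(v->data, s, v->len);
  appendVarData(v);  // hand the original buffer through; no bounded staging copy
  free(v);
  return 0;
}
```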
@@ -80,11 +80,9 @@ struct SqlFunctionCtx;

 size_t getResultRowSize(struct SqlFunctionCtx* pCtx, int32_t numOfOutput);
 void initResultRowInfo(SResultRowInfo* pResultRowInfo);
-void cleanupResultRowInfo(SResultRowInfo* pResultRowInfo);

-void initResultRow(SResultRow* pResultRow);
 void closeResultRow(SResultRow* pResultRow);
 bool isResultRowClosed(SResultRow* pResultRow);

 struct SResultRowEntryInfo* getResultEntryInfo(const SResultRow* pRow, int32_t index, const int32_t* offset);

@@ -152,6 +152,7 @@ typedef struct {
   SQueryTableDataCond tableCond;
   int64_t             recoverStartVer;
   int64_t             recoverEndVer;
+  SStreamState*       pState;
 } SStreamTaskInfo;

 typedef struct {
@@ -1038,7 +1039,7 @@ bool functionNeedToExecute(SqlFunctionCtx* pCtx);
 bool isOverdue(TSKEY ts, STimeWindowAggSupp* pSup);
 bool isCloseWindow(STimeWindow* pWin, STimeWindowAggSupp* pSup);
 bool isDeletedWindow(STimeWindow* pWin, uint64_t groupId, SAggSupporter* pSup);
-void appendOneRow(SSDataBlock* pBlock, TSKEY* pStartTs, TSKEY* pEndTs, uint64_t* pUid);
+void appendOneRow(SSDataBlock* pBlock, TSKEY* pStartTs, TSKEY* pEndTs, int32_t uidCol, uint64_t* pID);
 void printDataBlock(SSDataBlock* pBlock, const char* flag);

 int32_t finalizeResultRowIntoResultDataBlock(SDiskbasedBuf* pBuf, SResultRowPosition* resultRowPosition,

@@ -31,20 +31,6 @@ void initResultRowInfo(SResultRowInfo* pResultRowInfo) {
   pResultRowInfo->cur.pageId = -1;
 }

-void cleanupResultRowInfo(SResultRowInfo* pResultRowInfo) {
-  if (pResultRowInfo == NULL) {
-    return;
-  }
-
-  for (int32_t i = 0; i < pResultRowInfo->size; ++i) {
-    // if (pResultRowInfo->pResult[i]) {
-    //   taosMemoryFreeClear(pResultRowInfo->pResult[i]->key);
-    // }
-  }
-}
-
 bool isResultRowClosed(SResultRow* pRow) { return (pRow->closed == true); }

 void closeResultRow(SResultRow* pResultRow) { pResultRow->closed = true; }

 // TODO refactor: use macro
@@ -484,6 +470,7 @@ static SColumnInfoData* getColInfoResult(void* metaHandle, uint64_t suid, SArray
   SDataType type = {.type = TSDB_DATA_TYPE_BOOL, .bytes = sizeof(bool)};
   code = createResultData(&type, rows, &output);
   if (code != TSDB_CODE_SUCCESS) {
-    terrno = code;
     qError("failed to create result, reason:%s", tstrerror(code));
+    terrno = code;
     goto end;
@@ -493,6 +480,7 @@ static SColumnInfoData* getColInfoResult(void* metaHandle, uint64_t suid, SArray
   if(code != TSDB_CODE_SUCCESS){
     qError("failed to calculate scalar, reason:%s", tstrerror(code));
+    terrno = code;
     goto end;
   }
   // int64_t st2 = taosGetTimestampUs();
   // qDebug("calculate tag block rows:%d, cost:%ld us", rows, st2-st1);
@@ -779,11 +767,13 @@ int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode,
   }

   if (pTagCond) {
+    terrno = TDB_CODE_SUCCESS;
     SColumnInfoData* pColInfoData = getColInfoResult(metaHandle, pListInfo->suid, res, pTagCond);
     if(terrno != TDB_CODE_SUCCESS){
       colDataDestroy(pColInfoData);
       taosMemoryFreeClear(pColInfoData);
       taosArrayDestroy(res);
+      qError("failed to getColInfoResult, code: %s", tstrerror(terrno));
       return terrno;
     }
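The last hunk shows the error contract these files rely on: getColInfoResult() returns its payload and reports failure through the global terrno, so the caller clears terrno before the call, tests it afterwards, and (new in this patch) logs before returning. A minimal sketch of that contract with illustrative names (g_errno and lookupTags are hypothetical; _Thread_local mirrors terrno being per-thread):

```c
#include <stdio.h>

/* g_errno stands in for TDengine's thread-local terrno; lookupTags for
 * getColInfoResult(). Illustrative names only. */
static _Thread_local int g_errno = 0;

static int* lookupTags(int key) {
  static int hit = 42;
  if (key < 0) { g_errno = -1; return NULL; }  // failure travels via the slot
  return &hit;                                 // success: return the payload
}

int main(void) {
  g_errno = 0;                  // reset before the call, as the patch now does
  int* p = lookupTags(-5);
  if (g_errno != 0) {           // test the slot afterwards, log, then bail out
    fprintf(stderr, "lookup failed, code: %d\n", g_errno);
    return 1;
  }
  printf("value=%d\n", *p);
  return 0;
}
```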