Merge remote-tracking branch 'origin/3.0' into enh/clientPolicy
This commit is contained in:
commit
4048c99282
|
@ -34,7 +34,7 @@ endif(${BUILD_TEST})
|
||||||
|
|
||||||
add_subdirectory(source)
|
add_subdirectory(source)
|
||||||
add_subdirectory(tools)
|
add_subdirectory(tools)
|
||||||
add_subdirectory(tests)
|
add_subdirectory(utils)
|
||||||
add_subdirectory(examples/c)
|
add_subdirectory(examples/c)
|
||||||
|
|
||||||
# docs
|
# docs
|
||||||
|
|
|
@ -87,7 +87,7 @@ IF ("${CPUTYPE}" STREQUAL "")
|
||||||
SET(TD_ARM_32 TRUE)
|
SET(TD_ARM_32 TRUE)
|
||||||
ADD_DEFINITIONS("-D_TD_ARM_")
|
ADD_DEFINITIONS("-D_TD_ARM_")
|
||||||
ADD_DEFINITIONS("-D_TD_ARM_32")
|
ADD_DEFINITIONS("-D_TD_ARM_32")
|
||||||
ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64")
|
ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "(aarch64)|(arm64)")
|
||||||
MESSAGE(STATUS "The current platform is aarch64")
|
MESSAGE(STATUS "The current platform is aarch64")
|
||||||
SET(PLATFORM_ARCH_STR "arm64")
|
SET(PLATFORM_ARCH_STR "arm64")
|
||||||
SET(TD_ARM_64 TRUE)
|
SET(TD_ARM_64 TRUE)
|
||||||
|
|
|
@ -2,7 +2,7 @@
|
||||||
IF (DEFINED VERNUMBER)
|
IF (DEFINED VERNUMBER)
|
||||||
SET(TD_VER_NUMBER ${VERNUMBER})
|
SET(TD_VER_NUMBER ${VERNUMBER})
|
||||||
ELSE ()
|
ELSE ()
|
||||||
SET(TD_VER_NUMBER "3.0.0.1")
|
SET(TD_VER_NUMBER "3.0.1.0")
|
||||||
ENDIF ()
|
ENDIF ()
|
||||||
|
|
||||||
IF (DEFINED VERCOMPATIBLE)
|
IF (DEFINED VERCOMPATIBLE)
|
||||||
|
|
|
@ -2,7 +2,7 @@
|
||||||
# taosadapter
|
# taosadapter
|
||||||
ExternalProject_Add(taosadapter
|
ExternalProject_Add(taosadapter
|
||||||
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
|
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
|
||||||
GIT_TAG abed566
|
GIT_TAG 71e7ccf
|
||||||
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
|
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
|
||||||
BINARY_DIR ""
|
BINARY_DIR ""
|
||||||
#BUILD_IN_SOURCE TRUE
|
#BUILD_IN_SOURCE TRUE
|
||||||
|
|
|
@ -2,7 +2,7 @@
|
||||||
# taos-tools
|
# taos-tools
|
||||||
ExternalProject_Add(taos-tools
|
ExternalProject_Add(taos-tools
|
||||||
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
|
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
|
||||||
GIT_TAG 2460442
|
GIT_TAG e7270c9
|
||||||
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
|
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
|
||||||
BINARY_DIR ""
|
BINARY_DIR ""
|
||||||
#BUILD_IN_SOURCE TRUE
|
#BUILD_IN_SOURCE TRUE
|
||||||
|
|
|
@ -2,7 +2,7 @@
|
||||||
# taosws-rs
|
# taosws-rs
|
||||||
ExternalProject_Add(taosws-rs
|
ExternalProject_Add(taosws-rs
|
||||||
GIT_REPOSITORY https://github.com/taosdata/taos-connector-rust.git
|
GIT_REPOSITORY https://github.com/taosdata/taos-connector-rust.git
|
||||||
GIT_TAG 7a54d21
|
GIT_TAG e771403
|
||||||
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosws-rs"
|
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosws-rs"
|
||||||
BINARY_DIR ""
|
BINARY_DIR ""
|
||||||
#BUILD_IN_SOURCE TRUE
|
#BUILD_IN_SOURCE TRUE
|
||||||
|
|
|
@ -6,101 +6,100 @@ In order to explain the basic concepts and provide some sample code, the TDengin
|
||||||
|
|
||||||
<div className="center-table">
|
<div className="center-table">
|
||||||
<table>
|
<table>
|
||||||
<thead><tr>
|
<thead>
|
||||||
<th>Device ID</th>
|
<tr>
|
||||||
<th>Time Stamp</th>
|
<th rowSpan="2">Device ID</th>
|
||||||
<th colSpan="3">Collected Metrics</th>
|
<th rowSpan="2">Timestamp</th>
|
||||||
<th colSpan="2">Tags</th>
|
<th colSpan="3">Collected Metrics</th>
|
||||||
|
<th colSpan="2">Tags</th>
|
||||||
</tr>
|
</tr>
|
||||||
<tr>
|
<tr>
|
||||||
<th>Device ID</th>
|
<th>current</th>
|
||||||
<th>Time Stamp</th>
|
<th>voltage</th>
|
||||||
<th>current</th>
|
<th>phase</th>
|
||||||
<th>voltage</th>
|
<th>location</th>
|
||||||
<th>phase</th>
|
<th>groupid</th>
|
||||||
<th>location</th>
|
</tr>
|
||||||
<th>groupId</th>
|
</thead>
|
||||||
</tr>
|
<tbody>
|
||||||
</thead>
|
<tr>
|
||||||
<tbody>
|
<td>d1001</td>
|
||||||
<tr>
|
<td>1538548685000</td>
|
||||||
<td>d1001</td>
|
<td>10.3</td>
|
||||||
<td>1538548685000</td>
|
<td>219</td>
|
||||||
<td>10.3</td>
|
<td>0.31</td>
|
||||||
<td>219</td>
|
<td>California.SanFrancisco</td>
|
||||||
<td>0.31</td>
|
<td>2</td>
|
||||||
<td>California.SanFrancisco</td>
|
</tr>
|
||||||
<td>2</td>
|
<tr>
|
||||||
</tr>
|
<td>d1002</td>
|
||||||
<tr>
|
<td>1538548684000</td>
|
||||||
<td>d1002</td>
|
<td>10.2</td>
|
||||||
<td>1538548684000</td>
|
<td>220</td>
|
||||||
<td>10.2</td>
|
<td>0.23</td>
|
||||||
<td>220</td>
|
<td>California.SanFrancisco</td>
|
||||||
<td>0.23</td>
|
<td>3</td>
|
||||||
<td>California.SanFrancisco</td>
|
</tr>
|
||||||
<td>3</td>
|
<tr>
|
||||||
</tr>
|
<td>d1003</td>
|
||||||
<tr>
|
<td>1538548686500</td>
|
||||||
<td>d1003</td>
|
<td>11.5</td>
|
||||||
<td>1538548686500</td>
|
<td>221</td>
|
||||||
<td>11.5</td>
|
<td>0.35</td>
|
||||||
<td>221</td>
|
<td>California.LosAngeles</td>
|
||||||
<td>0.35</td>
|
<td>3</td>
|
||||||
<td>California.LosAngeles</td>
|
</tr>
|
||||||
<td>3</td>
|
<tr>
|
||||||
</tr>
|
<td>d1004</td>
|
||||||
<tr>
|
<td>1538548685500</td>
|
||||||
<td>d1004</td>
|
<td>13.4</td>
|
||||||
<td>1538548685500</td>
|
<td>223</td>
|
||||||
<td>13.4</td>
|
<td>0.29</td>
|
||||||
<td>223</td>
|
<td>California.LosAngeles</td>
|
||||||
<td>0.29</td>
|
<td>2</td>
|
||||||
<td>California.LosAngeles</td>
|
</tr>
|
||||||
<td>2</td>
|
<tr>
|
||||||
</tr>
|
<td>d1001</td>
|
||||||
<tr>
|
<td>1538548695000</td>
|
||||||
<td>d1001</td>
|
<td>12.6</td>
|
||||||
<td>1538548695000</td>
|
<td>218</td>
|
||||||
<td>12.6</td>
|
<td>0.33</td>
|
||||||
<td>218</td>
|
<td>California.SanFrancisco</td>
|
||||||
<td>0.33</td>
|
<td>2</td>
|
||||||
<td>California.SanFrancisco</td>
|
</tr>
|
||||||
<td>2</td>
|
<tr>
|
||||||
</tr>
|
<td>d1004</td>
|
||||||
<tr>
|
<td>1538548696600</td>
|
||||||
<td>d1004</td>
|
<td>11.8</td>
|
||||||
<td>1538548696600</td>
|
<td>221</td>
|
||||||
<td>11.8</td>
|
<td>0.28</td>
|
||||||
<td>221</td>
|
<td>California.LosAngeles</td>
|
||||||
<td>0.28</td>
|
<td>2</td>
|
||||||
<td>California.LosAngeles</td>
|
</tr>
|
||||||
<td>2</td>
|
<tr>
|
||||||
</tr>
|
<td>d1002</td>
|
||||||
<tr>
|
<td>1538548696650</td>
|
||||||
<td>d1002</td>
|
<td>10.3</td>
|
||||||
<td>1538548696650</td>
|
<td>218</td>
|
||||||
<td>10.3</td>
|
<td>0.25</td>
|
||||||
<td>218</td>
|
<td>California.SanFrancisco</td>
|
||||||
<td>0.25</td>
|
<td>3</td>
|
||||||
<td>California.SanFrancisco</td>
|
</tr>
|
||||||
<td>3</td>
|
<tr>
|
||||||
</tr>
|
<td>d1001</td>
|
||||||
<tr>
|
<td>1538548696800</td>
|
||||||
<td>d1001</td>
|
<td>12.3</td>
|
||||||
<td>1538548696800</td>
|
<td>221</td>
|
||||||
<td>12.3</td>
|
<td>0.31</td>
|
||||||
<td>221</td>
|
<td>California.SanFrancisco</td>
|
||||||
<td>0.31</td>
|
<td>2</td>
|
||||||
<td>California.SanFrancisco</td>
|
</tr>
|
||||||
<td>2</td>
|
</tbody>
|
||||||
</tr>
|
|
||||||
</tbody>
|
|
||||||
</table>
|
</table>
|
||||||
<a href="#model_table1">Table 1: Smart meter example data</a>
|
<a href="#model_table1">Table 1: Smart meter example data</a>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
Each row contains the device ID, time stamp, collected metrics (current, voltage, phase as above), and static tags (location and groupId in Table 1) associated with the devices. Each smart meter generates a row (measurement) in a pre-defined time interval or triggered by an external event. The device produces a sequence of measurements with associated time stamps.
|
Each row contains the device ID, timestamp, collected metrics (`current`, `voltage`, `phase` as above), and static tags (`location` and `groupid` in Table 1) associated with the devices. Each smart meter generates a row (measurement) in a pre-defined time interval or triggered by an external event. The device produces a sequence of measurements with associated timestamps.
|
||||||
|
|
||||||
## Metric
|
## Metric
|
||||||
|
|
||||||
|
@ -112,22 +111,22 @@ Label/Tag refers to the static properties of sensors, equipment or other types o
|
||||||
|
|
||||||
## Data Collection Point
|
## Data Collection Point
|
||||||
|
|
||||||
Data Collection Point (DCP) refers to hardware or software that collects metrics based on preset time periods or triggered by events. A data collection point can collect one or multiple metrics, but these metrics are collected at the same time and have the same time stamp. For some complex equipment, there are often multiple data collection points, and the sampling rate of each collection point may be different, and fully independent. For example, for a car, there could be a data collection point to collect GPS position metrics, a data collection point to collect engine status metrics, and a data collection point to collect the environment metrics inside the car. So in this example the car would have three data collection points. In the smart meters example, d1001, d1002, d1003, and d1004 are the data collection points.
|
Data Collection Point (DCP) refers to hardware or software that collects metrics based on preset time periods or triggered by events. A data collection point can collect one or multiple metrics, but these metrics are collected at the same time and have the same timestamp. For some complex equipment, there are often multiple data collection points, and the sampling rate of each collection point may be different, and fully independent. For example, for a car, there could be a data collection point to collect GPS position metrics, a data collection point to collect engine status metrics, and a data collection point to collect the environment metrics inside the car. So in this example the car would have three data collection points. In the smart meters example, d1001, d1002, d1003, and d1004 are the data collection points.
|
||||||
|
|
||||||
## Table
|
## Table
|
||||||
|
|
||||||
Since time-series data is most likely to be structured data, TDengine adopts the traditional relational database model to process them with a short learning curve. You need to create a database, create tables, then insert data points and execute queries to explore the data.
|
Since time-series data is most likely to be structured data, TDengine adopts the traditional relational database model to process them with a short learning curve. You need to create a database, create tables, then insert data points and execute queries to explore the data.
|
||||||
|
|
||||||
To make full use of time-series data characteristics, TDengine adopts a strategy of "**One Table for One Data Collection Point**". TDengine requires the user to create a table for each data collection point (DCP) to store collected time-series data. For example, if there are over 10 million smart meters, it means 10 million tables should be created. For the table above, 4 tables should be created for devices D1001, D1002, D1003, and D1004 to store the data collected. This design has several benefits:
|
To make full use of time-series data characteristics, TDengine adopts a strategy of "**One Table for One Data Collection Point**". TDengine requires the user to create a table for each data collection point (DCP) to store collected time-series data. For example, if there are over 10 million smart meters, it means 10 million tables should be created. For the table above, 4 tables should be created for devices d1001, d1002, d1003, and d1004 to store the data collected. This design has several benefits:
|
||||||
|
|
||||||
1. Since the metric data from different DCP are fully independent, the data source of each DCP is unique, and a table has only one writer. In this way, data points can be written in a lock-free manner, and the writing speed can be greatly improved.
|
1. Since the metric data from different DCP are fully independent, the data source of each DCP is unique, and a table has only one writer. In this way, data points can be written in a lock-free manner, and the writing speed can be greatly improved.
|
||||||
2. For a DCP, the metric data generated by DCP is ordered by timestamp, so the write operation can be implemented by simple appending, which further greatly improves the data writing speed.
|
2. For a DCP, the metric data generated by DCP is ordered by timestamp, so the write operation can be implemented by simple appending, which further greatly improves the data writing speed.
|
||||||
3. The metric data from a DCP is continuously stored, block by block. If you read data for a period of time, it can greatly reduce random read operations and improve read and query performance by orders of magnitude.
|
3. The metric data from a DCP is continuously stored, block by block. If you read data for a period of time, it can greatly reduce random read operations and improve read and query performance by orders of magnitude.
|
||||||
4. Inside a data block for a DCP, columnar storage is used, and different compression algorithms are used for different data types. Metrics generally don't vary as significantly between themselves over a time range as compared to other metrics, which allows for a higher compression rate.
|
4. Inside a data block for a DCP, columnar storage is used, and different compression algorithms are used for different data types. Metrics generally don't vary as significantly between themselves over a time range as compared to other metrics, which allows for a higher compression rate.
|
||||||
|
|
||||||
If the metric data of multiple DCPs are traditionally written into a single table, due to uncontrollable network delays, the timing of the data from different DCPs arriving at the server cannot be guaranteed, write operations must be protected by locks, and metric data from one DCP cannot be guaranteed to be continuously stored together. ** One table for one data collection point can ensure the best performance of insert and query of a single data collection point to the greatest possible extent.**
|
If the metric data of multiple DCPs are traditionally written into a single table, due to uncontrollable network delays, the timing of the data from different DCPs arriving at the server cannot be guaranteed, write operations must be protected by locks, and metric data from one DCP cannot be guaranteed to be continuously stored together. **One table for one data collection point can ensure the best performance of insert and query of a single data collection point to the greatest possible extent.**
|
||||||
|
|
||||||
TDengine suggests using DCP ID as the table name (like D1001 in the above table). Each DCP may collect one or multiple metrics (like the current, voltage, phase as above). Each metric has a corresponding column in the table. The data type for a column can be int, float, string and others. In addition, the first column in the table must be a timestamp. TDengine uses the time stamp as the index, and won’t build the index on any metrics stored. Column wise storage is used.
|
TDengine suggests using DCP ID as the table name (like d1001 in the above table). Each DCP may collect one or multiple metrics (like the `current`, `voltage`, `phase` as above). Each metric has a corresponding column in the table. The data type for a column can be int, float, string and others. In addition, the first column in the table must be a timestamp. TDengine uses the timestamp as the index, and won’t build the index on any metrics stored. Column wise storage is used.
|
||||||
|
|
||||||
Complex devices, such as connected cars, may have multiple DCPs. In this case, multiple tables are created for a single device, one table per DCP.
|
Complex devices, such as connected cars, may have multiple DCPs. In this case, multiple tables are created for a single device, one table per DCP.
|
||||||
|
|
||||||
|
@ -156,9 +155,16 @@ The relationship between a STable and the subtables created based on this STable
|
||||||
|
|
||||||
Queries can be executed on both a table (subtable) and a STable. For a query on a STable, TDengine will treat the data in all its subtables as a whole data set for processing. TDengine will first find the subtables that meet the tag filter conditions, then scan the time-series data of these subtables to perform aggregation operation, which reduces the number of data sets to be scanned which in turn greatly improves the performance of data aggregation across multiple DCPs. In essence, querying a supertable is a very efficient aggregate query on multiple DCPs of the same type.
|
Queries can be executed on both a table (subtable) and a STable. For a query on a STable, TDengine will treat the data in all its subtables as a whole data set for processing. TDengine will first find the subtables that meet the tag filter conditions, then scan the time-series data of these subtables to perform aggregation operation, which reduces the number of data sets to be scanned which in turn greatly improves the performance of data aggregation across multiple DCPs. In essence, querying a supertable is a very efficient aggregate query on multiple DCPs of the same type.
|
||||||
|
|
||||||
In TDengine, it is recommended to use a subtable instead of a regular table for a DCP. In the smart meters example, we can create subtables like d1001, d1002, d1003, and d1004 under super table meters.
|
In TDengine, it is recommended to use a subtable instead of a regular table for a DCP. In the smart meters example, we can create subtables like d1001, d1002, d1003, and d1004 under super table `meters`.
|
||||||
|
|
||||||
To better understand the data model using metri, tags, super table and subtable, please refer to the diagram below which demonstrates the data model of the smart meters example. 
|
To better understand the data model using metrics, tags, super table and subtable, please refer to the diagram below which demonstrates the data model of the smart meters example.
|
||||||
|
|
||||||
|
<figure>
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
<center><figcaption>Figure 1. Meters Data Model Diagram</figcaption></center>
|
||||||
|
</figure>
|
||||||
|
|
||||||
## Database
|
## Database
|
||||||
|
|
||||||
|
@ -172,4 +178,4 @@ FQDN (Fully Qualified Domain Name) is the full domain name of a specific compute
|
||||||
|
|
||||||
Each node of a TDengine cluster is uniquely identified by an End Point, which consists of an FQDN and a Port, such as h1.tdengine.com:6030. In this way, when the IP changes, we can still use the FQDN to dynamically find the node without changing any configuration of the cluster. In addition, FQDN is used to facilitate unified access to the same cluster from the Intranet and the Internet.
|
Each node of a TDengine cluster is uniquely identified by an End Point, which consists of an FQDN and a Port, such as h1.tdengine.com:6030. In this way, when the IP changes, we can still use the FQDN to dynamically find the node without changing any configuration of the cluster. In addition, FQDN is used to facilitate unified access to the same cluster from the Intranet and the Internet.
|
||||||
|
|
||||||
TDengine does not recommend using an IP address to access the cluster. FQDN is recommended for cluster management.
|
TDengine does not recommend using an IP address to access the cluster. FQDN is recommended for cluster management.
|
||||||
|
|
|
@ -13,7 +13,7 @@ If Docker is already installed on your computer, run the following command:
|
||||||
docker run -d -p 6030:6030 -p 6041:6041 -p 6043-6049:6043-6049 -p 6043-6049:6043-6049/udp tdengine/tdengine
|
docker run -d -p 6030:6030 -p 6041:6041 -p 6043-6049:6043-6049 -p 6043-6049:6043-6049/udp tdengine/tdengine
|
||||||
```
|
```
|
||||||
|
|
||||||
Note that TDengine Server uses TCP port 6030. Port 6041 is used by taosAdapter for the REST API service. Ports 6043 through 6049 are used by taosAdapter for other connectors. You can open these ports as needed.
|
Note that TDengine Server 3.0 uses TCP port 6030. Port 6041 is used by taosAdapter for the REST API service. Ports 6043 through 6049 are used by taosAdapter for other connectors. You can open these ports as needed.
|
||||||
|
|
||||||
Run the following command to ensure that your container is running:
|
Run the following command to ensure that your container is running:
|
||||||
|
|
||||||
|
@ -21,7 +21,7 @@ Run the following command to ensure that your container is running:
|
||||||
docker ps
|
docker ps
|
||||||
```
|
```
|
||||||
|
|
||||||
Enter the container and open the bash shell:
|
Enter the container and open the `bash` shell:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
docker exec -it <container name> bash
|
docker exec -it <container name> bash
|
||||||
|
@ -31,68 +31,68 @@ You can now access TDengine or run other Linux commands.
|
||||||
|
|
||||||
Note: For information about installing docker, see the [official documentation](https://docs.docker.com/get-docker/).
|
Note: For information about installing docker, see the [official documentation](https://docs.docker.com/get-docker/).
|
||||||
|
|
||||||
## Insert Data into TDengine
|
|
||||||
|
|
||||||
You can use the `taosBenchmark` tool included with TDengine to write test data into your deployment.
|
|
||||||
|
|
||||||
To do so, run the following command:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
$ taosBenchmark
|
|
||||||
|
|
||||||
```
|
|
||||||
|
|
||||||
This command creates the `meters` supertable in the `test` database. In the `meters` supertable, it then creates 10,000 subtables named `d0` to `d9999`. Each table has 10,000 rows and each row has four columns: `ts`, `current`, `voltage`, and `phase`. The timestamps of the data in these columns range from 2017-07-14 10:40:00 000 to 2017-07-14 10:40:09 999. Each table is randomly assigned a `groupId` tag from 1 to 10 and a `location` tag of either `Campbell`, `Cupertino`, `Los Angeles`, `Mountain View`, `Palo Alto`, `San Diego`, `San Francisco`, `San Jose`, `Santa Clara` or `Sunnyvale`.
|
|
||||||
|
|
||||||
The `taosBenchmark` command creates a deployment with 100 million data points that you can use for testing purposes. The time required depends on the hardware specifications of the local system.
|
|
||||||
|
|
||||||
You can customize the test deployment that taosBenchmark creates by specifying command-line parameters. For information about command-line parameters, run the `taosBenchmark --help` command. For more information about taosBenchmark, see [taosBenchmark](/reference/taosbenchmark).
|
|
||||||
|
|
||||||
## Open the TDengine CLI
|
## Open the TDengine CLI
|
||||||
|
|
||||||
On the container, run the following command to open the TDengine CLI:
|
On the container, run the following command to open the TDengine CLI:
|
||||||
|
|
||||||
```
|
```
|
||||||
$ taos
|
$ taos
|
||||||
|
|
||||||
taos>
|
taos>
|
||||||
|
|
||||||
```
|
```
|
||||||
|
|
||||||
## Query Data in TDengine
|
## Test data insert performance
|
||||||
|
|
||||||
After using taosBenchmark to create your test deployment, you can run queries in the TDengine CLI to test its performance. For example:
|
After your TDengine Server is running normally, you can run the taosBenchmark utility to test its performance:
|
||||||
|
|
||||||
From the TDengine CLI query the number of rows in the `meters` supertable:
|
Start TDengine service and execute `taosBenchmark` (formerly named `taosdemo`) in a Linux or Windows terminal.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
taosBenchmark
|
||||||
|
```
|
||||||
|
|
||||||
|
This command creates the `meters` supertable in the `test` database. In the `meters` supertable, it then creates 10,000 subtables named `d0` to `d9999`. Each table has 10,000 rows and each row has four columns: `ts`, `current`, `voltage`, and `phase`. The timestamps of the data in these columns range from 2017-07-14 10:40:00 000 to 2017-07-14 10:40:09 999. Each table is randomly assigned a `groupId` tag from 1 to 10 and a `location` tag of either `California.Campbell`, `California.Cupertino`, `California.LosAngeles`, `California.MountainView`, `California.PaloAlto`, `California.SanDiego`, `California.SanFrancisco`, `California.SanJose`, `California.SantaClara` or `California.Sunnyvale`.
|
||||||
|
|
||||||
|
The `taosBenchmark` command creates a deployment with 100 million data points that you can use for testing purposes. The time required to create the deployment depends on your hardware. On most modern servers, the deployment is created in ten to twenty seconds.
|
||||||
|
|
||||||
|
You can customize the test deployment that taosBenchmark creates by specifying command-line parameters. For information about command-line parameters, run the `taosBenchmark --help` command. For more information about taosBenchmark, see [taosBenchmark](../../reference/taosbenchmark).
|
||||||
|
|
||||||
|
## Test data query performance
|
||||||
|
|
||||||
|
After using `taosBenchmark` to create your test deployment, you can run queries in the TDengine CLI to test its performance:
|
||||||
|
|
||||||
|
From the TDengine CLI (taos) query the number of rows in the `meters` supertable:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
select count(*) from test.meters;
|
SELECT COUNT(*) FROM test.meters;
|
||||||
```
|
```
|
||||||
|
|
||||||
Query the average, maximum, and minimum values of all 100 million rows of data:
|
Query the average, maximum, and minimum values of all 100 million rows of data:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
select avg(current), max(voltage), min(phase) from test.meters;
|
SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters;
|
||||||
```
|
```
|
||||||
|
|
||||||
Query the number of rows whose `location` tag is `San Francisco`:
|
Query the number of rows whose `location` tag is `California.SanFrancisco`:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
select count(*) from test.meters where location="San Francisco";
|
SELECT COUNT(*) FROM test.meters WHERE location = "California.SanFrancisco";
|
||||||
```
|
```
|
||||||
|
|
||||||
Query the average, maximum, and minimum values of all rows whose `groupId` tag is `10`:
|
Query the average, maximum, and minimum values of all rows whose `groupId` tag is `10`:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
select avg(current), max(voltage), min(phase) from test.meters where groupId=10;
|
SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters WHERE groupId = 10;
|
||||||
```
|
```
|
||||||
|
|
||||||
Query the average, maximum, and minimum values for table `d10` in 1 second intervals:
|
Query the average, maximum, and minimum values for table `d10` in 10 second intervals:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
select first(ts), avg(current), max(voltage), min(phase) from test.d10 interval(1s);
|
SELECT FIRST(ts), AVG(current), MAX(voltage), MIN(phase) FROM test.d10 INTERVAL(10s);
|
||||||
```
|
```
|
||||||
In the query above you are selecting the first timestamp (ts) in the interval, another way of selecting this would be _wstart which will give the start of the time window. For more information about windowed queries, see [Time-Series Extensions](../../taos-sql/distinguished/).
|
|
||||||
|
In the query above you are selecting the first timestamp (ts) in the interval, another way of selecting this would be `\_wstart` which will give the start of the time window. For more information about windowed queries, see [Time-Series Extensions](../../taos-sql/distinguished/).
|
||||||
|
|
||||||
## Additional Information
|
## Additional Information
|
||||||
|
|
||||||
|
|
|
@ -9,23 +9,24 @@ import PkgListV3 from "/components/PkgListV3";
|
||||||
|
|
||||||
For information about installing TDengine on Docker, see [Quick Install on Docker](../../get-started/docker). If you want to view the source code, build TDengine yourself, or contribute to the project, see the [TDengine GitHub repository](https://github.com/taosdata/TDengine).
|
For information about installing TDengine on Docker, see [Quick Install on Docker](../../get-started/docker). If you want to view the source code, build TDengine yourself, or contribute to the project, see the [TDengine GitHub repository](https://github.com/taosdata/TDengine).
|
||||||
|
|
||||||
The full package of TDengine includes the TDengine Server (`taosd`), TDengine Client (`taosc`), taosAdapter for connecting with third-party systems and providing a RESTful interface, a command-line interface, and some tools. Note that taosAdapter supports Linux only. In addition to connectors for multiple languages, TDengine also provides a [REST API](../../reference/rest-api) through [taosAdapter](../../reference/taosadapter).
|
The full package of TDengine includes the TDengine Server (`taosd`), TDengine Client (`taosc`), taosAdapter for connecting with third-party systems and providing a RESTful interface, a command-line interface (CLI, taos), and some tools. Note that taosAdapter supports Linux only. In addition to connectors for multiple languages, TDengine also provides a [REST API](../../reference/rest-api) through [taosAdapter](../../reference/taosadapter).
|
||||||
|
|
||||||
The standard server installation package includes `taos`, `taosd`, `taosAdapter`, `taosBenchmark`, and sample code. You can also download a lite package that includes only `taosd` and the C/C++ connector.
|
The standard server installation package includes `taos`, `taosd`, `taosAdapter`, `taosBenchmark`, and sample code. You can also download the Lite package that includes only `taosd` and the C/C++ connector.
|
||||||
|
|
||||||
The TDengine Community Edition is released as .deb and .rpm packages. The .deb package can be installed on Debian, Ubuntu, and derivative systems. The .rpm package can be installed on CentOS, RHEL, SUSE, and derivative systems. A .tar.gz package is also provided for enterprise customers, and you can install TDengine over `apt-get` as well. The .tar.tz package includes `taosdump` and the TDinsight installation script. If you want to use these utilities with the .deb or .rpm package, download and install taosTools separately. TDengine can also be installed on 64-bit Windows servers.
|
The TDengine Community Edition is released as Deb and RPM packages. The Deb package can be installed on Debian, Ubuntu, and derivative systems. The RPM package can be installed on CentOS, RHEL, SUSE, and derivative systems. A .tar.gz package is also provided for enterprise customers, and you can install TDengine over `apt-get` as well. The .tar.tz package includes `taosdump` and the TDinsight installation script. If you want to use these utilities with the Deb or RPM package, download and install taosTools separately. TDengine can also be installed on 64-bit Windows.
|
||||||
|
|
||||||
## Installation
|
## Installation
|
||||||
|
|
||||||
<Tabs>
|
<Tabs>
|
||||||
<TabItem label=".deb" value="debinst">
|
<TabItem label=".deb" value="debinst">
|
||||||
|
|
||||||
1. Download the .deb installation package.
|
1. Download the Deb installation package.
|
||||||
<PkgListV3 type={6}/>
|
<PkgListV3 type={6}/>
|
||||||
2. In the directory where the package is located, use `dpkg` to install the package:
|
2. In the directory where the package is located, use `dpkg` to install the package:
|
||||||
|
|
||||||
|
> Please replace `<version>` with the corresponding version of the package downloaded
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# Enter the name of the package that you downloaded.
|
|
||||||
sudo dpkg -i TDengine-server-<version>-Linux-x64.deb
|
sudo dpkg -i TDengine-server-<version>-Linux-x64.deb
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -34,11 +35,12 @@ sudo dpkg -i TDengine-server-<version>-Linux-x64.deb
|
||||||
<TabItem label=".rpm" value="rpminst">
|
<TabItem label=".rpm" value="rpminst">
|
||||||
|
|
||||||
1. Download the .rpm installation package.
|
1. Download the .rpm installation package.
|
||||||
<PkgListV3 type={5}/>
|
<PkgListV3 type={5}/>
|
||||||
2. In the directory where the package is located, use rpm to install the package:
|
2. In the directory where the package is located, use rpm to install the package:
|
||||||
|
|
||||||
|
> Please replace `<version>` with the corresponding version of the package downloaded
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# Enter the name of the package that you downloaded.
|
|
||||||
sudo rpm -ivh TDengine-server-<version>-Linux-x64.rpm
|
sudo rpm -ivh TDengine-server-<version>-Linux-x64.rpm
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -47,11 +49,12 @@ sudo rpm -ivh TDengine-server-<version>-Linux-x64.rpm
|
||||||
<TabItem label=".tar.gz" value="tarinst">
|
<TabItem label=".tar.gz" value="tarinst">
|
||||||
|
|
||||||
1. Download the .tar.gz installation package.
|
1. Download the .tar.gz installation package.
|
||||||
<PkgListV3 type={0}/>
|
<PkgListV3 type={0}/>
|
||||||
2. In the directory where the package is located, use `tar` to decompress the package:
|
2. In the directory where the package is located, use `tar` to decompress the package:
|
||||||
|
|
||||||
|
> Please replace `<version>` with the corresponding version of the package downloaded
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# Enter the name of the package that you downloaded.
|
|
||||||
tar -zxvf TDengine-server-<version>-Linux-x64.tar.gz
|
tar -zxvf TDengine-server-<version>-Linux-x64.tar.gz
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -96,23 +99,23 @@ sudo apt-get install tdengine
|
||||||
This installation method is supported only for Debian and Ubuntu.
|
This installation method is supported only for Debian and Ubuntu.
|
||||||
::::
|
::::
|
||||||
</TabItem>
|
</TabItem>
|
||||||
<TabItem label="Windows" value="windows">
|
<TabItem label="Windows" value="windows">
|
||||||
|
|
||||||
Note: TDengine only supports Windows Server 2016/2019 and windows 10/11 system versions on the windows platform.
|
Note: TDengine only supports Windows Server 2016/2019 and Windows 10/11 on the Windows platform.
|
||||||
|
|
||||||
1. Download the Windows installation package.
|
1. Download the Windows installation package.
|
||||||
<PkgListV3 type={3}/>
|
<PkgListV3 type={3}/>
|
||||||
2. Run the downloaded package to install TDengine.
|
2. Run the downloaded package to install TDengine.
|
||||||
|
|
||||||
</TabItem>
|
</TabItem>
|
||||||
</Tabs>
|
</Tabs>
|
||||||
|
|
||||||
:::info
|
:::info
|
||||||
For information about TDengine releases, see [Release History](../../releases).
|
For information about TDengine releases, see [Release History](../../releases).
|
||||||
:::
|
:::
|
||||||
|
|
||||||
:::note
|
:::note
|
||||||
On the first node in your TDengine cluster, leave the `Enter FQDN:` prompt blank and press **Enter**. On subsequent nodes, you can enter the end point of the first dnode in the cluster. You can also configure this setting after you have finished installing TDengine.
|
On the first node in your TDengine cluster, leave the `Enter FQDN:` prompt blank and press **Enter**. On subsequent nodes, you can enter the endpoint of the first dnode in the cluster. You can also configure this setting after you have finished installing TDengine.
|
||||||
|
|
||||||
:::
|
:::
|
||||||
|
|
||||||
|
@ -147,7 +150,7 @@ Active: inactive (dead)
|
||||||
|
|
||||||
After confirming that TDengine is running, run the `taos` command to access the TDengine CLI.
|
After confirming that TDengine is running, run the `taos` command to access the TDengine CLI.
|
||||||
|
|
||||||
The following `systemctl` commands can help you manage TDengine:
|
The following `systemctl` commands can help you manage TDengine service:
|
||||||
|
|
||||||
- Start TDengine Server: `systemctl start taosd`
|
- Start TDengine Server: `systemctl start taosd`
|
||||||
|
|
||||||
|
@ -159,7 +162,7 @@ The following `systemctl` commands can help you manage TDengine:
|
||||||
|
|
||||||
:::info
|
:::info
|
||||||
|
|
||||||
- The `systemctl` command requires _root_ privileges. If you are not logged in as the `root` user, use the `sudo` command.
|
- The `systemctl` command requires _root_ privileges. If you are not logged in as the _root_ user, use the `sudo` command.
|
||||||
- The `systemctl stop taosd` command does not instantly stop TDengine Server. The server is stopped only after all data in memory is flushed to disk. The time required depends on the cache size.
|
- The `systemctl stop taosd` command does not instantly stop TDengine Server. The server is stopped only after all data in memory is flushed to disk. The time required depends on the cache size.
|
||||||
- If your system does not include `systemd`, you can run `/usr/local/taos/bin/taosd` to start TDengine manually.
|
- If your system does not include `systemd`, you can run `/usr/local/taos/bin/taosd` to start TDengine manually.
|
||||||
|
|
||||||
|
@ -174,23 +177,9 @@ After the installation is complete, run `C:\TDengine\taosd.exe` to start TDengin
|
||||||
</TabItem>
|
</TabItem>
|
||||||
</Tabs>
|
</Tabs>
|
||||||
|
|
||||||
## Test data insert performance
|
## Command Line Interface (CLI)
|
||||||
|
|
||||||
After your TDengine Server is running normally, you can run the taosBenchmark utility to test its performance:
|
You can use the TDengine CLI to monitor your TDengine deployment and execute ad hoc queries. To open the CLI, you can execute `taos` in the Linux terminal where TDengine is installed, or you can run `taos.exe` in the `C:\TDengine` directory of the Windows terminal where TDengine is installed to start the TDengine command line.
|
||||||
|
|
||||||
```bash
|
|
||||||
taosBenchmark
|
|
||||||
```
|
|
||||||
|
|
||||||
This command creates the `meters` supertable in the `test` database. In the `meters` supertable, it then creates 10,000 subtables named `d0` to `d9999`. Each table has 10,000 rows and each row has four columns: `ts`, `current`, `voltage`, and `phase`. The timestamps of the data in these columns range from 2017-07-14 10:40:00 000 to 2017-07-14 10:40:09 999. Each table is randomly assigned a `groupId` tag from 1 to 10 and a `location` tag of either `Campbell`, `Cupertino`, `Los Angeles`, `Mountain View`, `Palo Alto`, `San Diego`, `San Francisco`, `San Jose`, `Santa Clara` or `Sunnyvale`.
|
|
||||||
|
|
||||||
The `taosBenchmark` command creates a deployment with 100 million data points that you can use for testing purposes. The time required to create the deployment depends on your hardware. On most modern servers, the deployment is created in less than a minute.
|
|
||||||
|
|
||||||
You can customize the test deployment that taosBenchmark creates by specifying command-line parameters. For information about command-line parameters, run the `taosBenchmark --help` command. For more information about taosBenchmark, see [taosBenchmark](../../reference/taosbenchmark).
|
|
||||||
|
|
||||||
## Command Line Interface
|
|
||||||
|
|
||||||
You can use the TDengine CLI to monitor your TDengine deployment and execute ad hoc queries. To open the CLI, run the following command:
|
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
taos
|
taos
|
||||||
|
@ -205,52 +194,71 @@ taos>
|
||||||
For example, you can create and delete databases and tables and run all types of queries. Each SQL command must end with a semicolon (;). For example:
|
For example, you can create and delete databases and tables and run all types of queries. Each SQL command must end with a semicolon (;). For example:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
create database demo;
|
CREATE DATABASE demo;
|
||||||
use demo;
|
USE demo;
|
||||||
create table t (ts timestamp, speed int);
|
CREATE TABLE t (ts TIMESTAMP, speed INT);
|
||||||
insert into t values ('2019-07-15 00:00:00', 10);
|
INSERT INTO t VALUES ('2019-07-15 00:00:00', 10);
|
||||||
insert into t values ('2019-07-15 01:00:00', 20);
|
INSERT INTO t VALUES ('2019-07-15 01:00:00', 20);
|
||||||
select * from t;
|
SELECT * FROM t;
|
||||||
|
|
||||||
ts | speed |
|
ts | speed |
|
||||||
========================================
|
========================================
|
||||||
2019-07-15 00:00:00.000 | 10 |
|
2019-07-15 00:00:00.000 | 10 |
|
||||||
2019-07-15 01:00:00.000 | 20 |
|
2019-07-15 01:00:00.000 | 20 |
|
||||||
|
|
||||||
Query OK, 2 row(s) in set (0.003128s)
|
Query OK, 2 row(s) in set (0.003128s)
|
||||||
```
|
```
|
||||||
|
|
||||||
You can also monitor the deployment status, add and remove user accounts, and manage running instances. You can run the TDengine CLI on either Linux or Windows machines. For more information, see [TDengine CLI](../../reference/taos-shell/).
|
You can also monitor the deployment status, add and remove user accounts, and manage running instances. You can run the TDengine CLI on either Linux or Windows machines. For more information, see [TDengine CLI](../../reference/taos-shell/).
|
||||||
|
|
||||||
|
## Test data insert performance
|
||||||
|
|
||||||
|
After your TDengine Server is running normally, you can run the taosBenchmark utility to test its performance:
|
||||||
|
|
||||||
|
Start TDengine service and execute `taosBenchmark` (formerly named `taosdemo`) in a Linux or Windows terminal.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
taosBenchmark
|
||||||
|
```
|
||||||
|
|
||||||
|
This command creates the `meters` supertable in the `test` database. In the `meters` supertable, it then creates 10,000 subtables named `d0` to `d9999`. Each table has 10,000 rows and each row has four columns: `ts`, `current`, `voltage`, and `phase`. The timestamps of the data in these columns range from 2017-07-14 10:40:00 000 to 2017-07-14 10:40:09 999. Each table is randomly assigned a `groupId` tag from 1 to 10 and a `location` tag of either `California.Campbell`, `California.Cupertino`, `California.LosAngeles`, `California.MountainView`, `California.PaloAlto`, `California.SanDiego`, `California.SanFrancisco`, `California.SanJose`, `California.SantaClara` or `California.Sunnyvale`.
|
||||||
|
|
||||||
|
The `taosBenchmark` command creates a deployment with 100 million data points that you can use for testing purposes. The time required to create the deployment depends on your hardware. On most modern servers, the deployment is created in ten to twenty seconds.
|
||||||
|
|
||||||
|
You can customize the test deployment that taosBenchmark creates by specifying command-line parameters. For information about command-line parameters, run the `taosBenchmark --help` command. For more information about taosBenchmark, see [taosBenchmark](../../reference/taosbenchmark).
|
||||||
|
|
||||||
## Test data query performance
|
## Test data query performance
|
||||||
|
|
||||||
After using taosBenchmark to create your test deployment, you can run queries in the TDengine CLI to test its performance:
|
After using `taosBenchmark` to create your test deployment, you can run queries in the TDengine CLI to test its performance:
|
||||||
|
|
||||||
From the TDengine CLI query the number of rows in the `meters` supertable:
|
From the TDengine CLI (taos) query the number of rows in the `meters` supertable:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
select count(*) from test.meters;
|
SELECT COUNT(*) FROM test.meters;
|
||||||
```
|
```
|
||||||
|
|
||||||
Query the average, maximum, and minimum values of all 100 million rows of data:
|
Query the average, maximum, and minimum values of all 100 million rows of data:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
select avg(current), max(voltage), min(phase) from test.meters;
|
SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters;
|
||||||
```
|
```
|
||||||
|
|
||||||
Query the number of rows whose `location` tag is `San Francisco`:
|
Query the number of rows whose `location` tag is `California.SanFrancisco`:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
select count(*) from test.meters where location="San Francisco";
|
SELECT COUNT(*) FROM test.meters WHERE location = "California.SanFrancisco";
|
||||||
```
|
```
|
||||||
|
|
||||||
Query the average, maximum, and minimum values of all rows whose `groupId` tag is `10`:
|
Query the average, maximum, and minimum values of all rows whose `groupId` tag is `10`:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
select avg(current), max(voltage), min(phase) from test.meters where groupId=10;
|
SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters WHERE groupId = 10;
|
||||||
```
|
```
|
||||||
|
|
||||||
Query the average, maximum, and minimum values for table `d10` in 1 second intervals:
|
Query the average, maximum, and minimum values for table `d10` in 10 second intervals:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
select first(ts), avg(current), max(voltage), min(phase) from test.d10 interval(1s);
|
SELECT FIRST(ts), AVG(current), MAX(voltage), MIN(phase) FROM test.d10 INTERVAL(10s);
|
||||||
```
|
```
|
||||||
In the query above you are selecting the first timestamp (ts) in the interval, another way of selecting this would be _wstart which will give the start of the time window. For more information about windowed queries, see [Time-Series Extensions](../../taos-sql/distinguished/).
|
|
||||||
|
In the query above you are selecting the first timestamp (ts) in the interval, another way of selecting this would be `_wstart` which will give the start of the time window. For more information about windowed queries, see [Time-Series Extensions](../../taos-sql/distinguished/).
|
||||||
|
|
|
@ -52,11 +52,6 @@ window_clause: {
|
||||||
| STATE_WINDOW(col)
|
| STATE_WINDOW(col)
|
||||||
| INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [WATERMARK(watermark_val)] [FILL(fill_mod_and_val)]
|
| INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [WATERMARK(watermark_val)] [FILL(fill_mod_and_val)]
|
||||||
|
|
||||||
changes_option: {
|
|
||||||
DURATION duration_val
|
|
||||||
| ROWS rows_val
|
|
||||||
}
|
|
||||||
|
|
||||||
group_by_clause:
|
group_by_clause:
|
||||||
GROUP BY expr [, expr] ... HAVING condition
|
GROUP BY expr [, expr] ... HAVING condition
|
||||||
|
|
||||||
|
@ -126,7 +121,6 @@ SELECT DISTINCT col_name [, col_name ...] FROM tb_name;
|
||||||
|
|
||||||
1. Configuration parameter `maxNumOfDistinctRes` in `taos.cfg` is used to control the number of rows to output. The minimum configurable value is 100,000, the maximum configurable value is 100,000,000, the default value is 1,000,000. If the actual number of rows exceeds the value of this parameter, only the number of rows specified by this parameter will be output.
|
1. Configuration parameter `maxNumOfDistinctRes` in `taos.cfg` is used to control the number of rows to output. The minimum configurable value is 100,000, the maximum configurable value is 100,000,000, the default value is 1,000,000. If the actual number of rows exceeds the value of this parameter, only the number of rows specified by this parameter will be output.
|
||||||
2. It can't be guaranteed that the results selected by using `DISTINCT` on columns of `FLOAT` or `DOUBLE` are exactly unique because of the precision errors in floating point numbers.
|
2. It can't be guaranteed that the results selected by using `DISTINCT` on columns of `FLOAT` or `DOUBLE` are exactly unique because of the precision errors in floating point numbers.
|
||||||
3. `DISTINCT` can't be used in the sub-query of a nested query statement, and can't be used together with aggregate functions, `GROUP BY` or `JOIN` in the same SQL statement.
|
|
||||||
|
|
||||||
:::
|
:::
|
||||||
|
|
||||||
|
|
|
@ -126,7 +126,7 @@ SELECT COS(field_name) FROM { tb_name | stb_name } [WHERE clause]
|
||||||
SELECT FLOOR(field_name) FROM { tb_name | stb_name } [WHERE clause];
|
SELECT FLOOR(field_name) FROM { tb_name | stb_name } [WHERE clause];
|
||||||
```
|
```
|
||||||
|
|
||||||
**Description**: The rounded down value of a specific field
|
**Description**: The rounded down value of a specific field
|
||||||
**More explanations**: The restrictions are same as those of the `CEIL` function.
|
**More explanations**: The restrictions are same as those of the `CEIL` function.
|
||||||
|
|
||||||
#### LOG
|
#### LOG
|
||||||
|
@ -173,7 +173,7 @@ SELECT POW(field_name, power) FROM { tb_name | stb_name } [WHERE clause]
|
||||||
SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];
|
SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];
|
||||||
```
|
```
|
||||||
|
|
||||||
**Description**: The rounded value of a specific field.
|
**Description**: The rounded value of a specific field.
|
||||||
**More explanations**: The restrictions are same as those of the `CEIL` function.
|
**More explanations**: The restrictions are same as those of the `CEIL` function.
|
||||||
|
|
||||||
|
|
||||||
|
@ -434,7 +434,7 @@ SELECT TO_ISO8601(ts[, timezone]) FROM { tb_name | stb_name } [WHERE clause];
|
||||||
**More explanations**:
|
**More explanations**:
|
||||||
|
|
||||||
- You can specify a time zone in the following format: [z/Z, +/-hhmm, +/-hh, +/-hh:mm]. For example, TO_ISO8601(1, "+00:00").
|
- You can specify a time zone in the following format: [z/Z, +/-hhmm, +/-hh, +/-hh:mm]. For example, TO_ISO8601(1, "+00:00").
|
||||||
- If the input is a UNIX timestamp, the precision of the returned value is determined by the digits of the input timestamp
|
- If the input is a UNIX timestamp, the precision of the returned value is determined by the digits of the input timestamp
|
||||||
- If the input is a column of TIMESTAMP type, the precision of the returned value is same as the precision set for the current data base in use
|
- If the input is a column of TIMESTAMP type, the precision of the returned value is same as the precision set for the current data base in use
|
||||||
|
|
||||||
|
|
||||||
|
@ -613,6 +613,7 @@ SELECT APERCENTILE(field_name, P[, algo_type]) FROM { tb_name | stb_name } [WHER
|
||||||
**Explanations**:
|
**Explanations**:
|
||||||
- _P_ is in range [0,100], when _P_ is 0, the result is same as using function MIN; when _P_ is 100, the result is same as function MAX.
|
- _P_ is in range [0,100], when _P_ is 0, the result is same as using function MIN; when _P_ is 100, the result is same as function MAX.
|
||||||
- `algo_type` can only be input as `default` or `t-digest`. Enter `default` to use a histogram-based algorithm. Enter `t-digest` to use the t-digest algorithm to calculate the approximation of the quantile. `default` is used by default.
|
- `algo_type` can only be input as `default` or `t-digest`. Enter `default` to use a histogram-based algorithm. Enter `t-digest` to use the t-digest algorithm to calculate the approximation of the quantile. `default` is used by default.
|
||||||
|
- The approximation result of `t-digest` algorithm is sensitive to input data order. For example, when querying STable with different input data order there might be minor differences in calculated results.
|
||||||
|
|
||||||
### AVG
|
### AVG
|
||||||
|
|
||||||
|
@ -768,14 +769,14 @@ SELECT HISTOGRAM(field_name,bin_type, bin_description, normalized) FROM tb_nam
|
||||||
|
|
||||||
**Explanations**:
|
**Explanations**:
|
||||||
- bin_type: parameter to indicate the bucket type, valid inputs are: "user_input", "linear_bin", "log_bin".
|
- bin_type: parameter to indicate the bucket type, valid inputs are: "user_input", "linear_bin", "log_bin".
|
||||||
- bin_description: parameter to describe how to generate buckets, can be in the following JSON formats for each bin_type respectively:
|
- bin_description: parameter to describe how to generate buckets, can be in the following JSON formats for each bin_type respectively:
|
||||||
- "user_input": "[1, 3, 5, 7]":
|
- "user_input": "[1, 3, 5, 7]":
|
||||||
User specified bin values.
|
User specified bin values.
|
||||||
|
|
||||||
- "linear_bin": "{"start": 0.0, "width": 5.0, "count": 5, "infinity": true}"
|
- "linear_bin": "{"start": 0.0, "width": 5.0, "count": 5, "infinity": true}"
|
||||||
"start" - bin starting point. "width" - bin offset. "count" - number of bins generated. "infinity" - whether to add (-inf, inf) as start/end point in generated set of bins.
|
"start" - bin starting point. "width" - bin offset. "count" - number of bins generated. "infinity" - whether to add (-inf, inf) as start/end point in generated set of bins.
|
||||||
The above "linear_bin" descriptor generates a set of bins: [-inf, 0.0, 5.0, 10.0, 15.0, 20.0, +inf].
|
The above "linear_bin" descriptor generates a set of bins: [-inf, 0.0, 5.0, 10.0, 15.0, 20.0, +inf].
|
||||||
|
|
||||||
- "log_bin": "{"start":1.0, "factor": 2.0, "count": 5, "infinity": true}"
|
- "log_bin": "{"start":1.0, "factor": 2.0, "count": 5, "infinity": true}"
|
||||||
"start" - bin starting point. "factor" - exponential factor of bin offset. "count" - number of bins generated. "infinity" - whether to add (-inf, inf) as start/end point in generated range of bins.
|
"start" - bin starting point. "factor" - exponential factor of bin offset. "count" - number of bins generated. "infinity" - whether to add (-inf, inf) as start/end point in generated range of bins.
|
||||||
The above "log_bin" descriptor generates a set of bins: [-inf, 1.0, 2.0, 4.0, 8.0, 16.0, +inf].
|
The above "log_bin" descriptor generates a set of bins: [-inf, 1.0, 2.0, 4.0, 8.0, 16.0, +inf].
|
||||||
|
@ -861,9 +862,9 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] RA
|
||||||
|
|
||||||
- `INTERP` is used to get the value that matches the specified time slice from a column. If no such value exists an interpolation value will be returned based on `FILL` parameter.
|
- `INTERP` is used to get the value that matches the specified time slice from a column. If no such value exists an interpolation value will be returned based on `FILL` parameter.
|
||||||
- The input data of `INTERP` is the value of the specified column and a `where` clause can be used to filter the original data. If no `where` condition is specified then all original data is the input.
|
- The input data of `INTERP` is the value of the specified column and a `where` clause can be used to filter the original data. If no `where` condition is specified then all original data is the input.
|
||||||
- The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1<=timestamp2. timestamp1 is the starting point of the output time range and must be specified. timestamp2 is the ending point of the output time range and must be specified.
|
- The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1<=timestamp2. timestamp1 is the starting point of the output time range and must be specified. timestamp2 is the ending point of the output time range and must be specified.
|
||||||
- The number of rows in the result set of `INTERP` is determined by the parameter `EVERY`. Starting from timestamp1, one interpolation is performed for every time interval specified `EVERY` parameter.
|
- The number of rows in the result set of `INTERP` is determined by the parameter `EVERY`. Starting from timestamp1, one interpolation is performed for every time interval specified `EVERY` parameter.
|
||||||
- Interpolation is performed based on `FILL` parameter.
|
- Interpolation is performed based on `FILL` parameter.
|
||||||
- `INTERP` can only be used to interpolate in single timeline. So it must be used with `partition by tbname` when it's used on a STable.
|
- `INTERP` can only be used to interpolate in single timeline. So it must be used with `partition by tbname` when it's used on a STable.
|
||||||
|
|
||||||
### LAST
|
### LAST
|
||||||
|
@ -967,7 +968,7 @@ SELECT SAMPLE(field_name, K) FROM { tb_name | stb_name } [WHERE clause]
|
||||||
|
|
||||||
**Applicable table types**: standard tables and supertables
|
**Applicable table types**: standard tables and supertables
|
||||||
|
|
||||||
**More explanations**:
|
**More explanations**:
|
||||||
|
|
||||||
This function cannot be used in expression calculation.
|
This function cannot be used in expression calculation.
|
||||||
- Must be used with `PARTITION BY tbname` when it's used on a STable to force the result on each single timeline
|
- Must be used with `PARTITION BY tbname` when it's used on a STable to force the result on each single timeline
|
||||||
|
@ -1045,10 +1046,10 @@ SELECT CSUM(field_name) FROM { tb_name | stb_name } [WHERE clause]
|
||||||
|
|
||||||
**Applicable table types**: standard tables and supertables
|
**Applicable table types**: standard tables and supertables
|
||||||
|
|
||||||
**More explanations**:
|
**More explanations**:
|
||||||
|
|
||||||
- Arithmetic operation can't be performed on the result of `csum` function
|
- Arithmetic operation can't be performed on the result of `csum` function
|
||||||
- Can only be used with aggregate functions This function can be used with supertables and standard tables.
|
- Can only be used with aggregate functions This function can be used with supertables and standard tables.
|
||||||
- Must be used with `PARTITION BY tbname` when it's used on a STable to force the result on each single timeline
|
- Must be used with `PARTITION BY tbname` when it's used on a STable to force the result on each single timeline
|
||||||
|
|
||||||
|
|
||||||
|
@ -1066,8 +1067,8 @@ SELECT DERIVATIVE(field_name, time_interval, ignore_negative) FROM tb_name [WHER
|
||||||
|
|
||||||
**Applicable table types**: standard tables and supertables
|
**Applicable table types**: standard tables and supertables
|
||||||
|
|
||||||
**More explanation**:
|
**More explanation**:
|
||||||
|
|
||||||
- It can be used together with `PARTITION BY tbname` against a STable.
|
- It can be used together with `PARTITION BY tbname` against a STable.
|
||||||
- It can be used together with a selected column. For example: select \_rowts, DERIVATIVE() from.
|
- It can be used together with a selected column. For example: select \_rowts, DERIVATIVE() from.
|
||||||
|
|
||||||
|
@ -1085,7 +1086,7 @@ SELECT {DIFF(field_name, ignore_negative) | DIFF(field_name)} FROM tb_name [WHER
|
||||||
|
|
||||||
**Applicable table types**: standard tables and supertables
|
**Applicable table types**: standard tables and supertables
|
||||||
|
|
||||||
**More explanation**:
|
**More explanation**:
|
||||||
|
|
||||||
- The number of result rows is the number of rows subtracted by one, no output for the first row
|
- The number of result rows is the number of rows subtracted by one, no output for the first row
|
||||||
- It can be used together with a selected column. For example: select \_rowts, DIFF() from.
|
- It can be used together with a selected column. For example: select \_rowts, DIFF() from.
|
||||||
|
@ -1122,9 +1123,9 @@ SELECT MAVG(field_name, K) FROM { tb_name | stb_name } [WHERE clause]
|
||||||
|
|
||||||
**Applicable table types**: standard tables and supertables
|
**Applicable table types**: standard tables and supertables
|
||||||
|
|
||||||
**More explanations**:
|
**More explanations**:
|
||||||
|
|
||||||
- Arithmetic operation can't be performed on the result of `MAVG`.
|
- Arithmetic operation can't be performed on the result of `MAVG`.
|
||||||
- Can only be used with data columns, can't be used with tags. - Can't be used with aggregate functions.
|
- Can only be used with data columns, can't be used with tags. - Can't be used with aggregate functions.
|
||||||
- Must be used with `PARTITION BY tbname` when it's used on a STable to force the result on each single timeline
|
- Must be used with `PARTITION BY tbname` when it's used on a STable to force the result on each single timeline
|
||||||
|
|
||||||
|
|
|
@ -44,7 +44,7 @@ For example, the following SQL statement creates a stream and automatically crea
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
CREATE STREAM avg_vol_s INTO avg_vol AS
|
CREATE STREAM avg_vol_s INTO avg_vol AS
|
||||||
SELECT _wstartts, count(*), avg(voltage) FROM meters PARTITION BY tbname INTERVAL(1m) SLIDING(30s);
|
SELECT _wstart, count(*), avg(voltage) FROM meters PARTITION BY tbname INTERVAL(1m) SLIDING(30s);
|
||||||
```
|
```
|
||||||
|
|
||||||
## Delete a Stream
|
## Delete a Stream
|
||||||
|
|
|
@ -5,7 +5,9 @@ title: Reserved Keywords
|
||||||
|
|
||||||
## Keyword List
|
## Keyword List
|
||||||
|
|
||||||
There are about 200 keywords reserved by TDengine; they can't be used as the name of a database, STable, or table with either upper case, lower case, or mixed case. The following list shows all reserved keywords:
|
There are more than 200 keywords reserved by TDengine; they can't be used as the name of a database, table, STable, subtable, column, or tag with either upper case, lower case, or mixed case. If you need to use these keywords, use the symbol `` ` `` to enclose the keywords, e.g. \`ADD\`.
|
||||||
|
|
||||||
|
The following list shows all reserved keywords:
|
||||||
|
|
||||||
### A
|
### A
|
||||||
|
|
||||||
|
@ -14,15 +16,20 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam
|
||||||
- ACCOUNTS
|
- ACCOUNTS
|
||||||
- ADD
|
- ADD
|
||||||
- AFTER
|
- AFTER
|
||||||
|
- AGGREGATE
|
||||||
- ALL
|
- ALL
|
||||||
- ALTER
|
- ALTER
|
||||||
|
- ANALYZE
|
||||||
- AND
|
- AND
|
||||||
|
- APPS
|
||||||
- AS
|
- AS
|
||||||
- ASC
|
- ASC
|
||||||
|
- AT_ONCE
|
||||||
- ATTACH
|
- ATTACH
|
||||||
|
|
||||||
### B
|
### B
|
||||||
|
|
||||||
|
- BALANCE
|
||||||
- BEFORE
|
- BEFORE
|
||||||
- BEGIN
|
- BEGIN
|
||||||
- BETWEEN
|
- BETWEEN
|
||||||
|
@ -32,19 +39,27 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam
|
||||||
- BITNOT
|
- BITNOT
|
||||||
- BITOR
|
- BITOR
|
||||||
- BLOCKS
|
- BLOCKS
|
||||||
|
- BNODE
|
||||||
|
- BNODES
|
||||||
- BOOL
|
- BOOL
|
||||||
|
- BUFFER
|
||||||
|
- BUFSIZE
|
||||||
- BY
|
- BY
|
||||||
|
|
||||||
### C
|
### C
|
||||||
|
|
||||||
- CACHE
|
- CACHE
|
||||||
- CACHELAST
|
- CACHEMODEL
|
||||||
|
- CACHESIZE
|
||||||
- CASCADE
|
- CASCADE
|
||||||
|
- CAST
|
||||||
- CHANGE
|
- CHANGE
|
||||||
|
- CLIENT_VERSION
|
||||||
- CLUSTER
|
- CLUSTER
|
||||||
- COLON
|
- COLON
|
||||||
- COLUMN
|
- COLUMN
|
||||||
- COMMA
|
- COMMA
|
||||||
|
- COMMENT
|
||||||
- COMP
|
- COMP
|
||||||
- COMPACT
|
- COMPACT
|
||||||
- CONCAT
|
- CONCAT
|
||||||
|
@ -52,15 +67,18 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam
|
||||||
- CONNECTION
|
- CONNECTION
|
||||||
- CONNECTIONS
|
- CONNECTIONS
|
||||||
- CONNS
|
- CONNS
|
||||||
|
- CONSUMER
|
||||||
|
- CONSUMERS
|
||||||
|
- CONTAINS
|
||||||
- COPY
|
- COPY
|
||||||
|
- COUNT
|
||||||
- CREATE
|
- CREATE
|
||||||
- CTIME
|
- CURRENT_USER
|
||||||
|
|
||||||
### D
|
### D
|
||||||
|
|
||||||
- DATABASE
|
- DATABASE
|
||||||
- DATABASES
|
- DATABASES
|
||||||
- DAYS
|
|
||||||
- DBS
|
- DBS
|
||||||
- DEFERRED
|
- DEFERRED
|
||||||
- DELETE
|
- DELETE
|
||||||
|
@ -69,18 +87,23 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam
|
||||||
- DESCRIBE
|
- DESCRIBE
|
||||||
- DETACH
|
- DETACH
|
||||||
- DISTINCT
|
- DISTINCT
|
||||||
|
- DISTRIBUTED
|
||||||
- DIVIDE
|
- DIVIDE
|
||||||
- DNODE
|
- DNODE
|
||||||
- DNODES
|
- DNODES
|
||||||
- DOT
|
- DOT
|
||||||
- DOUBLE
|
- DOUBLE
|
||||||
- DROP
|
- DROP
|
||||||
|
- DURATION
|
||||||
|
|
||||||
### E
|
### E
|
||||||
|
|
||||||
|
- EACH
|
||||||
|
- ENABLE
|
||||||
- END
|
- END
|
||||||
- EQ
|
- EVERY
|
||||||
- EXISTS
|
- EXISTS
|
||||||
|
- EXPIRED
|
||||||
- EXPLAIN
|
- EXPLAIN
|
||||||
|
|
||||||
### F
|
### F
|
||||||
|
@ -88,18 +111,20 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam
|
||||||
- FAIL
|
- FAIL
|
||||||
- FILE
|
- FILE
|
||||||
- FILL
|
- FILL
|
||||||
|
- FIRST
|
||||||
- FLOAT
|
- FLOAT
|
||||||
|
- FLUSH
|
||||||
- FOR
|
- FOR
|
||||||
- FROM
|
- FROM
|
||||||
- FSYNC
|
- FUNCTION
|
||||||
|
- FUNCTIONS
|
||||||
|
|
||||||
### G
|
### G
|
||||||
|
|
||||||
- GE
|
|
||||||
- GLOB
|
- GLOB
|
||||||
|
- GRANT
|
||||||
- GRANTS
|
- GRANTS
|
||||||
- GROUP
|
- GROUP
|
||||||
- GT
|
|
||||||
|
|
||||||
### H
|
### H
|
||||||
|
|
||||||
|
@ -110,15 +135,18 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam
|
||||||
- ID
|
- ID
|
||||||
- IF
|
- IF
|
||||||
- IGNORE
|
- IGNORE
|
||||||
- IMMEDIA
|
- IMMEDIATE
|
||||||
- IMPORT
|
- IMPORT
|
||||||
- IN
|
- IN
|
||||||
- INITIAL
|
- INDEX
|
||||||
|
- INDEXES
|
||||||
|
- INITIALLY
|
||||||
|
- INNER
|
||||||
- INSERT
|
- INSERT
|
||||||
- INSTEAD
|
- INSTEAD
|
||||||
- INT
|
- INT
|
||||||
- INTEGER
|
- INTEGER
|
||||||
- INTERVA
|
- INTERVAL
|
||||||
- INTO
|
- INTO
|
||||||
- IS
|
- IS
|
||||||
- ISNULL
|
- ISNULL
|
||||||
|
@ -126,6 +154,7 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam
|
||||||
### J
|
### J
|
||||||
|
|
||||||
- JOIN
|
- JOIN
|
||||||
|
- JSON
|
||||||
|
|
||||||
### K
|
### K
|
||||||
|
|
||||||
|
@ -135,46 +164,57 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam
|
||||||
|
|
||||||
### L
|
### L
|
||||||
|
|
||||||
- LE
|
- LAST
|
||||||
|
- LAST_ROW
|
||||||
|
- LICENCES
|
||||||
- LIKE
|
- LIKE
|
||||||
- LIMIT
|
- LIMIT
|
||||||
- LINEAR
|
- LINEAR
|
||||||
- LOCAL
|
- LOCAL
|
||||||
- LP
|
|
||||||
- LSHIFT
|
|
||||||
- LT
|
|
||||||
|
|
||||||
### M
|
### M
|
||||||
|
|
||||||
- MATCH
|
- MATCH
|
||||||
|
- MAX_DELAY
|
||||||
- MAXROWS
|
- MAXROWS
|
||||||
|
- MERGE
|
||||||
|
- META
|
||||||
- MINROWS
|
- MINROWS
|
||||||
- MINUS
|
- MINUS
|
||||||
|
- MNODE
|
||||||
- MNODES
|
- MNODES
|
||||||
- MODIFY
|
- MODIFY
|
||||||
- MODULES
|
- MODULES
|
||||||
|
|
||||||
### N
|
### N
|
||||||
|
|
||||||
- NE
|
- NCHAR
|
||||||
|
- NEXT
|
||||||
|
- NMATCH
|
||||||
- NONE
|
- NONE
|
||||||
- NOT
|
- NOT
|
||||||
- NOTNULL
|
- NOTNULL
|
||||||
- NOW
|
- NOW
|
||||||
- NULL
|
- NULL
|
||||||
|
- NULLS
|
||||||
|
|
||||||
### O
|
### O
|
||||||
|
|
||||||
- OF
|
- OF
|
||||||
- OFFSET
|
- OFFSET
|
||||||
|
- ON
|
||||||
- OR
|
- OR
|
||||||
- ORDER
|
- ORDER
|
||||||
|
- OUTPUTTYPE
|
||||||
|
|
||||||
### P
|
### P
|
||||||
|
|
||||||
- PARTITION
|
- PAGES
|
||||||
|
- PAGESIZE
|
||||||
|
- PARTITIONS
|
||||||
- PASS
|
- PASS
|
||||||
- PLUS
|
- PLUS
|
||||||
|
- PORT
|
||||||
- PPS
|
- PPS
|
||||||
- PRECISION
|
- PRECISION
|
||||||
- PREV
|
- PREV
|
||||||
|
@ -182,47 +222,63 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam
|
||||||
|
|
||||||
### Q
|
### Q
|
||||||
|
|
||||||
|
- QNODE
|
||||||
|
- QNODES
|
||||||
- QTIME
|
- QTIME
|
||||||
- QUERIE
|
- QUERIES
|
||||||
- QUERY
|
- QUERY
|
||||||
- QUORUM
|
|
||||||
|
|
||||||
### R
|
### R
|
||||||
|
|
||||||
- RAISE
|
- RAISE
|
||||||
- REM
|
- RANGE
|
||||||
|
- RATIO
|
||||||
|
- READ
|
||||||
|
- REDISTRIBUTE
|
||||||
|
- RENAME
|
||||||
- REPLACE
|
- REPLACE
|
||||||
- REPLICA
|
- REPLICA
|
||||||
- RESET
|
- RESET
|
||||||
- RESTRIC
|
- RESTRICT
|
||||||
|
- RETENTIONS
|
||||||
|
- REVOKE
|
||||||
|
- ROLLUP
|
||||||
- ROW
|
- ROW
|
||||||
- RP
|
|
||||||
- RSHIFT
|
|
||||||
|
|
||||||
### S
|
### S
|
||||||
|
|
||||||
|
- SCHEMALESS
|
||||||
- SCORES
|
- SCORES
|
||||||
- SELECT
|
- SELECT
|
||||||
- SEMI
|
- SEMI
|
||||||
|
- SERVER_STATUS
|
||||||
|
- SERVER_VERSION
|
||||||
- SESSION
|
- SESSION
|
||||||
- SET
|
- SET
|
||||||
- SHOW
|
- SHOW
|
||||||
- SLASH
|
- SINGLE_STABLE
|
||||||
- SLIDING
|
- SLIDING
|
||||||
- SLIMIT
|
- SLIMIT
|
||||||
- SMALLIN
|
- SMA
|
||||||
|
- SMALLINT
|
||||||
|
- SNODE
|
||||||
|
- SNODES
|
||||||
- SOFFSET
|
- SOFFSET
|
||||||
- STable
|
- SPLIT
|
||||||
- STableS
|
- STABLE
|
||||||
|
- STABLES
|
||||||
- STAR
|
- STAR
|
||||||
- STATE
|
- STATE
|
||||||
- STATEMEN
|
- STATE_WINDOW
|
||||||
- STATE_WI
|
- STATEMENT
|
||||||
- STORAGE
|
- STORAGE
|
||||||
- STREAM
|
- STREAM
|
||||||
- STREAMS
|
- STREAMS
|
||||||
|
- STRICT
|
||||||
- STRING
|
- STRING
|
||||||
|
- SUBSCRIPTIONS
|
||||||
- SYNCDB
|
- SYNCDB
|
||||||
|
- SYSINFO
|
||||||
|
|
||||||
### T
|
### T
|
||||||
|
|
||||||
|
@ -233,19 +289,24 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam
|
||||||
- TBNAME
|
- TBNAME
|
||||||
- TIMES
|
- TIMES
|
||||||
- TIMESTAMP
|
- TIMESTAMP
|
||||||
|
- TIMEZONE
|
||||||
- TINYINT
|
- TINYINT
|
||||||
|
- TO
|
||||||
|
- TODAY
|
||||||
- TOPIC
|
- TOPIC
|
||||||
- TOPICS
|
- TOPICS
|
||||||
|
- TRANSACTION
|
||||||
|
- TRANSACTIONS
|
||||||
- TRIGGER
|
- TRIGGER
|
||||||
|
- TRIM
|
||||||
- TSERIES
|
- TSERIES
|
||||||
|
- TTL
|
||||||
|
|
||||||
### U
|
### U
|
||||||
|
|
||||||
- UMINUS
|
|
||||||
- UNION
|
- UNION
|
||||||
- UNSIGNED
|
- UNSIGNED
|
||||||
- UPDATE
|
- UPDATE
|
||||||
- UPLUS
|
|
||||||
- USE
|
- USE
|
||||||
- USER
|
- USER
|
||||||
- USERS
|
- USERS
|
||||||
|
@ -253,9 +314,13 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam
|
||||||
|
|
||||||
### V
|
### V
|
||||||
|
|
||||||
|
- VALUE
|
||||||
- VALUES
|
- VALUES
|
||||||
|
- VARCHAR
|
||||||
- VARIABLE
|
- VARIABLE
|
||||||
- VARIABLES
|
- VARIABLES
|
||||||
|
- VERBOSE
|
||||||
|
- VGROUP
|
||||||
- VGROUPS
|
- VGROUPS
|
||||||
- VIEW
|
- VIEW
|
||||||
- VNODES
|
- VNODES
|
||||||
|
@ -263,14 +328,25 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam
|
||||||
### W
|
### W
|
||||||
|
|
||||||
- WAL
|
- WAL
|
||||||
|
- WAL_FSYNC_PERIOD
|
||||||
|
- WAL_LEVEL
|
||||||
|
- WAL_RETENTION_PERIOD
|
||||||
|
- WAL_RETENTION_SIZE
|
||||||
|
- WAL_ROLL_PERIOD
|
||||||
|
- WAL_SEGMENT_SIZE
|
||||||
|
- WATERMARK
|
||||||
- WHERE
|
- WHERE
|
||||||
|
- WINDOW_CLOSE
|
||||||
|
- WITH
|
||||||
|
- WRITE
|
||||||
|
|
||||||
### \_
|
### \_
|
||||||
|
|
||||||
- \_C0
|
- \_C0
|
||||||
- \_QSTART
|
|
||||||
- \_QSTOP
|
|
||||||
- \_QDURATION
|
- \_QDURATION
|
||||||
- \_WSTART
|
- \_QEND
|
||||||
- \_WSTOP
|
- \_QSTART
|
||||||
|
- \_ROWTS
|
||||||
- \_WDURATION
|
- \_WDURATION
|
||||||
|
- \_WEND
|
||||||
|
- \_WSTART
|
||||||
|
|
|
@ -5,16 +5,6 @@ title: SHOW Statement for Metadata
|
||||||
|
|
||||||
`SHOW` command can be used to get brief system information. To get details about metatadata, information, and status in the system, please use `select` to query the tables in database `INFORMATION_SCHEMA`.
|
`SHOW` command can be used to get brief system information. To get details about metatadata, information, and status in the system, please use `select` to query the tables in database `INFORMATION_SCHEMA`.
|
||||||
|
|
||||||
## SHOW ACCOUNTS
|
|
||||||
|
|
||||||
```sql
|
|
||||||
SHOW ACCOUNTS;
|
|
||||||
```
|
|
||||||
|
|
||||||
Shows information about tenants on the system.
|
|
||||||
|
|
||||||
Note: TDengine Enterprise Edition only.
|
|
||||||
|
|
||||||
## SHOW APPS
|
## SHOW APPS
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
|
|
|
@ -155,15 +155,15 @@ async fn demo(taos: &Taos, db: &str) -> Result<(), Error> {
|
||||||
let inserted = taos.exec_many([
|
let inserted = taos.exec_many([
|
||||||
// create super table
|
// create super table
|
||||||
"CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) \
|
"CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) \
|
||||||
TAGS (`groupid` INT, `location` BINARY(16))",
|
TAGS (`groupid` INT, `location` BINARY(24))",
|
||||||
// create child table
|
// create child table
|
||||||
"CREATE TABLE `d0` USING `meters` TAGS(0, 'Los Angles')",
|
"CREATE TABLE `d0` USING `meters` TAGS(0, 'California.LosAngles')",
|
||||||
// insert into child table
|
// insert into child table
|
||||||
"INSERT INTO `d0` values(now - 10s, 10, 116, 0.32)",
|
"INSERT INTO `d0` values(now - 10s, 10, 116, 0.32)",
|
||||||
// insert with NULL values
|
// insert with NULL values
|
||||||
"INSERT INTO `d0` values(now - 8s, NULL, NULL, NULL)",
|
"INSERT INTO `d0` values(now - 8s, NULL, NULL, NULL)",
|
||||||
// insert and automatically create table with tags if not exists
|
// insert and automatically create table with tags if not exists
|
||||||
"INSERT INTO `d1` USING `meters` TAGS(1, 'San Francisco') values(now - 9s, 10.1, 119, 0.33)",
|
"INSERT INTO `d1` USING `meters` TAGS(1, 'California.SanFrancisco') values(now - 9s, 10.1, 119, 0.33)",
|
||||||
// insert many records in a single sql
|
// insert many records in a single sql
|
||||||
"INSERT INTO `d1` values (now-8s, 10, 120, 0.33) (now - 6s, 10, 119, 0.34) (now - 4s, 11.2, 118, 0.322)",
|
"INSERT INTO `d1` values (now-8s, 10, 120, 0.33) (now - 6s, 10, 119, 0.34) (now - 4s, 11.2, 118, 0.322)",
|
||||||
]).await?;
|
]).await?;
|
||||||
|
|
|
@ -39,14 +39,14 @@ Comparing the connector support for TDengine functional features as follows.
|
||||||
|
|
||||||
### Using the native interface (taosc)
|
### Using the native interface (taosc)
|
||||||
|
|
||||||
| **Functional Features** | **Java** | **Python** | **Go** | **C#** | **Node.js** | **Rust** |
|
| **Functional Features** | **Java** | **Python** | **Go** | **C#** | **Node.js** | **Rust** |
|
||||||
| -------------- | -------- | ---------- | ------ | ------ | ----------- | -------- |
|
| ----------------------------- | ------------- | ---------- | ------------- | ------------- | ------------- | ------------- |
|
||||||
| **Connection Management** | Support | Support | Support | Support | Support | Support |
|
| **Connection Management** | Support | Support | Support | Support | Support | Support |
|
||||||
| **Regular Query** | Support | Support | Support | Support | Support | Support |
|
| **Regular Query** | Support | Support | Support | Support | Support | Support |
|
||||||
| **Parameter Binding** | Support | Support | Support | Support | Support | Support |
|
| **Parameter Binding** | Support | Support | Support | Support | Support | Support |
|
||||||
| ** TMQ ** | Support | Support | Support | Support | Support | Support |
|
| **Subscription (TMQ)** | Support | Support | Support | Support | Support | Support |
|
||||||
| **Schemaless** | Support | Support | Support | Support | Support | Support |
|
| **Schemaless** | Support | Support | Support | Support | Support | Support |
|
||||||
| **DataFrame** | Not Supported | Support | Not Supported | Not Supported | Not Supported | Not Supported |
|
| **DataFrame** | Not Supported | Support | Not Supported | Not Supported | Not Supported | Not Supported |
|
||||||
|
|
||||||
:::info
|
:::info
|
||||||
The different database framework specifications for various programming languages do not mean that all C/C++ interfaces need a wrapper.
|
The different database framework specifications for various programming languages do not mean that all C/C++ interfaces need a wrapper.
|
||||||
|
@ -54,16 +54,15 @@ The different database framework specifications for various programming language
|
||||||
|
|
||||||
### Use HTTP Interfaces (REST or WebSocket)
|
### Use HTTP Interfaces (REST or WebSocket)
|
||||||
|
|
||||||
| **Functional Features** | **Java** | **Python** | **Go** | **C# (not supported yet)** | **Node.js** | **Rust** |
|
| **Functional Features** | **Java** | **Python** | **Go** | **C#** | **Node.js** | **Rust** |
|
||||||
| ------------------------------ | -------- | ---------- | -------- | ------------------ | ----------- | -------- |
|
| -------------------------------------- | ------------- | --------------- | ------------- | ------------- | ------------- | ------------- |
|
||||||
| **Connection Management** | Support | Support | Support | N/A | Support | Support |
|
| **Connection Management** | Support | Support | Support | Support | Support | Support |
|
||||||
| **Regular Query** | Support | Support | Support | N/A | Support | Support |
|
| **Regular Query** | Support | Support | Support | Support | Support | Support |
|
||||||
| **Continous Query ** | Support | Support | Support | N/A | Support | Support |
|
| **Parameter Binding** | Not supported | Not supported | Not supported | Support | Not supported | Support |
|
||||||
| **Parameter Binding** | Not supported | Not supported | Not supported | N/A | Not supported | Support |
|
| **Subscription (TMQ) ** | Not supported | Not supported | Not supported | Not supported | Not supported | Support |
|
||||||
| ** TMQ ** | Not supported | Not supported | Not supported | N/A | Not supported | Support |
|
| **Schemaless** | Not supported | Not supported | Not supported | Not supported | Not supported | Not supported |
|
||||||
| **Schemaless** | Not supported | Not supported | Not supported | N/A | Not supported | Not supported |
|
| **Bulk Pulling (based on WebSocket) ** | Support | Support | Not Supported | support | Not Supported | Supported |
|
||||||
| **Bulk Pulling (based on WebSocket) **| Support | Support | Not Supported | N/A | Not Supported | Supported |
|
| **DataFrame** | Not supported | Support | Not supported | Not supported | Not supported | Not supported |
|
||||||
| **DataFrame** | Not supported | Support | Not supported | N/A | Not supported | Not supported |
|
|
||||||
|
|
||||||
:::warning
|
:::warning
|
||||||
|
|
||||||
|
|
|
@ -23,6 +23,7 @@ namespace TDengineExample
|
||||||
CheckRes(conn, res, "failed to insert data");
|
CheckRes(conn, res, "failed to insert data");
|
||||||
int affectedRows = TDengine.AffectRows(res);
|
int affectedRows = TDengine.AffectRows(res);
|
||||||
Console.WriteLine("affectedRows " + affectedRows);
|
Console.WriteLine("affectedRows " + affectedRows);
|
||||||
|
TDengine.FreeResult(res);
|
||||||
ExitProgram(conn, 0);
|
ExitProgram(conn, 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -38,12 +38,12 @@ public class SubscribeDemo {
|
||||||
statement.executeUpdate("create database " + DB_NAME);
|
statement.executeUpdate("create database " + DB_NAME);
|
||||||
statement.executeUpdate("use " + DB_NAME);
|
statement.executeUpdate("use " + DB_NAME);
|
||||||
statement.executeUpdate(
|
statement.executeUpdate(
|
||||||
"CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT) TAGS (`groupid` INT, `location` BINARY(16))");
|
"CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT) TAGS (`groupid` INT, `location` BINARY(24))");
|
||||||
statement.executeUpdate("CREATE TABLE `d0` USING `meters` TAGS(0, 'Los Angles')");
|
statement.executeUpdate("CREATE TABLE `d0` USING `meters` TAGS(0, 'California.LosAngles')");
|
||||||
statement.executeUpdate("INSERT INTO `d0` values(now - 10s, 0.32, 116)");
|
statement.executeUpdate("INSERT INTO `d0` values(now - 10s, 0.32, 116)");
|
||||||
statement.executeUpdate("INSERT INTO `d0` values(now - 8s, NULL, NULL)");
|
statement.executeUpdate("INSERT INTO `d0` values(now - 8s, NULL, NULL)");
|
||||||
statement.executeUpdate(
|
statement.executeUpdate(
|
||||||
"INSERT INTO `d1` USING `meters` TAGS(1, 'San Francisco') values(now - 9s, 10.1, 119)");
|
"INSERT INTO `d1` USING `meters` TAGS(1, 'California.SanFrancisco') values(now - 9s, 10.1, 119)");
|
||||||
statement.executeUpdate(
|
statement.executeUpdate(
|
||||||
"INSERT INTO `d1` values (now-8s, 10, 120) (now - 6s, 10, 119) (now - 4s, 11.2, 118)");
|
"INSERT INTO `d1` values (now-8s, 10, 120) (now - 6s, 10, 119) (now - 4s, 11.2, 118)");
|
||||||
// create topic
|
// create topic
|
||||||
|
@ -75,4 +75,4 @@ public class SubscribeDemo {
|
||||||
}
|
}
|
||||||
timer.cancel();
|
timer.cancel();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -16,7 +16,7 @@ class MockDataSource implements Iterator {
|
||||||
private int currentTbId = -1;
|
private int currentTbId = -1;
|
||||||
|
|
||||||
// mock values
|
// mock values
|
||||||
String[] location = {"LosAngeles", "SanDiego", "Hollywood", "Compton", "San Francisco"};
|
String[] location = {"California.LosAngeles", "California.SanDiego", "California.SanJose", "California.Campbell", "California.SanFrancisco"};
|
||||||
float[] current = {8.8f, 10.7f, 9.9f, 8.9f, 9.4f};
|
float[] current = {8.8f, 10.7f, 9.9f, 8.9f, 9.4f};
|
||||||
int[] voltage = {119, 116, 111, 113, 118};
|
int[] voltage = {119, 116, 111, 113, 118};
|
||||||
float[] phase = {0.32f, 0.34f, 0.33f, 0.329f, 0.141f};
|
float[] phase = {0.32f, 0.34f, 0.33f, 0.329f, 0.141f};
|
||||||
|
@ -50,4 +50,4 @@ class MockDataSource implements Iterator {
|
||||||
|
|
||||||
return sb.toString();
|
return sb.toString();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -3,11 +3,11 @@ import time
|
||||||
|
|
||||||
class MockDataSource:
|
class MockDataSource:
|
||||||
samples = [
|
samples = [
|
||||||
"8.8,119,0.32,LosAngeles,0",
|
"8.8,119,0.32,California.LosAngeles,0",
|
||||||
"10.7,116,0.34,SanDiego,1",
|
"10.7,116,0.34,California.SanDiego,1",
|
||||||
"9.9,111,0.33,Hollywood,2",
|
"9.9,111,0.33,California.SanJose,2",
|
||||||
"8.9,113,0.329,Compton,3",
|
"8.9,113,0.329,California.Campbell,3",
|
||||||
"9.4,118,0.141,San Francisco,4"
|
"9.4,118,0.141,California.SanFrancisco,4"
|
||||||
]
|
]
|
||||||
|
|
||||||
def __init__(self, tb_name_prefix, table_count):
|
def __init__(self, tb_name_prefix, table_count):
|
||||||
|
|
|
@ -12,7 +12,7 @@ async fn main() -> anyhow::Result<()> {
|
||||||
// bind table name and tags
|
// bind table name and tags
|
||||||
stmt.set_tbname_tags(
|
stmt.set_tbname_tags(
|
||||||
"d1001",
|
"d1001",
|
||||||
&[Value::VarChar("San Fransico".into()), Value::Int(2)],
|
&[Value::VarChar("California.SanFransico".into()), Value::Int(2)],
|
||||||
)?;
|
)?;
|
||||||
// bind values.
|
// bind values.
|
||||||
let values = vec![
|
let values = vec![
|
||||||
|
|
|
@ -19,13 +19,13 @@ struct Record {
|
||||||
async fn prepare(taos: Taos) -> anyhow::Result<()> {
|
async fn prepare(taos: Taos) -> anyhow::Result<()> {
|
||||||
let inserted = taos.exec_many([
|
let inserted = taos.exec_many([
|
||||||
// create child table
|
// create child table
|
||||||
"CREATE TABLE `d0` USING `meters` TAGS(0, 'Los Angles')",
|
"CREATE TABLE `d0` USING `meters` TAGS(0, 'California.LosAngles')",
|
||||||
// insert into child table
|
// insert into child table
|
||||||
"INSERT INTO `d0` values(now - 10s, 10, 116, 0.32)",
|
"INSERT INTO `d0` values(now - 10s, 10, 116, 0.32)",
|
||||||
// insert with NULL values
|
// insert with NULL values
|
||||||
"INSERT INTO `d0` values(now - 8s, NULL, NULL, NULL)",
|
"INSERT INTO `d0` values(now - 8s, NULL, NULL, NULL)",
|
||||||
// insert and automatically create table with tags if not exists
|
// insert and automatically create table with tags if not exists
|
||||||
"INSERT INTO `d1` USING `meters` TAGS(1, 'San Francisco') values(now - 9s, 10.1, 119, 0.33)",
|
"INSERT INTO `d1` USING `meters` TAGS(1, 'California.SanFrancisco') values(now - 9s, 10.1, 119, 0.33)",
|
||||||
// insert many records in a single sql
|
// insert many records in a single sql
|
||||||
"INSERT INTO `d1` values (now-8s, 10, 120, 0.33) (now - 6s, 10, 119, 0.34) (now - 4s, 11.2, 118, 0.322)",
|
"INSERT INTO `d1` values (now-8s, 10, 120, 0.33) (now - 6s, 10, 119, 0.34) (now - 4s, 11.2, 118, 0.322)",
|
||||||
]).await?;
|
]).await?;
|
||||||
|
@ -48,7 +48,7 @@ async fn main() -> anyhow::Result<()> {
|
||||||
format!("CREATE DATABASE `{db}`"),
|
format!("CREATE DATABASE `{db}`"),
|
||||||
format!("USE `{db}`"),
|
format!("USE `{db}`"),
|
||||||
// create super table
|
// create super table
|
||||||
format!("CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) TAGS (`groupid` INT, `location` BINARY(16))"),
|
format!("CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) TAGS (`groupid` INT, `location` BINARY(24))"),
|
||||||
// create topic for subscription
|
// create topic for subscription
|
||||||
format!("CREATE TOPIC tmq_meters with META AS DATABASE {db}")
|
format!("CREATE TOPIC tmq_meters with META AS DATABASE {db}")
|
||||||
])
|
])
|
||||||
|
|
|
@ -14,14 +14,14 @@ async fn main() -> anyhow::Result<()> {
|
||||||
]).await?;
|
]).await?;
|
||||||
|
|
||||||
let inserted = taos.exec("INSERT INTO
|
let inserted = taos.exec("INSERT INTO
|
||||||
power.d1001 USING power.meters TAGS('San Francisco', 2)
|
power.d1001 USING power.meters TAGS('California.SanFrancisco', 2)
|
||||||
VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000)
|
VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000)
|
||||||
('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)
|
('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)
|
||||||
power.d1002 USING power.meters TAGS('San Francisco', 3)
|
power.d1002 USING power.meters TAGS('California.SanFrancisco', 3)
|
||||||
VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)
|
VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)
|
||||||
power.d1003 USING power.meters TAGS('Los Angeles', 2)
|
power.d1003 USING power.meters TAGS('California.LosAngeles', 2)
|
||||||
VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)
|
VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)
|
||||||
power.d1004 USING power.meters TAGS('Los Angeles', 3)
|
power.d1004 USING power.meters TAGS('California.LosAngeles', 3)
|
||||||
VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)").await?;
|
VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)").await?;
|
||||||
|
|
||||||
assert_eq!(inserted, 8);
|
assert_eq!(inserted, 8);
|
||||||
|
|
|
@ -48,7 +48,7 @@ TDengine 的主要功能如下:
|
||||||
- 多种[数据导出](../operation/export)方式
|
- 多种[数据导出](../operation/export)方式
|
||||||
9. 工具
|
9. 工具
|
||||||
- 提供[交互式命令行程序(CLI)](../reference/taos-shell),便于管理集群,检查系统状态,做即席查询
|
- 提供[交互式命令行程序(CLI)](../reference/taos-shell),便于管理集群,检查系统状态,做即席查询
|
||||||
- 提供压力测试工具[taosBenchmark](../reference/taosbenchmark),用于测试 TDengine 的性能
|
- 提供压力测试工具 [taosBenchmark](../reference/taosbenchmark),用于测试 TDengine 的性能
|
||||||
10. 编程
|
10. 编程
|
||||||
- 提供各种语言的[连接器(Connector)](../connector): 如 [C/C++](../connector/cpp)、[Java](../connector/java)、[Go](../connector/go)、[Node.js](../connector/node)、[Rust](../connector/rust)、[Python](../connector/python)、[C#](../connector/csharp) 等
|
- 提供各种语言的[连接器(Connector)](../connector): 如 [C/C++](../connector/cpp)、[Java](../connector/java)、[Go](../connector/go)、[Node.js](../connector/node)、[Rust](../connector/rust)、[Python](../connector/python)、[C#](../connector/csharp) 等
|
||||||
- 支持 [REST 接口](../connector/rest-api/)
|
- 支持 [REST 接口](../connector/rest-api/)
|
||||||
|
|
|
@ -4,119 +4,118 @@ title: 数据模型和基本概念
|
||||||
description: TDengine 的数据模型和基本概念
|
description: TDengine 的数据模型和基本概念
|
||||||
---
|
---
|
||||||
|
|
||||||
为了便于解释基本概念,便于撰写示例程序,整个 TDengine 文档以智能电表作为典型时序数据场景。假设每个智能电表采集电流、电压、相位三个量,有多个智能电表,每个电表有位置 location 和分组 group ID 的静态属性. 其采集的数据类似如下的表格:
|
为了便于解释基本概念,便于撰写示例程序,整个 TDengine 文档以智能电表作为典型时序数据场景。假设每个智能电表采集电流、电压、相位三个量,有多个智能电表,每个电表有位置 Location 和分组 Group ID 的静态属性. 其采集的数据类似如下的表格:
|
||||||
|
|
||||||
<div className="center-table">
|
<div className="center-table">
|
||||||
<table>
|
<table>
|
||||||
<thead><tr>
|
<thead>
|
||||||
<th>Device ID</th>
|
<tr>
|
||||||
<th>Time Stamp</th>
|
<th rowSpan="2">Device ID</th>
|
||||||
<th colSpan="3">Collected Metrics</th>
|
<th rowSpan="2">Timestamp</th>
|
||||||
<th colSpan="2">Tags</th>
|
<th colSpan="3">Collected Metrics</th>
|
||||||
|
<th colSpan="2">Tags</th>
|
||||||
</tr>
|
</tr>
|
||||||
<tr>
|
<tr>
|
||||||
<th>Device ID</th>
|
<th>current</th>
|
||||||
<th>Time Stamp</th>
|
<th>voltage</th>
|
||||||
<th>current</th>
|
<th>phase</th>
|
||||||
<th>voltage</th>
|
<th>location</th>
|
||||||
<th>phase</th>
|
<th>groupid</th>
|
||||||
<th>location</th>
|
</tr>
|
||||||
<th>groupId</th>
|
</thead>
|
||||||
</tr>
|
<tbody>
|
||||||
</thead>
|
<tr>
|
||||||
<tbody>
|
<td>d1001</td>
|
||||||
<tr>
|
<td>1538548685000</td>
|
||||||
<td>d1001</td>
|
<td>10.3</td>
|
||||||
<td>1538548685000</td>
|
<td>219</td>
|
||||||
<td>10.3</td>
|
<td>0.31</td>
|
||||||
<td>219</td>
|
<td>California.SanFrancisco</td>
|
||||||
<td>0.31</td>
|
<td>2</td>
|
||||||
<td>California.SanFrancisco</td>
|
</tr>
|
||||||
<td>2</td>
|
<tr>
|
||||||
</tr>
|
<td>d1002</td>
|
||||||
<tr>
|
<td>1538548684000</td>
|
||||||
<td>d1002</td>
|
<td>10.2</td>
|
||||||
<td>1538548684000</td>
|
<td>220</td>
|
||||||
<td>10.2</td>
|
<td>0.23</td>
|
||||||
<td>220</td>
|
<td>California.SanFrancisco</td>
|
||||||
<td>0.23</td>
|
<td>3</td>
|
||||||
<td>California.SanFrancisco</td>
|
</tr>
|
||||||
<td>3</td>
|
<tr>
|
||||||
</tr>
|
<td>d1003</td>
|
||||||
<tr>
|
<td>1538548686500</td>
|
||||||
<td>d1003</td>
|
<td>11.5</td>
|
||||||
<td>1538548686500</td>
|
<td>221</td>
|
||||||
<td>11.5</td>
|
<td>0.35</td>
|
||||||
<td>221</td>
|
<td>California.LosAngeles</td>
|
||||||
<td>0.35</td>
|
<td>3</td>
|
||||||
<td>California.LosAngeles</td>
|
</tr>
|
||||||
<td>3</td>
|
<tr>
|
||||||
</tr>
|
<td>d1004</td>
|
||||||
<tr>
|
<td>1538548685500</td>
|
||||||
<td>d1004</td>
|
<td>13.4</td>
|
||||||
<td>1538548685500</td>
|
<td>223</td>
|
||||||
<td>13.4</td>
|
<td>0.29</td>
|
||||||
<td>223</td>
|
<td>California.LosAngeles</td>
|
||||||
<td>0.29</td>
|
<td>2</td>
|
||||||
<td>California.LosAngeles</td>
|
</tr>
|
||||||
<td>2</td>
|
<tr>
|
||||||
</tr>
|
<td>d1001</td>
|
||||||
<tr>
|
<td>1538548695000</td>
|
||||||
<td>d1001</td>
|
<td>12.6</td>
|
||||||
<td>1538548695000</td>
|
<td>218</td>
|
||||||
<td>12.6</td>
|
<td>0.33</td>
|
||||||
<td>218</td>
|
<td>California.SanFrancisco</td>
|
||||||
<td>0.33</td>
|
<td>2</td>
|
||||||
<td>California.SanFrancisco</td>
|
</tr>
|
||||||
<td>2</td>
|
<tr>
|
||||||
</tr>
|
<td>d1004</td>
|
||||||
<tr>
|
<td>1538548696600</td>
|
||||||
<td>d1004</td>
|
<td>11.8</td>
|
||||||
<td>1538548696600</td>
|
<td>221</td>
|
||||||
<td>11.8</td>
|
<td>0.28</td>
|
||||||
<td>221</td>
|
<td>California.LosAngeles</td>
|
||||||
<td>0.28</td>
|
<td>2</td>
|
||||||
<td>California.LosAngeles</td>
|
</tr>
|
||||||
<td>2</td>
|
<tr>
|
||||||
</tr>
|
<td>d1002</td>
|
||||||
<tr>
|
<td>1538548696650</td>
|
||||||
<td>d1002</td>
|
<td>10.3</td>
|
||||||
<td>1538548696650</td>
|
<td>218</td>
|
||||||
<td>10.3</td>
|
<td>0.25</td>
|
||||||
<td>218</td>
|
<td>California.SanFrancisco</td>
|
||||||
<td>0.25</td>
|
<td>3</td>
|
||||||
<td>California.SanFrancisco</td>
|
</tr>
|
||||||
<td>3</td>
|
<tr>
|
||||||
</tr>
|
<td>d1001</td>
|
||||||
<tr>
|
<td>1538548696800</td>
|
||||||
<td>d1001</td>
|
<td>12.3</td>
|
||||||
<td>1538548696800</td>
|
<td>221</td>
|
||||||
<td>12.3</td>
|
<td>0.31</td>
|
||||||
<td>221</td>
|
<td>California.SanFrancisco</td>
|
||||||
<td>0.31</td>
|
<td>2</td>
|
||||||
<td>California.SanFrancisco</td>
|
</tr>
|
||||||
<td>2</td>
|
</tbody>
|
||||||
</tr>
|
|
||||||
</tbody>
|
|
||||||
</table>
|
</table>
|
||||||
<a href="#model_table1">表 1:智能电表数据示例</a>
|
<a name="#model_table1">表 1. 智能电表数据示例</a>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
每一条记录都有设备 ID,时间戳,采集的物理量以及每个设备相关的静态标签。每个设备是受外界的触发,或按照设定的周期采集数据。采集的数据点是时序的,是一个数据流。
|
每一条记录都有设备 ID、时间戳、采集的物理量(如上表中的 `current`、`voltage` 和 `phase`)以及每个设备相关的静态标签(`location` 和 `groupid`)。每个设备是受外界的触发,或按照设定的周期采集数据。采集的数据点是时序的,是一个数据流。
|
||||||
|
|
||||||
## 采集量 (Metric)
|
## 采集量(Metric)
|
||||||
|
|
||||||
采集量是指传感器、设备或其他类型采集点采集的物理量,比如电流、电压、温度、压力、GPS 位置等,是随时间变化的,数据类型可以是整型、浮点型、布尔型,也可是字符串。随着时间的推移,存储的采集量的数据量越来越大。智能电表示例中的电流、电压、相位就是采集量。
|
采集量是指传感器、设备或其他类型采集点采集的物理量,比如电流、电压、温度、压力、GPS 位置等,是随时间变化的,数据类型可以是整型、浮点型、布尔型,也可是字符串。随着时间的推移,存储的采集量的数据量越来越大。智能电表示例中的电流、电压、相位就是采集量。
|
||||||
|
|
||||||
## 标签 (Label/Tag)
|
## 标签(Label/Tag)
|
||||||
|
|
||||||
标签是指传感器、设备或其他类型采集点的静态属性,不是随时间变化的,比如设备型号、颜色、设备的所在地等,数据类型可以是任何类型。虽然是静态的,但 TDengine 容许用户修改、删除或增加标签值。与采集量不一样的是,随时间的推移,存储的标签的数据量不会有什么变化。智能电表示例中的location与groupId就是标签。
|
标签是指传感器、设备或其他类型采集点的静态属性,不是随时间变化的,比如设备型号、颜色、设备的所在地等,数据类型可以是任何类型。虽然是静态的,但 TDengine 容许用户修改、删除或增加标签值。与采集量不一样的是,随时间的推移,存储的标签的数据量不会有什么变化。智能电表示例中的 `location` 与 `groupid` 就是标签。
|
||||||
|
|
||||||
## 数据采集点 (Data Collection Point)
|
## 数据采集点(Data Collection Point)
|
||||||
|
|
||||||
数据采集点是指按照预设时间周期或受事件触发采集物理量的硬件或软件。一个数据采集点可以采集一个或多个采集量,**但这些采集量都是同一时刻采集的,具有相同的时间戳**。对于复杂的设备,往往有多个数据采集点,每个数据采集点采集的周期都可能不一样,而且完全独立,不同步。比如对于一台汽车,有数据采集点专门采集 GPS 位置,有数据采集点专门采集发动机状态,有数据采集点专门采集车内的环境,这样一台汽车就有三个数据采集点。智能电表示例中的d1001, d1002, d1003, d1004等就是数据采集点。
|
数据采集点是指按照预设时间周期或受事件触发采集物理量的硬件或软件。一个数据采集点可以采集一个或多个采集量,**但这些采集量都是同一时刻采集的,具有相同的时间戳**。对于复杂的设备,往往有多个数据采集点,每个数据采集点采集的周期都可能不一样,而且完全独立,不同步。比如对于一台汽车,有数据采集点专门采集 GPS 位置,有数据采集点专门采集发动机状态,有数据采集点专门采集车内的环境,这样一台汽车就有三个数据采集点。智能电表示例中的 d1001、d1002、d1003、d1004 等就是数据采集点。
|
||||||
|
|
||||||
## 表 (Table)
|
## 表(Table)
|
||||||
|
|
||||||
因为采集量一般是结构化数据,同时为降低学习门槛,TDengine 采用传统的关系型数据库模型管理数据。用户需要先创建库,然后创建表,之后才能插入或查询数据。
|
因为采集量一般是结构化数据,同时为降低学习门槛,TDengine 采用传统的关系型数据库模型管理数据。用户需要先创建库,然后创建表,之后才能插入或查询数据。
|
||||||
|
|
||||||
|
@ -129,50 +128,56 @@ description: TDengine 的数据模型和基本概念
|
||||||
|
|
||||||
如果采用传统的方式,将多个数据采集点的数据写入一张表,由于网络延时不可控,不同数据采集点的数据到达服务器的时序是无法保证的,写入操作是要有锁保护的,而且一个数据采集点的数据是难以保证连续存储在一起的。**采用一个数据采集点一张表的方式,能最大程度的保证单个数据采集点的插入和查询的性能是最优的。**
|
如果采用传统的方式,将多个数据采集点的数据写入一张表,由于网络延时不可控,不同数据采集点的数据到达服务器的时序是无法保证的,写入操作是要有锁保护的,而且一个数据采集点的数据是难以保证连续存储在一起的。**采用一个数据采集点一张表的方式,能最大程度的保证单个数据采集点的插入和查询的性能是最优的。**
|
||||||
|
|
||||||
TDengine 建议用数据采集点的名字(如上表中的 D1001)来做表名。每个数据采集点可能同时采集多个采集量(如上表中的 current,voltage,phase),每个采集量对应一张表中的一列,数据类型可以是整型、浮点型、字符串等。除此之外,表的第一列必须是时间戳,即数据类型为 timestamp。对采集量,TDengine 将自动按照时间戳建立索引,但对采集量本身不建任何索引。数据用列式存储方式保存。
|
TDengine 建议用数据采集点的名字(如上表中的 d1001)来做表名。每个数据采集点可能同时采集多个采集量(如上表中的 `current`、`voltage` 和 `phase`),每个采集量对应一张表中的一列,数据类型可以是整型、浮点型、字符串等。除此之外,表的第一列必须是时间戳,即数据类型为 Timestamp。对采集量,TDengine 将自动按照时间戳建立索引,但对采集量本身不建任何索引。数据用列式存储方式保存。
|
||||||
|
|
||||||
对于复杂的设备,比如汽车,它有多个数据采集点,那么就需要为一台汽车建立多张表。
|
对于复杂的设备,比如汽车,它有多个数据采集点,那么就需要为一辆汽车建立多张表。
|
||||||
|
|
||||||
|
## 超级表(STable)
|
||||||
## 超级表 (STable)
|
|
||||||
|
|
||||||
由于一个数据采集点一张表,导致表的数量巨增,难以管理,而且应用经常需要做采集点之间的聚合操作,聚合的操作也变得复杂起来。为解决这个问题,TDengine 引入超级表(Super Table,简称为 STable)的概念。
|
由于一个数据采集点一张表,导致表的数量巨增,难以管理,而且应用经常需要做采集点之间的聚合操作,聚合的操作也变得复杂起来。为解决这个问题,TDengine 引入超级表(Super Table,简称为 STable)的概念。
|
||||||
|
|
||||||
超级表是指某一特定类型的数据采集点的集合。同一类型的数据采集点,其表的结构是完全一样的,但每个表(数据采集点)的静态属性(标签)是不一样的。描述一个超级表(某一特定类型的数据采集点的集合),除需要定义采集量的表结构之外,还需要定义其标签的 schema,标签的数据类型可以是整数、浮点数、字符串,标签可以有多个,可以事后增加、删除或修改。如果整个系统有 N 个不同类型的数据采集点,就需要建立 N 个超级表。
|
超级表是指某一特定类型的数据采集点的集合。同一类型的数据采集点,其表的结构是完全一样的,但每个表(数据采集点)的静态属性(标签)是不一样的。描述一个超级表(某一特定类型的数据采集点的集合),除需要定义采集量的表结构之外,还需要定义其标签的 Schema,标签的数据类型可以是整数、浮点数、字符串、JSON,标签可以有多个,可以事后增加、删除或修改。如果整个系统有 N 个不同类型的数据采集点,就需要建立 N 个超级表。
|
||||||
|
|
||||||
在 TDengine 的设计里,**表用来代表一个具体的数据采集点,超级表用来代表一组相同类型的数据采集点集合**。智能电表示例中,我们可以创建一个超级表meters.
|
在 TDengine 的设计里,**表用来代表一个具体的数据采集点,超级表用来代表一组相同类型的数据采集点集合**。智能电表示例中,我们可以创建一个超级表 `meters`.
|
||||||
|
|
||||||
## 子表 (Subtable)
|
## 子表(Subtable)
|
||||||
|
|
||||||
当为某个具体数据采集点创建表时,用户可以使用超级表的定义做模板,同时指定该具体采集点(表)的具体标签值来创建该表。**通过超级表创建的表称之为子表**。正常的表与子表的差异在于:
|
当为某个具体数据采集点创建表时,用户可以使用超级表的定义做模板,同时指定该具体采集点(表)的具体标签值来创建该表。**通过超级表创建的表称之为子表**。正常的表与子表的差异在于:
|
||||||
|
|
||||||
1. 子表就是表,因此所有正常表的SQL操作都可以在子表上执行。
|
1. 子表就是表,因此所有正常表的 SQL 操作都可以在子表上执行。
|
||||||
2. 子表在正常表的基础上有扩展,它是带有静态标签的,而且这些标签可以事后增加、删除、修改,而正常的表没有。
|
2. 子表在正常表的基础上有扩展,它是带有静态标签的,而且这些标签可以事后增加、删除、修改,而正常的表没有。
|
||||||
3. 子表一定属于一张超级表,但普通表不属于任何超级表
|
3. 子表一定属于一张超级表,但普通表不属于任何超级表
|
||||||
4. 普通表无法转为子表,子表也无法转为普通表。
|
4. 普通表无法转为子表,子表也无法转为普通表。
|
||||||
|
|
||||||
超级表与与基于超级表建立的子表之间的关系表现在:
|
超级表与与基于超级表建立的子表之间的关系表现在:
|
||||||
|
|
||||||
1. 一张超级表包含有多张子表,这些子表具有相同的采集量 schema,但带有不同的标签值。
|
1. 一张超级表包含有多张子表,这些子表具有相同的采集量 Schema,但带有不同的标签值。
|
||||||
2. 不能通过子表调整数据或标签的模式,对于超级表的数据模式修改立即对所有的子表生效。
|
2. 不能通过子表调整数据或标签的模式,对于超级表的数据模式修改立即对所有的子表生效。
|
||||||
3. 超级表只定义一个模板,自身不存储任何数据或标签信息。因此,不能向一个超级表写入数据,只能将数据写入子表中。
|
3. 超级表只定义一个模板,自身不存储任何数据或标签信息。因此,不能向一个超级表写入数据,只能将数据写入子表中。
|
||||||
|
|
||||||
查询既可以在表上进行,也可以在超级表上进行。针对超级表的查询,TDengine 将把所有子表中的数据视为一个整体数据集进行处理,会先把满足标签过滤条件的表从超级表中找出来,然后再扫描这些表的时序数据,进行聚合操作,这样需要扫描的数据集会大幅减少,从而显著提高查询的性能。本质上,TDengine 通过对超级表查询的支持,实现了多个同类数据采集点的高效聚合。
|
查询既可以在表上进行,也可以在超级表上进行。针对超级表的查询,TDengine 将把所有子表中的数据视为一个整体数据集进行处理,会先把满足标签过滤条件的表从超级表中找出来,然后再扫描这些表的时序数据,进行聚合操作,这样需要扫描的数据集会大幅减少,从而显著提高查询的性能。本质上,TDengine 通过对超级表查询的支持,实现了多个同类数据采集点的高效聚合。
|
||||||
|
|
||||||
TDengine系统建议给一个数据采集点建表,需要通过超级表建表,而不是建普通表。在智能电表的示例中,我们可以通过超级表meters创建子表d1001, d1002, d1003, d1004等。
|
TDengine 系统建议给一个数据采集点建表,需要通过超级表建表,而不是建普通表。在智能电表的示例中,我们可以通过超级表 meters 创建子表 d1001、d1002、d1003、d1004 等。
|
||||||
|
|
||||||
为了更好地理解超级与子表的关系,可以参考下面关于智能电表数据模型的示意图。 
|
为了更好地理解采集量、标签、超级与子表的关系,可以参考下面关于智能电表数据模型的示意图。
|
||||||
|
|
||||||
## 库 (database)
|
<figure>
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
<center><figcaption>图 1. 智能电表数据模型示意图</figcaption></center>
|
||||||
|
</figure>
|
||||||
|
|
||||||
|
## 库(Database)
|
||||||
|
|
||||||
库是指一组表的集合。TDengine 容许一个运行实例有多个库,而且每个库可以配置不同的存储策略。不同类型的数据采集点往往具有不同的数据特征,包括数据采集频率的高低,数据保留时间的长短,副本的数目,数据块的大小,是否允许更新数据等等。为了在各种场景下 TDengine 都能最大效率的工作,TDengine 建议将不同数据特征的超级表创建在不同的库里。
|
库是指一组表的集合。TDengine 容许一个运行实例有多个库,而且每个库可以配置不同的存储策略。不同类型的数据采集点往往具有不同的数据特征,包括数据采集频率的高低,数据保留时间的长短,副本的数目,数据块的大小,是否允许更新数据等等。为了在各种场景下 TDengine 都能最大效率的工作,TDengine 建议将不同数据特征的超级表创建在不同的库里。
|
||||||
|
|
||||||
一个库里,可以有一到多个超级表,但一个超级表只属于一个库。一个超级表所拥有的子表全部存在一个库里。
|
一个库里,可以有一到多个超级表,但一个超级表只属于一个库。一个超级表所拥有的子表全部存在一个库里。
|
||||||
|
|
||||||
## FQDN & End Point
|
## FQDN & Endpoint
|
||||||
|
|
||||||
FQDN (fully qualified domain name, 完全限定域名)是 Internet 上特定计算机或主机的完整域名。FQDN 由两部分组成:主机名和域名。例如,假设邮件服务器的 FQDN 可能是 mail.tdengine.com。主机名是 mail,主机位于域名 tdengine.com 中。DNS(Domain Name System),负责将 FQDN 翻译成 IP,是互联网应用的寻址方式。对于没有 DNS 的系统,可以通过配置 hosts 文件来解决。
|
FQDN(Fully Qualified Domain Name,完全限定域名)是 Internet 上特定计算机或主机的完整域名。FQDN 由两部分组成:主机名和域名。例如,假设邮件服务器的 FQDN 可能是 mail.tdengine.com。主机名是 mail,主机位于域名 tdengine.com 中。DNS(Domain Name System),负责将 FQDN 翻译成 IP,是互联网应用的寻址方式。对于没有 DNS 的系统,可以通过配置 hosts 文件来解决。
|
||||||
|
|
||||||
TDengine 集群的每个节点是由 End Point 来唯一标识的,End Point 是由 FQDN 外加 Port 组成,比如 h1.tdengine.com:6030。这样当 IP 发生变化的时候,我们依然可以使用 FQDN 来动态找到节点,不需要更改集群的任何配置。而且采用 FQDN,便于内网和外网对同一个集群的统一访问。
|
TDengine 集群的每个节点是由 Endpoint 来唯一标识的,Endpoint 是由 FQDN 外加 Port 组成,比如 h1.tdengine.com:6030。这样当 IP 发生变化的时候,我们依然可以使用 FQDN 来动态找到节点,不需要更改集群的任何配置。而且采用 FQDN,便于内网和外网对同一个集群的统一访问。
|
||||||
|
|
||||||
TDengine 不建议采用直接的 IP 地址访问集群,不利于管理。不了解 FQDN 概念,请看博文[《一篇文章说清楚 TDengine 的 FQDN》](https://www.taosdata.com/blog/2020/09/11/1824.html)。
|
TDengine 不建议采用直接的 IP 地址访问集群,不利于管理。不了解 FQDN 概念,请看博文[《一篇文章说清楚 TDengine 的 FQDN》](https://www.taosdata.com/blog/2020/09/11/1824.html)。
|
||||||
|
|
|
@ -4,11 +4,11 @@ title: 通过 Docker 快速体验 TDengine
|
||||||
description: 使用 Docker 快速体验 TDengine 的高效写入和查询
|
description: 使用 Docker 快速体验 TDengine 的高效写入和查询
|
||||||
---
|
---
|
||||||
|
|
||||||
本节首先介绍如何通过 Docker 快速体验 TDengine,然后介绍如何在 Docker 环境下体验 TDengine 的写入和查询功能。如果你不熟悉 Docker,请使用[安装包的方式快速体验](../../get-started/package/)。如果您希望为 TDengine 贡献代码或对内部技术实现感兴趣,请参考 [TDengine GitHub 主页](https://github.com/taosdata/TDengine) 下载源码构建和安装.
|
本节首先介绍如何通过 Docker 快速体验 TDengine,然后介绍如何在 Docker 环境下体验 TDengine 的写入和查询功能。如果你不熟悉 Docker,请使用[安装包的方式快速体验](../../get-started/package/)。如果您希望为 TDengine 贡献代码或对内部技术实现感兴趣,请参考 [TDengine GitHub 主页](https://github.com/taosdata/TDengine)下载源码构建和安装。
|
||||||
|
|
||||||
## 启动 TDengine
|
## 启动 TDengine
|
||||||
|
|
||||||
如果已经安装了 docker, 只需执行下面的命令。
|
如果已经安装了 Docker,只需执行下面的命令:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
docker run -d -p 6030:6030 -p 6041:6041 -p 6043-6049:6043-6049 -p 6043-6049:6043-6049/udp tdengine/tdengine
|
docker run -d -p 6030:6030 -p 6041:6041 -p 6043-6049:6043-6049 -p 6043-6049:6043-6049/udp tdengine/tdengine
|
||||||
|
@ -16,84 +16,84 @@ docker run -d -p 6030:6030 -p 6041:6041 -p 6043-6049:6043-6049 -p 6043-6049:6043
|
||||||
|
|
||||||
注意:TDengine 3.0 服务端仅使用 6030 TCP 端口。6041 为 taosAdapter 所使用提供 REST 服务端口。6043-6049 为 taosAdapter 提供第三方应用接入所使用端口,可根据需要选择是否打开。
|
注意:TDengine 3.0 服务端仅使用 6030 TCP 端口。6041 为 taosAdapter 所使用提供 REST 服务端口。6043-6049 为 taosAdapter 提供第三方应用接入所使用端口,可根据需要选择是否打开。
|
||||||
|
|
||||||
确定该容器已经启动并且在正常运行
|
确定该容器已经启动并且在正常运行。
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
docker ps
|
docker ps
|
||||||
```
|
```
|
||||||
|
|
||||||
进入该容器并执行 bash
|
进入该容器并执行 `bash`
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
docker exec -it <container name> bash
|
docker exec -it <container name> bash
|
||||||
```
|
```
|
||||||
|
|
||||||
然后就可以执行相关的 Linux 命令操作和访问 TDengine
|
然后就可以执行相关的 Linux 命令操作和访问 TDengine。
|
||||||
|
|
||||||
注: Docker 工具自身的下载和使用请参考 [Docker 官网文档](https://docs.docker.com/get-docker/)。
|
注:Docker 工具自身的下载和使用请参考 [Docker 官网文档](https://docs.docker.com/get-docker/)。
|
||||||
|
|
||||||
## 运行 TDengine CLI
|
## 运行 TDengine CLI
|
||||||
|
|
||||||
进入容器,执行 taos
|
进入容器,执行 `taos`:
|
||||||
|
|
||||||
```
|
```
|
||||||
$ taos
|
$ taos
|
||||||
|
|
||||||
taos>
|
taos>
|
||||||
|
|
||||||
```
|
```
|
||||||
|
|
||||||
## 写入数据
|
## 使用 taosBenchmark 体验写入速度
|
||||||
|
|
||||||
可以使用 TDengine 的自带工具 taosBenchmark 快速体验 TDengine 的写入。
|
可以使用 TDengine 的自带工具 taosBenchmark 快速体验 TDengine 的写入速度。
|
||||||
|
|
||||||
进入容器,启动 taosBenchmark:
|
启动 TDengine 的服务,在 Linux 或 Windows 终端执行 `taosBenchmark`(曾命名为 `taosdemo`):
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
$ taosBenchmark
|
$ taosBenchmark
|
||||||
|
```
|
||||||
```
|
|
||||||
|
|
||||||
该命令将在数据库 test 下面自动创建一张超级表 meters,该超级表下有 1 万张表,表名为 "d0" 到 "d9999",每张表有 1 万条记录,每条记录有 (ts, current, voltage, phase) 四个字段,时间戳从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:40:09 999",每张表带有标签 location 和 groupId,groupId 被设置为 1 到 10, location 被设置为 "San Francisco" 或者 "Los Angeles"等城市名称。
|
该命令将在数据库 `test` 下面自动创建一张超级表 `meters`,该超级表下有 1 万张表,表名为 `d0` 到 `d9999`,每张表有 1 万条记录,每条记录有 `ts`、`current`、`voltage`、`phase` 四个字段,时间戳从 2017-07-14 10:40:00 000 到 2017-07-14 10:40:09 999,每张表带有标签 `location` 和 `groupId`,groupId 被设置为 1 到 10,location 被设置为 `California.Campbell`、`California.Cupertino`、`California.LosAngeles`、`California.MountainView`、`California.PaloAlto`、`California.SanDiego`、`California.SanFrancisco`、`California.SanJose`、`California.SantaClara` 或者 `California.Sunnyvale`。
|
||||||
|
|
||||||
这条命令很快完成 1 亿条记录的插入。具体时间取决于硬件性能。
|
这条命令很快完成 1 亿条记录的插入。具体时间取决于硬件性能,即使在一台普通的 PC 服务器往往也仅需十几秒。
|
||||||
|
|
||||||
taosBenchmark 命令本身带有很多选项,配置表的数目、记录条数等等,您可以设置不同参数进行体验,请执行 `taosBenchmark --help` 详细列出。taosBenchmark 详细使用方法请参照 [taosBenchmark 参考手册](../../reference/taosbenchmark)。
|
taosBenchmark 命令本身带有很多选项,配置表的数目、记录条数等等,您可以设置不同参数进行体验,请执行 `taosBenchmark --help` 详细列出。taosBenchmark 详细使用方法请参照[如何使用 taosBenchmark 对 TDengine 进行性能测试](https://www.taosdata.com/2021/10/09/3111.html)和 [taosBenchmark 参考手册](../../reference/taosbenchmark)。
|
||||||
|
|
||||||
## 体验查询
|
## 使用 TDengine CLI 体验查询速度
|
||||||
|
|
||||||
使用上述 taosBenchmark 插入数据后,可以在 TDengine CLI 输入查询命令,体验查询速度。。
|
使用上述 `taosBenchmark` 插入数据后,可以在 TDengine CLI(taos)输入查询命令,体验查询速度。
|
||||||
|
|
||||||
查询超级表下记录总条数:
|
查询超级表 `meters` 下的记录总条数:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
taos> select count(*) from test.meters;
|
SELECT COUNT(*) FROM test.meters;
|
||||||
```
|
```
|
||||||
|
|
||||||
查询 1 亿条记录的平均值、最大值、最小值等:
|
查询 1 亿条记录的平均值、最大值、最小值等:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
taos> select avg(current), max(voltage), min(phase) from test.meters;
|
SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters;
|
||||||
```
|
```
|
||||||
|
|
||||||
查询 location="San Francisco" 的记录总条数:
|
查询 location = "California.SanFrancisco" 的记录总条数:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
taos> select count(*) from test.meters where location="San Francisco";
|
SELECT COUNT(*) FROM test.meters WHERE location = "California.SanFrancisco";
|
||||||
```
|
```
|
||||||
|
|
||||||
查询 groupId=10 的所有记录的平均值、最大值、最小值等:
|
查询 groupId = 10 的所有记录的平均值、最大值、最小值等:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
taos> select avg(current), max(voltage), min(phase) from test.meters where groupId=10;
|
SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters WHERE groupId = 10;
|
||||||
```
|
```
|
||||||
|
|
||||||
对表 d10 按 10s 进行平均值、最大值和最小值聚合统计:
|
对表 `d10` 按每 10 秒进行平均值、最大值和最小值聚合统计:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
taos> select avg(current), max(voltage), min(phase) from test.d10 interval(10s);
|
SELECT FIRST(ts), AVG(current), MAX(voltage), MIN(phase) FROM test.d10 INTERVAL(10s);
|
||||||
```
|
```
|
||||||
|
|
||||||
|
在上面的查询中,你选择的是区间内的第一个时间戳(ts),另一种选择方式是 `_wstart`,它将给出时间窗口的开始。关于窗口查询的更多信息,参见[特色查询](../../taos-sql/distinguished/)。
|
||||||
|
|
||||||
## 其它
|
## 其它
|
||||||
|
|
||||||
更多关于在 Docker 环境下使用 TDengine 的细节,请参考 [在 Docker 下使用 TDengine](../../reference/docker)
|
更多关于在 Docker 环境下使用 TDengine 的细节,请参考 [在 Docker 下使用 TDengine](../../reference/docker)。
|
||||||
|
|
|
@ -10,23 +10,24 @@ import PkgListV3 from "/components/PkgListV3";
|
||||||
|
|
||||||
您可以[用 Docker 立即体验](../../get-started/docker/) TDengine。如果您希望对 TDengine 贡献代码或对内部实现感兴趣,请参考我们的 [TDengine GitHub 主页](https://github.com/taosdata/TDengine) 下载源码构建和安装.
|
您可以[用 Docker 立即体验](../../get-started/docker/) TDengine。如果您希望对 TDengine 贡献代码或对内部实现感兴趣,请参考我们的 [TDengine GitHub 主页](https://github.com/taosdata/TDengine) 下载源码构建和安装。
|
||||||
|
|
||||||
TDengine 完整的软件包包括服务端(taosd)、用于与第三方系统对接并提供 RESTful 接口的 taosAdapter、应用驱动(taosc)、命令行程序 (CLI,taos) 和一些工具软件。目前 taosAdapter 仅在 Linux 系统上安装和运行,后续将支持 Windows、macOS 等系统。TDengine 除了提供多种语言的连接器之外,还通过 [taosAdapter](../../reference/taosadapter/) 提供 [RESTful 接口](../../connector/rest-api/)。
|
TDengine 完整的软件包包括服务端(taosd)、应用驱动(taosc)、用于与第三方系统对接并提供 RESTful 接口的 taosAdapter、命令行程序(CLI,taos)和一些工具软件。目前 taosAdapter 仅在 Linux 系统上安装和运行,后续将支持 Windows、macOS 等系统。TDengine 除了提供多种语言的连接器之外,还通过 [taosAdapter](../../reference/taosadapter/) 提供 [RESTful 接口](../../connector/rest-api/)。
|
||||||
|
|
||||||
为方便使用,标准的服务端安装包包含了 taosd、taosAdapter、taosc、taos、taosdump、taosBenchmark、TDinsight 安装脚本和示例代码;如果您只需要用到服务端程序和客户端连接的 C/C++ 语言支持,也可以仅下载 lite 版本的安装包。
|
为方便使用,标准的服务端安装包包含了 taosd、taosAdapter、taosc、taos、taosdump、taosBenchmark、TDinsight 安装脚本和示例代码;如果您只需要用到服务端程序和客户端连接的 C/C++ 语言支持,也可以仅下载 Lite 版本的安装包。
|
||||||
|
|
||||||
在 Linux 系统上,TDengine 开源版本提供 deb 和 rpm 格式安装包,用户可以根据自己的运行环境选择合适的安装包。其中 deb 支持 Debian/Ubuntu 及衍生系统,rpm 支持 CentOS/RHEL/SUSE 及衍生系统。同时我们也为企业用户提供 tar.gz 格式安装包,也支持通过 `apt-get` 工具从线上进行安装。需要注意的是,rpm 和 deb 包不含 taosdump 和 TDinsight 安装脚本,这些工具需要通过安装 taosTool 包获得。TDengine 也提供 Windows x64 平台的安装包。
|
在 Linux 系统上,TDengine 社区版提供 Deb 和 RPM 格式安装包,用户可以根据自己的运行环境选择合适的安装包。其中 Deb 支持 Debian/Ubuntu 及其衍生系统,RPM 支持 CentOS/RHEL/SUSE 及其衍生系统。同时我们也为企业用户提供 tar.gz 格式安装包,也支持通过 `apt-get` 工具从线上进行安装。需要注意的是,RPM 和 Deb 包不含 `taosdump` 和 TDinsight 安装脚本,这些工具需要通过安装 taosTool 包获得。TDengine 也提供 Windows x64 平台的安装包。
|
||||||
|
|
||||||
## 安装
|
## 安装
|
||||||
|
|
||||||
<Tabs>
|
<Tabs>
|
||||||
<TabItem label="Deb 安装" value="debinst">
|
<TabItem label="Deb 安装" value="debinst">
|
||||||
|
|
||||||
1. 从列表中下载获得 deb 安装包;
|
1. 从列表中下载获得 Deb 安装包;
|
||||||
<PkgListV3 type={6}/>
|
<PkgListV3 type={6}/>
|
||||||
2. 进入到安装包所在目录,执行如下的安装命令:
|
2. 进入到安装包所在目录,执行如下的安装命令:
|
||||||
|
|
||||||
|
> 请将 `<version>` 替换为下载的安装包版本
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# 替换为下载的安装包版本
|
|
||||||
sudo dpkg -i TDengine-server-<version>-Linux-x64.deb
|
sudo dpkg -i TDengine-server-<version>-Linux-x64.deb
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -34,12 +35,13 @@ sudo dpkg -i TDengine-server-<version>-Linux-x64.deb
|
||||||
|
|
||||||
<TabItem label="RPM 安装" value="rpminst">
|
<TabItem label="RPM 安装" value="rpminst">
|
||||||
|
|
||||||
1. 从列表中下载获得 rpm 安装包;
|
1. 从列表中下载获得 RPM 安装包;
|
||||||
<PkgListV3 type={5}/>
|
<PkgListV3 type={5}/>
|
||||||
2. 进入到安装包所在目录,执行如下的安装命令:
|
2. 进入到安装包所在目录,执行如下的安装命令:
|
||||||
|
|
||||||
|
> 请将 `<version>` 替换为下载的安装包版本
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# 替换为下载的安装包版本
|
|
||||||
sudo rpm -ivh TDengine-server-<version>-Linux-x64.rpm
|
sudo rpm -ivh TDengine-server-<version>-Linux-x64.rpm
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -48,44 +50,46 @@ sudo rpm -ivh TDengine-server-<version>-Linux-x64.rpm
|
||||||
<TabItem label="tar.gz 安装" value="tarinst">
|
<TabItem label="tar.gz 安装" value="tarinst">
|
||||||
|
|
||||||
1. 从列表中下载获得 tar.gz 安装包;
|
1. 从列表中下载获得 tar.gz 安装包;
|
||||||
<PkgListV3 type={0}/>
|
<PkgListV3 type={0}/>
|
||||||
2. 进入到安装包所在目录,先解压文件后,进入子目录,执行其中的 install.sh 安装脚本:
|
2. 进入到安装包所在目录,使用 `tar` 解压安装包;
|
||||||
|
3. 进入到安装包所在目录,先解压文件后,进入子目录,执行其中的 install.sh 安装脚本。
|
||||||
|
|
||||||
|
> 请将 `<version>` 替换为下载的安装包版本
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# 替换为下载的安装包版本
|
|
||||||
tar -zxvf TDengine-server-<version>-Linux-x64.tar.gz
|
tar -zxvf TDengine-server-<version>-Linux-x64.tar.gz
|
||||||
```
|
```
|
||||||
|
|
||||||
解压后进入相应路径,执行
|
解压文件后,进入相应子目录,执行其中的 `install.sh` 安装脚本:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
sudo ./install.sh
|
sudo ./install.sh
|
||||||
```
|
```
|
||||||
|
|
||||||
:::info
|
:::info
|
||||||
install.sh 安装脚本在执行过程中,会通过命令行交互界面询问一些配置信息。如果希望采取无交互安装方式,那么可以用 -e no 参数来执行 install.sh 脚本。运行 `./install.sh -h` 指令可以查看所有参数的详细说明信息。
|
install.sh 安装脚本在执行过程中,会通过命令行交互界面询问一些配置信息。如果希望采取无交互安装方式,那么可以运行 `./install.sh -e no`。运行 `./install.sh -h` 指令可以查看所有参数的详细说明信息。
|
||||||
:::
|
:::
|
||||||
|
|
||||||
</TabItem>
|
</TabItem>
|
||||||
|
|
||||||
<TabItem value="apt-get" label="apt-get">
|
<TabItem value="apt-get" label="apt-get">
|
||||||
可以使用 apt-get 工具从官方仓库安装。
|
可以使用 `apt-get` 工具从官方仓库安装。
|
||||||
|
|
||||||
**安装包仓库**
|
**配置包仓库**
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
wget -qO - http://repos.taosdata.com/tdengine.key | sudo apt-key add -
|
wget -qO - http://repos.taosdata.com/tdengine.key | sudo apt-key add -
|
||||||
echo "deb [arch=amd64] http://repos.taosdata.com/tdengine-stable stable main" | sudo tee /etc/apt/sources.list.d/tdengine-stable.list
|
echo "deb [arch=amd64] http://repos.taosdata.com/tdengine-stable stable main" | sudo tee /etc/apt/sources.list.d/tdengine-stable.list
|
||||||
```
|
```
|
||||||
|
|
||||||
如果安装 Beta 版需要安装包仓库
|
如果安装 Beta 版需要配置包仓库:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
wget -qO - http://repos.taosdata.com/tdengine.key | sudo apt-key add -
|
wget -qO - http://repos.taosdata.com/tdengine.key | sudo apt-key add -
|
||||||
echo "deb [arch=amd64] http://repos.taosdata.com/tdengine-beta beta main" | sudo tee /etc/apt/sources.list.d/tdengine-beta.list
|
echo "deb [arch=amd64] http://repos.taosdata.com/tdengine-beta beta main" | sudo tee /etc/apt/sources.list.d/tdengine-beta.list
|
||||||
```
|
```
|
||||||
|
|
||||||
**使用 apt-get 命令安装**
|
**使用 `apt-get` 命令安装**
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
sudo apt-get update
|
sudo apt-get update
|
||||||
|
@ -94,26 +98,26 @@ sudo apt-get install tdengine
|
||||||
```
|
```
|
||||||
|
|
||||||
:::tip
|
:::tip
|
||||||
apt-get 方式只适用于 Debian 或 Ubuntu 系统
|
apt-get 方式只适用于 Debian 或 Ubuntu 系统。
|
||||||
::::
|
:::
|
||||||
</TabItem>
|
</TabItem>
|
||||||
<TabItem label="Windows 安装" value="windows">
|
<TabItem label="Windows 安装" value="windows">
|
||||||
|
|
||||||
注意:目前 TDengine 在 Windows 平台上只支持 Windows server 2016/2019 和 Windows 10/11 系统版本。
|
注意:目前 TDengine 在 Windows 平台上只支持 Windows Server 2016/2019 和 Windows 10/11。
|
||||||
|
|
||||||
1. 从列表中下载获得 exe 安装程序;
|
1. 从列表中下载获得 exe 安装程序;
|
||||||
<PkgListV3 type={3}/>
|
<PkgListV3 type={3}/>
|
||||||
2. 运行可执行程序来安装 TDengine。
|
2. 运行可执行程序来安装 TDengine。
|
||||||
|
|
||||||
</TabItem>
|
</TabItem>
|
||||||
</Tabs>
|
</Tabs>
|
||||||
|
|
||||||
:::info
|
:::info
|
||||||
下载其他组件、最新 Beta 版及之前版本的安装包,请点击[发布历史页面](../../releases/tdengine)
|
下载其他组件、最新 Beta 版及之前版本的安装包,请点击[发布历史页面](../../releases/tdengine)。
|
||||||
:::
|
:::
|
||||||
|
|
||||||
:::note
|
:::note
|
||||||
当安装第一个节点时,出现 Enter FQDN:提示的时候,不需要输入任何内容。只有当安装第二个或以后更多的节点时,才需要输入已有集群中任何一个可用节点的 FQDN,支持该新节点加入集群。当然也可以不输入,而是在新节点启动前,配置到新节点的配置文件中。
|
当安装第一个节点时,出现 `Enter FQDN:` 提示的时候,不需要输入任何内容。只有当安装第二个或以后更多的节点时,才需要输入已有集群中任何一个可用节点的 FQDN,支持该新节点加入集群。当然也可以不输入,而是在新节点启动前,配置到新节点的配置文件中。
|
||||||
|
|
||||||
:::
|
:::
|
||||||
|
|
||||||
|
@ -148,7 +152,7 @@ Active: inactive (dead)
|
||||||
|
|
||||||
如果 TDengine 服务正常工作,那么您可以通过 TDengine 的命令行程序 `taos` 来访问并体验 TDengine。
|
如果 TDengine 服务正常工作,那么您可以通过 TDengine 的命令行程序 `taos` 来访问并体验 TDengine。
|
||||||
|
|
||||||
systemctl 命令汇总:
|
如下 `systemctl` 命令可以帮助你管理 TDengine 服务:
|
||||||
|
|
||||||
- 启动服务进程:`systemctl start taosd`
|
- 启动服务进程:`systemctl start taosd`
|
||||||
|
|
||||||
|
@ -160,7 +164,7 @@ systemctl 命令汇总:
|
||||||
|
|
||||||
:::info
|
:::info
|
||||||
|
|
||||||
- systemctl 命令需要 _root_ 权限来运行,如果您非 _root_ 用户,请在命令前添加 sudo 。
|
- `systemctl` 命令需要 _root_ 权限来运行,如果您非 _root_ 用户,请在命令前添加 `sudo`。
|
||||||
- `systemctl stop taosd` 指令在执行后并不会马上停止 TDengine 服务,而是会等待系统中必要的落盘工作正常完成。在数据量很大的情况下,这可能会消耗较长时间。
|
- `systemctl stop taosd` 指令在执行后并不会马上停止 TDengine 服务,而是会等待系统中必要的落盘工作正常完成。在数据量很大的情况下,这可能会消耗较长时间。
|
||||||
- 如果系统中不支持 `systemd`,也可以用手动运行 `/usr/local/taos/bin/taosd` 方式启动 TDengine 服务。
|
- 如果系统中不支持 `systemd`,也可以用手动运行 `/usr/local/taos/bin/taosd` 方式启动 TDengine 服务。
|
||||||
|
|
||||||
|
@ -170,87 +174,93 @@ systemctl 命令汇总:
|
||||||
|
|
||||||
<TabItem label="Windows 系统" value="windows">
|
<TabItem label="Windows 系统" value="windows">
|
||||||
|
|
||||||
安装后,在 C:\TDengine 目录下,运行 taosd.exe 来启动 TDengine 服务进程。
|
安装后,在 `C:\TDengine` 目录下,运行 `taosd.exe` 来启动 TDengine 服务进程。
|
||||||
|
|
||||||
</TabItem>
|
</TabItem>
|
||||||
</Tabs>
|
</Tabs>
|
||||||
|
|
||||||
## TDengine 命令行 (CLI)
|
## TDengine 命令行(CLI)
|
||||||
|
|
||||||
为便于检查 TDengine 的状态,执行数据库 (Database) 的各种即席(Ad Hoc)查询,TDengine 提供一命令行应用程序(以下简称为 TDengine CLI) taos。要进入 TDengine 命令行,您只要在安装有 TDengine 的 Linux 终端执行 `taos` 即可,也可以在安装有 TDengine 的 Windows 终端的 C:\TDengine 目录下,运行 taos.exe 来启动 TDengine 命令行。
|
为便于检查 TDengine 的状态,执行数据库(Database)的各种即席(Ad Hoc)查询,TDengine 提供一个命令行应用程序(以下简称为 TDengine CLI)taos。要进入 TDengine 命令行,您只要在安装有 TDengine 的 Linux 终端执行 `taos` 即可,也可以在安装有 TDengine 的 Windows 终端的 C:\TDengine 目录下,运行 taos.exe 来启动 TDengine 命令行。
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
taos
|
taos
|
||||||
```
|
```
|
||||||
|
|
||||||
如果连接服务成功,将会打印出欢迎消息和版本信息。如果失败,则会打印错误消息出来(请参考 [FAQ](/train-faq/faq) 来解决终端连接服务端失败的问题)。 TDengine CLI 的提示符号如下:
|
如果连接服务成功,将会打印出欢迎消息和版本信息。如果失败,则会打印错误消息出来(请参考 [FAQ](/train-faq/faq) 来解决终端连接服务端失败的问题)。TDengine CLI 的提示符号如下:
|
||||||
|
|
||||||
```cmd
|
```cmd
|
||||||
taos>
|
taos>
|
||||||
```
|
```
|
||||||
|
|
||||||
在 TDengine CLI 中,用户可以通过 SQL 命令来创建/删除数据库、表等,并进行数据库(database)插入查询操作。在终端中运行的 SQL 语句需要以分号结束来运行。示例:
|
在 TDengine CLI 中,用户可以通过 SQL 命令来创建/删除数据库、表等,并进行数据库(Database)插入查询操作。在终端中运行的 SQL 语句需要以分号(;)结束来运行。示例:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
create database demo;
|
CREATE DATABASE demo;
|
||||||
use demo;
|
USE demo;
|
||||||
create table t (ts timestamp, speed int);
|
CREATE TABLE t (ts TIMESTAMP, speed INT);
|
||||||
insert into t values ('2019-07-15 00:00:00', 10);
|
INSERT INTO t VALUES ('2019-07-15 00:00:00', 10);
|
||||||
insert into t values ('2019-07-15 01:00:00', 20);
|
INSERT INTO t VALUES ('2019-07-15 01:00:00', 20);
|
||||||
select * from t;
|
SELECT * FROM t;
|
||||||
|
|
||||||
ts | speed |
|
ts | speed |
|
||||||
========================================
|
========================================
|
||||||
2019-07-15 00:00:00.000 | 10 |
|
2019-07-15 00:00:00.000 | 10 |
|
||||||
2019-07-15 01:00:00.000 | 20 |
|
2019-07-15 01:00:00.000 | 20 |
|
||||||
|
|
||||||
Query OK, 2 row(s) in set (0.003128s)
|
Query OK, 2 row(s) in set (0.003128s)
|
||||||
```
|
```
|
||||||
|
|
||||||
除执行 SQL 语句外,系统管理员还可以从 TDengine CLI 进行检查系统运行状态、添加删除用户账号等操作。TDengine CLI 连同应用驱动也可以独立安装在 Linux 或 Windows 机器上运行,更多细节请参考 [这里](../../reference/taos-shell/)
|
除执行 SQL 语句外,系统管理员还可以从 TDengine CLI 进行检查系统运行状态、添加删除用户账号等操作。TDengine CLI 连同应用驱动也可以独立安装在 Linux 或 Windows 机器上运行,更多细节请参考 [TDengine 命令行](../../reference/taos-shell/)。
|
||||||
|
|
||||||
## 使用 taosBenchmark 体验写入速度
|
## 使用 taosBenchmark 体验写入速度
|
||||||
|
|
||||||
启动 TDengine 的服务,在 Linux 或 windows 终端执行 `taosBenchmark` (曾命名为 `taosdemo`):
|
可以使用 TDengine 的自带工具 taosBenchmark 快速体验 TDengine 的写入速度。
|
||||||
|
|
||||||
|
启动 TDengine 的服务,在 Linux 或 Windows 终端执行 `taosBenchmark`(曾命名为 `taosdemo`):
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
taosBenchmark
|
$ taosBenchmark
|
||||||
```
|
```
|
||||||
|
|
||||||
该命令将在数据库 test 下面自动创建一张超级表 meters,该超级表下有 1 万张表,表名为 "d0" 到 "d9999",每张表有 1 万条记录,每条记录有 (ts, current, voltage, phase) 四个字段,时间戳从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:40:09 999",每张表带有标签 location 和 groupId,groupId 被设置为 1 到 10, location 被设置为 "California.SanFrancisco" 或者 "California.LosAngeles"。
|
该命令将在数据库 `test` 下面自动创建一张超级表 `meters`,该超级表下有 1 万张表,表名为 `d0` 到 `d9999`,每张表有 1 万条记录,每条记录有 `ts`、`current`、`voltage`、`phase` 四个字段,时间戳从 2017-07-14 10:40:00 000 到 2017-07-14 10:40:09 999,每张表带有标签 `location` 和 `groupId`,groupId 被设置为 1 到 10,location 被设置为 `California.Campbell`、`California.Cupertino`、`California.LosAngeles`、`California.MountainView`、`California.PaloAlto`、`California.SanDiego`、`California.SanFrancisco`、`California.SanJose`、`California.SantaClara` 或者 `California.Sunnyvale`。
|
||||||
|
|
||||||
这条命令很快完成 1 亿条记录的插入。具体时间取决于硬件性能,即使在一台普通的 PC 服务器往往也仅需十几秒。
|
这条命令很快完成 1 亿条记录的插入。具体时间取决于硬件性能,即使在一台普通的 PC 服务器往往也仅需十几秒。
|
||||||
|
|
||||||
taosBenchmark 命令本身带有很多选项,配置表的数目、记录条数等等,您可以设置不同参数进行体验,请执行 `taosBenchmark --help` 详细列出。taosBenchmark 详细使用方法请参照 [如何使用 taosBenchmark 对 TDengine 进行性能测试](https://www.taosdata.com/2021/10/09/3111.html)。
|
taosBenchmark 命令本身带有很多选项,配置表的数目、记录条数等等,您可以设置不同参数进行体验,请执行 `taosBenchmark --help` 详细列出。taosBenchmark 详细使用方法请参照[如何使用 taosBenchmark 对 TDengine 进行性能测试](https://www.taosdata.com/2021/10/09/3111.html)和 [taosBenchmark 参考手册](../../reference/taosbenchmark)。
|
||||||
|
|
||||||
## 使用 TDengine CLI 体验查询速度
|
## 使用 TDengine CLI 体验查询速度
|
||||||
|
|
||||||
使用上述 taosBenchmark 插入数据后,可以在 TDengine CLI 输入查询命令,体验查询速度。
|
使用上述 `taosBenchmark` 插入数据后,可以在 TDengine CLI(taos)输入查询命令,体验查询速度。
|
||||||
|
|
||||||
查询超级表下记录总条数:
|
查询超级表 `meters` 下的记录总条数:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
taos> select count(*) from test.meters;
|
SELECT COUNT(*) FROM test.meters;
|
||||||
```
|
```
|
||||||
|
|
||||||
查询 1 亿条记录的平均值、最大值、最小值等:
|
查询 1 亿条记录的平均值、最大值、最小值等:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
taos> select avg(current), max(voltage), min(phase) from test.meters;
|
SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters;
|
||||||
```
|
```
|
||||||
|
|
||||||
查询 location="California.SanFrancisco" 的记录总条数:
|
查询 location = "California.SanFrancisco" 的记录总条数:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
taos> select count(*) from test.meters where location="California.SanFrancisco";
|
SELECT COUNT(*) FROM test.meters WHERE location = "California.SanFrancisco";
|
||||||
```
|
```
|
||||||
|
|
||||||
查询 groupId=10 的所有记录的平均值、最大值、最小值等:
|
查询 groupId = 10 的所有记录的平均值、最大值、最小值等:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
taos> select avg(current), max(voltage), min(phase) from test.meters where groupId=10;
|
SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters WHERE groupId = 10;
|
||||||
```
|
```
|
||||||
|
|
||||||
对表 d10 按 10s 进行平均值、最大值和最小值聚合统计:
|
对表 `d10` 按每 10 秒进行平均值、最大值和最小值聚合统计:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
taos> select avg(current), max(voltage), min(phase) from test.d10 interval(10s);
|
SELECT FIRST(ts), AVG(current), MAX(voltage), MIN(phase) FROM test.d10 INTERVAL(10s);
|
||||||
```
|
```
|
||||||
|
|
||||||
|
在上面的查询中,你选择的是区间内的第一个时间戳(ts),另一种选择方式是 `_wstart`,它将给出时间窗口的开始。关于窗口查询的更多信息,参见[特色查询](../../taos-sql/distinguished/)。
|
||||||
|
|
|
@ -116,7 +116,7 @@ aggfn为函数名的占位符,需要修改为自己的函数名,如l2norm。
|
||||||
|
|
||||||
参数的具体含义是:
|
参数的具体含义是:
|
||||||
- inputDataBlock: 输入的数据块
|
- inputDataBlock: 输入的数据块
|
||||||
- resultColumn: 输出列。输出列
|
- resultColumn: 输出列
|
||||||
|
|
||||||
### 聚合接口函数
|
### 聚合接口函数
|
||||||
|
|
||||||
|
|
|
@ -1,7 +1,5 @@
|
||||||
```java
|
```java
|
||||||
{{#include docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java}}
|
{{#include docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java}}
|
||||||
{{#include docs/examples/java/src/main/java/com/taos/example/MetersDeserializer.java}}
|
|
||||||
{{#include docs/examples/java/src/main/java/com/taos/example/Meters.java}}
|
|
||||||
```
|
```
|
||||||
```java
|
```java
|
||||||
{{#include docs/examples/java/src/main/java/com/taos/example/MetersDeserializer.java}}
|
{{#include docs/examples/java/src/main/java/com/taos/example/MetersDeserializer.java}}
|
||||||
|
|
|
@ -155,15 +155,15 @@ async fn demo(taos: &Taos, db: &str) -> Result<(), Error> {
|
||||||
let inserted = taos.exec_many([
|
let inserted = taos.exec_many([
|
||||||
// create super table
|
// create super table
|
||||||
"CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) \
|
"CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) \
|
||||||
TAGS (`groupid` INT, `location` BINARY(16))",
|
TAGS (`groupid` INT, `location` BINARY(24))",
|
||||||
// create child table
|
// create child table
|
||||||
"CREATE TABLE `d0` USING `meters` TAGS(0, 'Los Angles')",
|
"CREATE TABLE `d0` USING `meters` TAGS(0, 'California.LosAngeles')",
|
||||||
// insert into child table
|
// insert into child table
|
||||||
"INSERT INTO `d0` values(now - 10s, 10, 116, 0.32)",
|
"INSERT INTO `d0` values(now - 10s, 10, 116, 0.32)",
|
||||||
// insert with NULL values
|
// insert with NULL values
|
||||||
"INSERT INTO `d0` values(now - 8s, NULL, NULL, NULL)",
|
"INSERT INTO `d0` values(now - 8s, NULL, NULL, NULL)",
|
||||||
// insert and automatically create table with tags if not exists
|
// insert and automatically create table with tags if not exists
|
||||||
"INSERT INTO `d1` USING `meters` TAGS(1, 'San Francisco') values(now - 9s, 10.1, 119, 0.33)",
|
"INSERT INTO `d1` USING `meters` TAGS(1, 'California.SanFrancisco') values(now - 9s, 10.1, 119, 0.33)",
|
||||||
// insert many records in a single sql
|
// insert many records in a single sql
|
||||||
"INSERT INTO `d1` values (now-8s, 10, 120, 0.33) (now - 6s, 10, 119, 0.34) (now - 4s, 11.2, 118, 0.322)",
|
"INSERT INTO `d1` values (now-8s, 10, 120, 0.33) (now - 6s, 10, 119, 0.34) (now - 4s, 11.2, 118, 0.322)",
|
||||||
]).await?;
|
]).await?;
|
||||||
|
|
|
@ -41,14 +41,14 @@ TDengine 版本更新往往会增加新的功能特性,列表中的连接器
|
||||||
|
|
||||||
### 使用原生接口(taosc)
|
### 使用原生接口(taosc)
|
||||||
|
|
||||||
| **功能特性** | **Java** | **Python** | **Go** | **C#** | **Node.js** | **Rust** |
|
| **功能特性** | **Java** | **Python** | **Go** | **C#** | **Node.js** | **Rust** |
|
||||||
| -------------- | -------- | ---------- | ------ | ------ | ----------- | -------- |
|
| ------------------- | -------- | ---------- | ------ | ------ | ----------- | -------- |
|
||||||
| **连接管理** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
|
| **连接管理** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
|
||||||
| **普通查询** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
|
| **普通查询** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
|
||||||
| **参数绑定** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
|
| **参数绑定** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
|
||||||
| ** TMQ ** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
|
| **数据订阅(TMQ)** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
|
||||||
| **Schemaless** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
|
| **Schemaless** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
|
||||||
| **DataFrame** | 不支持 | 支持 | 不支持 | 不支持 | 不支持 | 不支持 |
|
| **DataFrame** | 不支持 | 支持 | 不支持 | 不支持 | 不支持 | 不支持 |
|
||||||
|
|
||||||
:::info
|
:::info
|
||||||
由于不同编程语言数据库框架规范不同,并不意味着所有 C/C++ 接口都需要对应封装支持。
|
由于不同编程语言数据库框架规范不同,并不意味着所有 C/C++ 接口都需要对应封装支持。
|
||||||
|
@ -56,16 +56,15 @@ TDengine 版本更新往往会增加新的功能特性,列表中的连接器
|
||||||
|
|
||||||
### 使用 http (REST 或 WebSocket) 接口
|
### 使用 http (REST 或 WebSocket) 接口
|
||||||
|
|
||||||
| **功能特性** | **Java** | **Python** | **Go** | **C#(暂不支持)** | **Node.js** | **Rust** |
|
| **功能特性** | **Java** | **Python** | **Go** | **C#** | **Node.js** | **Rust** |
|
||||||
| ------------------------------ | -------- | ---------- | -------- | ------------------ | ----------- | -------- |
|
| ------------------------------ | -------- | ---------- | -------- | -------- | ----------- | -------- |
|
||||||
| **连接管理** | 支持 | 支持 | 支持 | N/A | 支持 | 支持 |
|
| **连接管理** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
|
||||||
| **普通查询** | 支持 | 支持 | 支持 | N/A | 支持 | 支持 |
|
| **普通查询** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
|
||||||
| **连续查询** | 支持 | 支持 | 支持 | N/A | 支持 | 支持 |
|
| **参数绑定** | 暂不支持 | 暂不支持 | 暂不支持 | 支持 | 暂不支持 | 支持 |
|
||||||
| **参数绑定** | 不支持 | 暂不支持 | 暂不支持 | N/A | 不支持 | 支持 |
|
| **数据订阅(TMQ)** | 暂不支持 | 暂不支持 | 暂不支持 | 暂不支持 | 暂不支持 | 支持 |
|
||||||
| ** TMQ ** | 不支持 | 暂不支持 | 暂不支持 | N/A | 不支持 | 支持 |
|
| **Schemaless** | 暂不支持 | 暂不支持 | 暂不支持 | 暂不支持 | 暂不支持 | 暂不支持 |
|
||||||
| **Schemaless** | 暂不支持 | 暂不支持 | 暂不支持 | N/A | 不支持 | 暂不支持 |
|
| **批量拉取(基于 WebSocket)** | 支持 | 支持 | 暂不支持 | 支持 | 暂不支持 | 支持 |
|
||||||
| **批量拉取(基于 WebSocket)** | 支持 | 支持 | 暂不支持 | N/A | 不支持 | 支持 |
|
| **DataFrame** | 不支持 | 支持 | 不支持 | 不支持 | 不支持 | 不支持 |
|
||||||
| **DataFrame** | 不支持 | 支持 | 不支持 | N/A | 不支持 | 不支持 |
|
|
||||||
|
|
||||||
:::warning
|
:::warning
|
||||||
|
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
---
|
---
|
||||||
sidebar_label: 支持的数据类型
|
sidebar_label: 数据类型
|
||||||
title: 支持的数据类型
|
title: 数据类型
|
||||||
description: "TDengine 支持的数据类型: 时间戳、浮点型、JSON 类型等"
|
description: "TDengine 支持的数据类型: 时间戳、浮点型、JSON 类型等"
|
||||||
---
|
---
|
||||||
|
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
---
|
---
|
||||||
sidebar_label: 数据库管理
|
sidebar_label: 数据库
|
||||||
title: 数据库管理
|
title: 数据库
|
||||||
description: "创建、删除数据库,查看、修改数据库参数"
|
description: "创建、删除数据库,查看、修改数据库参数"
|
||||||
---
|
---
|
||||||
|
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
---
|
---
|
||||||
title: 表管理
|
title: 表
|
||||||
sidebar_label: 表
|
sidebar_label: 表
|
||||||
description: 对表的各种管理操作
|
description: 对表的各种管理操作
|
||||||
---
|
---
|
||||||
|
@ -23,10 +23,7 @@ create_subtable_clause: {
|
||||||
}
|
}
|
||||||
|
|
||||||
create_definition:
|
create_definition:
|
||||||
col_name column_definition
|
col_name column_type
|
||||||
|
|
||||||
column_definition:
|
|
||||||
type_name [comment 'string_value']
|
|
||||||
|
|
||||||
table_options:
|
table_options:
|
||||||
table_option ...
|
table_option ...
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
---
|
---
|
||||||
sidebar_label: 超级表管理
|
sidebar_label: 超级表
|
||||||
title: 超级表 STable 管理
|
title: 超级表
|
||||||
description: 对超级表的各种管理操作
|
description: 对超级表的各种管理操作
|
||||||
---
|
---
|
||||||
|
|
||||||
|
|
|
@ -53,11 +53,6 @@ window_clause: {
|
||||||
| STATE_WINDOW(col)
|
| STATE_WINDOW(col)
|
||||||
| INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [WATERMARK(watermark_val)] [FILL(fill_mod_and_val)]
|
| INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [WATERMARK(watermark_val)] [FILL(fill_mod_and_val)]
|
||||||
|
|
||||||
changes_option: {
|
|
||||||
DURATION duration_val
|
|
||||||
| ROWS rows_val
|
|
||||||
}
|
|
||||||
|
|
||||||
group_by_clause:
|
group_by_clause:
|
||||||
GROUP BY expr [, expr] ... HAVING condition
|
GROUP BY expr [, expr] ... HAVING condition
|
||||||
|
|
||||||
|
@ -109,7 +104,7 @@ SELECT location, groupid, current FROM d1001 LIMIT 2;
|
||||||
|
|
||||||
### 结果去重
|
### 结果去重
|
||||||
|
|
||||||
`DISINTCT` 关键字可以对结果集中的一列或多列进行去重,去除的列既可以是标签列也可以是数据列。
|
`DISTINCT` 关键字可以对结果集中的一列或多列进行去重,去除的列既可以是标签列也可以是数据列。
|
||||||
|
|
||||||
对标签列去重:
|
对标签列去重:
|
||||||
|
|
||||||
|
@ -127,7 +122,6 @@ SELECT DISTINCT col_name [, col_name ...] FROM tb_name;
|
||||||
|
|
||||||
1. cfg 文件中的配置参数 maxNumOfDistinctRes 将对 DISTINCT 能够输出的数据行数进行限制。其最小值是 100000,最大值是 100000000,默认值是 10000000。如果实际计算结果超出了这个限制,那么会仅输出这个数量范围内的部分。
|
1. cfg 文件中的配置参数 maxNumOfDistinctRes 将对 DISTINCT 能够输出的数据行数进行限制。其最小值是 100000,最大值是 100000000,默认值是 10000000。如果实际计算结果超出了这个限制,那么会仅输出这个数量范围内的部分。
|
||||||
2. 由于浮点数天然的精度机制原因,在特定情况下,对 FLOAT 和 DOUBLE 列使用 DISTINCT 并不能保证输出值的完全唯一性。
|
2. 由于浮点数天然的精度机制原因,在特定情况下,对 FLOAT 和 DOUBLE 列使用 DISTINCT 并不能保证输出值的完全唯一性。
|
||||||
3. 在当前版本下,DISTINCT 不能在嵌套查询的子查询中使用,也不能与聚合函数、GROUP BY、或 JOIN 在同一条语句中混用。
|
|
||||||
|
|
||||||
:::
|
:::
|
||||||
|
|
||||||
|
|
|
@ -127,7 +127,7 @@ SELECT COS(field_name) FROM { tb_name | stb_name } [WHERE clause]
|
||||||
SELECT FLOOR(field_name) FROM { tb_name | stb_name } [WHERE clause];
|
SELECT FLOOR(field_name) FROM { tb_name | stb_name } [WHERE clause];
|
||||||
```
|
```
|
||||||
|
|
||||||
**功能说明**:获得指定字段的向下取整数的结果。
|
**功能说明**:获得指定字段的向下取整数的结果。
|
||||||
其他使用说明参见 CEIL 函数描述。
|
其他使用说明参见 CEIL 函数描述。
|
||||||
|
|
||||||
#### LOG
|
#### LOG
|
||||||
|
@ -174,7 +174,7 @@ SELECT POW(field_name, power) FROM { tb_name | stb_name } [WHERE clause]
|
||||||
SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];
|
SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];
|
||||||
```
|
```
|
||||||
|
|
||||||
**功能说明**:获得指定字段的四舍五入的结果。
|
**功能说明**:获得指定字段的四舍五入的结果。
|
||||||
其他使用说明参见 CEIL 函数描述。
|
其他使用说明参见 CEIL 函数描述。
|
||||||
|
|
||||||
|
|
||||||
|
@ -435,7 +435,7 @@ SELECT TO_ISO8601(ts[, timezone]) FROM { tb_name | stb_name } [WHERE clause];
|
||||||
**使用说明**:
|
**使用说明**:
|
||||||
|
|
||||||
- timezone 参数允许输入的时区格式为: [z/Z, +/-hhmm, +/-hh, +/-hh:mm]。例如,TO_ISO8601(1, "+00:00")。
|
- timezone 参数允许输入的时区格式为: [z/Z, +/-hhmm, +/-hh, +/-hh:mm]。例如,TO_ISO8601(1, "+00:00")。
|
||||||
- 如果输入是表示 UNIX 时间戳的整形,返回格式精度由时间戳的位数决定;
|
- 如果输入是表示 UNIX 时间戳的整形,返回格式精度由时间戳的位数决定;
|
||||||
- 如果输入是 TIMESTAMP 类型的列,返回格式的时间戳精度与当前 DATABASE 设置的时间精度一致。
|
- 如果输入是 TIMESTAMP 类型的列,返回格式的时间戳精度与当前 DATABASE 设置的时间精度一致。
|
||||||
|
|
||||||
|
|
||||||
|
@ -770,14 +770,14 @@ SELECT HISTOGRAM(field_name,bin_type, bin_description, normalized) FROM tb_nam
|
||||||
|
|
||||||
**详细说明**:
|
**详细说明**:
|
||||||
- bin_type 用户指定的分桶类型, 有效输入类型为"user_input“, ”linear_bin", "log_bin"。
|
- bin_type 用户指定的分桶类型,有效输入类型为 "user_input"、"linear_bin"、"log_bin"。
|
||||||
- bin_description 描述如何生成分桶区间,针对三种桶类型,分别为以下描述格式(均为 JSON 格式字符串):
|
- bin_description 描述如何生成分桶区间,针对三种桶类型,分别为以下描述格式(均为 JSON 格式字符串):
|
||||||
- "user_input": "[1, 3, 5, 7]"
|
- "user_input": "[1, 3, 5, 7]"
|
||||||
用户指定 bin 的具体数值。
|
用户指定 bin 的具体数值。
|
||||||
|
|
||||||
- "linear_bin": "{"start": 0.0, "width": 5.0, "count": 5, "infinity": true}"
|
- "linear_bin": "{"start": 0.0, "width": 5.0, "count": 5, "infinity": true}"
|
||||||
"start" 表示数据起始点,"width" 表示每次 bin 偏移量, "count" 为 bin 的总数,"infinity" 表示是否添加(-inf, inf)作为区间起点和终点,
|
"start" 表示数据起始点,"width" 表示每次 bin 偏移量, "count" 为 bin 的总数,"infinity" 表示是否添加(-inf, inf)作为区间起点和终点,
|
||||||
生成区间为[-inf, 0.0, 5.0, 10.0, 15.0, 20.0, +inf]。
|
生成区间为[-inf, 0.0, 5.0, 10.0, 15.0, 20.0, +inf]。
|
||||||
|
|
||||||
- "log_bin": "{"start":1.0, "factor": 2.0, "count": 5, "infinity": true}"
|
- "log_bin": "{"start":1.0, "factor": 2.0, "count": 5, "infinity": true}"
|
||||||
"start" 表示数据起始点,"factor" 表示按指数递增的因子,"count" 为 bin 的总数,"infinity" 表示是否添加(-inf, inf)作为区间起点和终点,
|
"start" 表示数据起始点,"factor" 表示按指数递增的因子,"count" 为 bin 的总数,"infinity" 表示是否添加(-inf, inf)作为区间起点和终点,
|
||||||
生成区间为[-inf, 1.0, 2.0, 4.0, 8.0, 16.0, +inf]。
|
生成区间为[-inf, 1.0, 2.0, 4.0, 8.0, 16.0, +inf]。
|
||||||
|
@ -918,7 +918,7 @@ SELECT MAX(field_name) FROM { tb_name | stb_name } [WHERE clause];
|
||||||
|
|
||||||
**返回数据类型**:同应用的字段。
|
**返回数据类型**:同应用的字段。
|
||||||
|
|
||||||
**适用数据类型**:数值类型,时间戳类型。
|
**适用数据类型**:数值类型。
|
||||||
|
|
||||||
**适用于**:表和超级表。
|
**适用于**:表和超级表。
|
||||||
|
|
||||||
|
@ -933,7 +933,7 @@ SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause];
|
||||||
|
|
||||||
**返回数据类型**:同应用的字段。
|
**返回数据类型**:同应用的字段。
|
||||||
|
|
||||||
**适用数据类型**:数值类型,时间戳类型。
|
**适用数据类型**:数值类型。
|
||||||
|
|
||||||
**适用于**:表和超级表。
|
**适用于**:表和超级表。
|
||||||
|
|
||||||
|
@ -969,7 +969,7 @@ SELECT SAMPLE(field_name, K) FROM { tb_name | stb_name } [WHERE clause]
|
||||||
|
|
||||||
**适用于**:表和超级表。
|
**适用于**:表和超级表。
|
||||||
|
|
||||||
**使用说明**:
|
**使用说明**:
|
||||||
|
|
||||||
- 不能参与表达式计算;该函数可以应用在普通表和超级表上;
|
- 不能参与表达式计算;该函数可以应用在普通表和超级表上;
|
||||||
- 使用在超级表上的时候,需要搭配 PARTITION by tbname 使用,将结果强制规约到单个时间线。
|
- 使用在超级表上的时候,需要搭配 PARTITION by tbname 使用,将结果强制规约到单个时间线。
|
||||||
|
@ -1047,10 +1047,10 @@ SELECT CSUM(field_name) FROM { tb_name | stb_name } [WHERE clause]
|
||||||
|
|
||||||
**适用于**:表和超级表。
|
**适用于**:表和超级表。
|
||||||
|
|
||||||
**使用说明**:
|
**使用说明**:
|
||||||
|
|
||||||
- 不支持 +、-、*、/ 运算,如 csum(col1) + csum(col2)。
|
- 不支持 +、-、*、/ 运算,如 csum(col1) + csum(col2)。
|
||||||
- 只能与聚合(Aggregation)函数一起使用。 该函数可以应用在普通表和超级表上。
|
- 只能与聚合(Aggregation)函数一起使用。 该函数可以应用在普通表和超级表上。
|
||||||
- 使用在超级表上的时候,需要搭配 PARTITION BY tbname使用,将结果强制规约到单个时间线。
|
- 使用在超级表上的时候,需要搭配 PARTITION BY tbname使用,将结果强制规约到单个时间线。
|
||||||
|
|
||||||
|
|
||||||
|
@ -1068,8 +1068,8 @@ SELECT DERIVATIVE(field_name, time_interval, ignore_negative) FROM tb_name [WHER
|
||||||
|
|
||||||
**适用于**:表和超级表。
|
**适用于**:表和超级表。
|
||||||
|
|
||||||
**使用说明**:
|
**使用说明**:
|
||||||
|
|
||||||
- DERIVATIVE 函数可以在由 PARTITION BY 划分出单独时间线的情况下用于超级表(也即 PARTITION BY tbname)。
|
- DERIVATIVE 函数可以在由 PARTITION BY 划分出单独时间线的情况下用于超级表(也即 PARTITION BY tbname)。
|
||||||
- 可以与选择相关联的列一起使用。 例如: select \_rowts, DERIVATIVE() from。
|
- 可以与选择相关联的列一起使用。 例如: select \_rowts, DERIVATIVE() from。
|
||||||
|
|
||||||
|
@ -1087,7 +1087,7 @@ SELECT {DIFF(field_name, ignore_negative) | DIFF(field_name)} FROM tb_name [WHER
|
||||||
|
|
||||||
**适用于**:表和超级表。
|
**适用于**:表和超级表。
|
||||||
|
|
||||||
**使用说明**:
|
**使用说明**:
|
||||||
|
|
||||||
- 输出结果行数是范围内总行数减一,第一行没有结果输出。
|
- 输出结果行数是范围内总行数减一,第一行没有结果输出。
|
||||||
- 可以与选择相关联的列一起使用。 例如: select \_rowts, DIFF() from。
|
- 可以与选择相关联的列一起使用。 例如: select \_rowts, DIFF() from。
|
||||||
|
@ -1124,9 +1124,9 @@ SELECT MAVG(field_name, K) FROM { tb_name | stb_name } [WHERE clause]
|
||||||
|
|
||||||
**适用于**:表和超级表。
|
**适用于**:表和超级表。
|
||||||
|
|
||||||
**使用说明**:
|
**使用说明**:
|
||||||
|
|
||||||
- 不支持 +、-、*、/ 运算,如 mavg(col1, k1) + mavg(col2, k1);
|
- 不支持 +、-、*、/ 运算,如 mavg(col1, k1) + mavg(col2, k1);
|
||||||
- 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用;
|
- 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用;
|
||||||
- 使用在超级表上的时候,需要搭配 PARTITION BY tbname使用,将结果强制规约到单个时间线。
|
- 使用在超级表上的时候,需要搭配 PARTITION BY tbname使用,将结果强制规约到单个时间线。
|
||||||
|
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
---
|
---
|
||||||
sidebar_label: 时序数据特色查询
|
sidebar_label: 特色查询
|
||||||
title: 时序数据特色查询
|
title: 特色查询
|
||||||
description: TDengine 提供的时序数据特有的查询功能
|
description: TDengine 提供的时序数据特有的查询功能
|
||||||
---
|
---
|
||||||
|
|
||||||
|
|
|
@ -44,7 +44,7 @@ window_clause: {
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
CREATE STREAM avg_vol_s INTO avg_vol AS
|
CREATE STREAM avg_vol_s INTO avg_vol AS
|
||||||
SELECT _wstartts, count(*), avg(voltage) FROM meters PARTITION BY tbname INTERVAL(1m) SLIDING(30s);
|
SELECT _wstart, count(*), avg(voltage) FROM meters PARTITION BY tbname INTERVAL(1m) SLIDING(30s);
|
||||||
```
|
```
|
||||||
|
|
||||||
## 流式计算的 partition
|
## 流式计算的 partition
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
---
|
---
|
||||||
sidebar_label: JSON 类型使用说明
|
sidebar_label: JSON 类型
|
||||||
title: JSON 类型使用说明
|
title: JSON 类型
|
||||||
description: 对 JSON 类型如何使用的详细说明
|
description: 对 JSON 类型如何使用的详细说明
|
||||||
---
|
---
|
||||||
|
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
---
|
---
|
||||||
title: 转义字符说明
|
title: 转义字符
|
||||||
sidebar_label: 转义字符
|
sidebar_label: 转义字符
|
||||||
description: TDengine 中使用转义字符的详细规则
|
description: TDengine 中使用转义字符的详细规则
|
||||||
---
|
---
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
---
|
---
|
||||||
sidebar_label: 命名与边界限制
|
sidebar_label: 命名与边界
|
||||||
title: 命名与边界限制
|
title: 命名与边界
|
||||||
description: 合法字符集和命名中的限制规则
|
description: 合法字符集和命名中的限制规则
|
||||||
---
|
---
|
||||||
|
|
||||||
|
|
|
@ -1,12 +1,14 @@
|
||||||
---
|
---
|
||||||
sidebar_label: 保留关键字
|
sidebar_label: 保留关键字
|
||||||
title: TDengine 保留关键字
|
title: 保留关键字
|
||||||
description: TDengine 保留关键字的详细列表
|
description: TDengine 保留关键字的详细列表
|
||||||
---
|
---
|
||||||
|
|
||||||
## 保留关键字
|
## 保留关键字
|
||||||
|
|
||||||
目前 TDengine 有将近 200 个内部保留关键字,这些关键字无论大小写均不可以用作库名、表名、STable 名、数据列名及标签列名等。这些关键字列表如下:
|
目前 TDengine 有 200 多个内部保留关键字,这些关键字如果需要用作库名、表名、超级表名、子表名、数据列名及标签列名等,无论大小写,需要使用符号 `` ` `` 将关键字括起来使用,例如 \`ADD\`。
|
||||||
|
|
||||||
|
关键字列表如下:
|
||||||
|
|
||||||
### A
|
### A
|
||||||
|
|
||||||
|
@ -15,15 +17,20 @@ description: TDengine 保留关键字的详细列表
|
||||||
- ACCOUNTS
|
- ACCOUNTS
|
||||||
- ADD
|
- ADD
|
||||||
- AFTER
|
- AFTER
|
||||||
|
- AGGREGATE
|
||||||
- ALL
|
- ALL
|
||||||
- ALTER
|
- ALTER
|
||||||
|
- ANALYZE
|
||||||
- AND
|
- AND
|
||||||
|
- APPS
|
||||||
- AS
|
- AS
|
||||||
- ASC
|
- ASC
|
||||||
|
- AT_ONCE
|
||||||
- ATTACH
|
- ATTACH
|
||||||
|
|
||||||
### B
|
### B
|
||||||
|
|
||||||
|
- BALANCE
|
||||||
- BEFORE
|
- BEFORE
|
||||||
- BEGIN
|
- BEGIN
|
||||||
- BETWEEN
|
- BETWEEN
|
||||||
|
@ -33,19 +40,27 @@ description: TDengine 保留关键字的详细列表
|
||||||
- BITNOT
|
- BITNOT
|
||||||
- BITOR
|
- BITOR
|
||||||
- BLOCKS
|
- BLOCKS
|
||||||
|
- BNODE
|
||||||
|
- BNODES
|
||||||
- BOOL
|
- BOOL
|
||||||
|
- BUFFER
|
||||||
|
- BUFSIZE
|
||||||
- BY
|
- BY
|
||||||
|
|
||||||
### C
|
### C
|
||||||
|
|
||||||
- CACHE
|
- CACHE
|
||||||
- CACHELAST
|
- CACHEMODEL
|
||||||
|
- CACHESIZE
|
||||||
- CASCADE
|
- CASCADE
|
||||||
|
- CAST
|
||||||
- CHANGE
|
- CHANGE
|
||||||
|
- CLIENT_VERSION
|
||||||
- CLUSTER
|
- CLUSTER
|
||||||
- COLON
|
- COLON
|
||||||
- COLUMN
|
- COLUMN
|
||||||
- COMMA
|
- COMMA
|
||||||
|
- COMMENT
|
||||||
- COMP
|
- COMP
|
||||||
- COMPACT
|
- COMPACT
|
||||||
- CONCAT
|
- CONCAT
|
||||||
|
@ -53,15 +68,18 @@ description: TDengine 保留关键字的详细列表
|
||||||
- CONNECTION
|
- CONNECTION
|
||||||
- CONNECTIONS
|
- CONNECTIONS
|
||||||
- CONNS
|
- CONNS
|
||||||
|
- CONSUMER
|
||||||
|
- CONSUMERS
|
||||||
|
- CONTAINS
|
||||||
- COPY
|
- COPY
|
||||||
|
- COUNT
|
||||||
- CREATE
|
- CREATE
|
||||||
- CTIME
|
- CURRENT_USER
|
||||||
|
|
||||||
### D
|
### D
|
||||||
|
|
||||||
- DATABASE
|
- DATABASE
|
||||||
- DATABASES
|
- DATABASES
|
||||||
- DAYS
|
|
||||||
- DBS
|
- DBS
|
||||||
- DEFERRED
|
- DEFERRED
|
||||||
- DELETE
|
- DELETE
|
||||||
|
@ -70,18 +88,23 @@ description: TDengine 保留关键字的详细列表
|
||||||
- DESCRIBE
|
- DESCRIBE
|
||||||
- DETACH
|
- DETACH
|
||||||
- DISTINCT
|
- DISTINCT
|
||||||
|
- DISTRIBUTED
|
||||||
- DIVIDE
|
- DIVIDE
|
||||||
- DNODE
|
- DNODE
|
||||||
- DNODES
|
- DNODES
|
||||||
- DOT
|
- DOT
|
||||||
- DOUBLE
|
- DOUBLE
|
||||||
- DROP
|
- DROP
|
||||||
|
- DURATION
|
||||||
|
|
||||||
### E
|
### E
|
||||||
|
|
||||||
|
- EACH
|
||||||
|
- ENABLE
|
||||||
- END
|
- END
|
||||||
- EQ
|
- EVERY
|
||||||
- EXISTS
|
- EXISTS
|
||||||
|
- EXPIRED
|
||||||
- EXPLAIN
|
- EXPLAIN
|
||||||
|
|
||||||
### F
|
### F
|
||||||
|
@ -89,18 +112,20 @@ description: TDengine 保留关键字的详细列表
|
||||||
- FAIL
|
- FAIL
|
||||||
- FILE
|
- FILE
|
||||||
- FILL
|
- FILL
|
||||||
|
- FIRST
|
||||||
- FLOAT
|
- FLOAT
|
||||||
|
- FLUSH
|
||||||
- FOR
|
- FOR
|
||||||
- FROM
|
- FROM
|
||||||
- FSYNC
|
- FUNCTION
|
||||||
|
- FUNCTIONS
|
||||||
|
|
||||||
### G
|
### G
|
||||||
|
|
||||||
- GE
|
|
||||||
- GLOB
|
- GLOB
|
||||||
|
- GRANT
|
||||||
- GRANTS
|
- GRANTS
|
||||||
- GROUP
|
- GROUP
|
||||||
- GT
|
|
||||||
|
|
||||||
### H
|
### H
|
||||||
|
|
||||||
|
@ -111,15 +136,18 @@ description: TDengine 保留关键字的详细列表
|
||||||
- ID
|
- ID
|
||||||
- IF
|
- IF
|
||||||
- IGNORE
|
- IGNORE
|
||||||
- IMMEDIA
|
- IMMEDIATE
|
||||||
- IMPORT
|
- IMPORT
|
||||||
- IN
|
- IN
|
||||||
- INITIAL
|
- INDEX
|
||||||
|
- INDEXES
|
||||||
|
- INITIALLY
|
||||||
|
- INNER
|
||||||
- INSERT
|
- INSERT
|
||||||
- INSTEAD
|
- INSTEAD
|
||||||
- INT
|
- INT
|
||||||
- INTEGER
|
- INTEGER
|
||||||
- INTERVA
|
- INTERVAL
|
||||||
- INTO
|
- INTO
|
||||||
- IS
|
- IS
|
||||||
- ISNULL
|
- ISNULL
|
||||||
|
@ -127,6 +155,7 @@ description: TDengine 保留关键字的详细列表
|
||||||
### J
|
### J
|
||||||
|
|
||||||
- JOIN
|
- JOIN
|
||||||
|
- JSON
|
||||||
|
|
||||||
### K
|
### K
|
||||||
|
|
||||||
|
@ -136,46 +165,57 @@ description: TDengine 保留关键字的详细列表
|
||||||
|
|
||||||
### L
|
### L
|
||||||
|
|
||||||
- LE
|
- LAST
|
||||||
|
- LAST_ROW
|
||||||
|
- LICENCES
|
||||||
- LIKE
|
- LIKE
|
||||||
- LIMIT
|
- LIMIT
|
||||||
- LINEAR
|
- LINEAR
|
||||||
- LOCAL
|
- LOCAL
|
||||||
- LP
|
|
||||||
- LSHIFT
|
|
||||||
- LT
|
|
||||||
|
|
||||||
### M
|
### M
|
||||||
|
|
||||||
- MATCH
|
- MATCH
|
||||||
|
- MAX_DELAY
|
||||||
- MAXROWS
|
- MAXROWS
|
||||||
|
- MERGE
|
||||||
|
- META
|
||||||
- MINROWS
|
- MINROWS
|
||||||
- MINUS
|
- MINUS
|
||||||
|
- MNODE
|
||||||
- MNODES
|
- MNODES
|
||||||
- MODIFY
|
- MODIFY
|
||||||
- MODULES
|
- MODULES
|
||||||
|
|
||||||
### N
|
### N
|
||||||
|
|
||||||
- NE
|
- NCHAR
|
||||||
|
- NEXT
|
||||||
|
- NMATCH
|
||||||
- NONE
|
- NONE
|
||||||
- NOT
|
- NOT
|
||||||
- NOTNULL
|
- NOTNULL
|
||||||
- NOW
|
- NOW
|
||||||
- NULL
|
- NULL
|
||||||
|
- NULLS
|
||||||
|
|
||||||
### O
|
### O
|
||||||
|
|
||||||
- OF
|
- OF
|
||||||
- OFFSET
|
- OFFSET
|
||||||
|
- ON
|
||||||
- OR
|
- OR
|
||||||
- ORDER
|
- ORDER
|
||||||
|
- OUTPUTTYPE
|
||||||
|
|
||||||
### P
|
### P
|
||||||
|
|
||||||
- PARTITION
|
- PAGES
|
||||||
|
- PAGESIZE
|
||||||
|
- PARTITIONS
|
||||||
- PASS
|
- PASS
|
||||||
- PLUS
|
- PLUS
|
||||||
|
- PORT
|
||||||
- PPS
|
- PPS
|
||||||
- PRECISION
|
- PRECISION
|
||||||
- PREV
|
- PREV
|
||||||
|
@ -183,47 +223,63 @@ description: TDengine 保留关键字的详细列表
|
||||||
|
|
||||||
### Q
|
### Q
|
||||||
|
|
||||||
|
- QNODE
|
||||||
|
- QNODES
|
||||||
- QTIME
|
- QTIME
|
||||||
- QUERIE
|
- QUERIES
|
||||||
- QUERY
|
- QUERY
|
||||||
- QUORUM
|
|
||||||
|
|
||||||
### R
|
### R
|
||||||
|
|
||||||
- RAISE
|
- RAISE
|
||||||
- REM
|
- RANGE
|
||||||
|
- RATIO
|
||||||
|
- READ
|
||||||
|
- REDISTRIBUTE
|
||||||
|
- RENAME
|
||||||
- REPLACE
|
- REPLACE
|
||||||
- REPLICA
|
- REPLICA
|
||||||
- RESET
|
- RESET
|
||||||
- RESTRIC
|
- RESTRICT
|
||||||
|
- RETENTIONS
|
||||||
|
- REVOKE
|
||||||
|
- ROLLUP
|
||||||
- ROW
|
- ROW
|
||||||
- RP
|
|
||||||
- RSHIFT
|
|
||||||
|
|
||||||
### S
|
### S
|
||||||
|
|
||||||
|
- SCHEMALESS
|
||||||
- SCORES
|
- SCORES
|
||||||
- SELECT
|
- SELECT
|
||||||
- SEMI
|
- SEMI
|
||||||
|
- SERVER_STATUS
|
||||||
|
- SERVER_VERSION
|
||||||
- SESSION
|
- SESSION
|
||||||
- SET
|
- SET
|
||||||
- SHOW
|
- SHOW
|
||||||
- SLASH
|
- SINGLE_STABLE
|
||||||
- SLIDING
|
- SLIDING
|
||||||
- SLIMIT
|
- SLIMIT
|
||||||
- SMALLIN
|
- SMA
|
||||||
|
- SMALLINT
|
||||||
|
- SNODE
|
||||||
|
- SNODES
|
||||||
- SOFFSET
|
- SOFFSET
|
||||||
- STable
|
- SPLIT
|
||||||
- STableS
|
- STABLE
|
||||||
|
- STABLES
|
||||||
- STAR
|
- STAR
|
||||||
- STATE
|
- STATE
|
||||||
- STATEMEN
|
- STATE_WINDOW
|
||||||
- STATE_WI
|
- STATEMENT
|
||||||
- STORAGE
|
- STORAGE
|
||||||
- STREAM
|
- STREAM
|
||||||
- STREAMS
|
- STREAMS
|
||||||
|
- STRICT
|
||||||
- STRING
|
- STRING
|
||||||
|
- SUBSCRIPTIONS
|
||||||
- SYNCDB
|
- SYNCDB
|
||||||
|
- SYSINFO
|
||||||
|
|
||||||
### T
|
### T
|
||||||
|
|
||||||
|
@ -234,19 +290,24 @@ description: TDengine 保留关键字的详细列表
|
||||||
- TBNAME
|
- TBNAME
|
||||||
- TIMES
|
- TIMES
|
||||||
- TIMESTAMP
|
- TIMESTAMP
|
||||||
|
- TIMEZONE
|
||||||
- TINYINT
|
- TINYINT
|
||||||
|
- TO
|
||||||
|
- TODAY
|
||||||
- TOPIC
|
- TOPIC
|
||||||
- TOPICS
|
- TOPICS
|
||||||
|
- TRANSACTION
|
||||||
|
- TRANSACTIONS
|
||||||
- TRIGGER
|
- TRIGGER
|
||||||
|
- TRIM
|
||||||
- TSERIES
|
- TSERIES
|
||||||
|
- TTL
|
||||||
|
|
||||||
### U
|
### U
|
||||||
|
|
||||||
- UMINUS
|
|
||||||
- UNION
|
- UNION
|
||||||
- UNSIGNED
|
- UNSIGNED
|
||||||
- UPDATE
|
- UPDATE
|
||||||
- UPLUS
|
|
||||||
- USE
|
- USE
|
||||||
- USER
|
- USER
|
||||||
- USERS
|
- USERS
|
||||||
|
@ -254,9 +315,13 @@ description: TDengine 保留关键字的详细列表
|
||||||
|
|
||||||
### V
|
### V
|
||||||
|
|
||||||
|
- VALUE
|
||||||
- VALUES
|
- VALUES
|
||||||
|
- VARCHAR
|
||||||
- VARIABLE
|
- VARIABLE
|
||||||
- VARIABLES
|
- VARIABLES
|
||||||
|
- VERBOSE
|
||||||
|
- VGROUP
|
||||||
- VGROUPS
|
- VGROUPS
|
||||||
- VIEW
|
- VIEW
|
||||||
- VNODES
|
- VNODES
|
||||||
|
@ -264,14 +329,25 @@ description: TDengine 保留关键字的详细列表
|
||||||
### W
|
### W
|
||||||
|
|
||||||
- WAL
|
- WAL
|
||||||
|
- WAL_FSYNC_PERIOD
|
||||||
|
- WAL_LEVEL
|
||||||
|
- WAL_RETENTION_PERIOD
|
||||||
|
- WAL_RETENTION_SIZE
|
||||||
|
- WAL_ROLL_PERIOD
|
||||||
|
- WAL_SEGMENT_SIZE
|
||||||
|
- WATERMARK
|
||||||
- WHERE
|
- WHERE
|
||||||
|
- WINDOW_CLOSE
|
||||||
|
- WITH
|
||||||
|
- WRITE
|
||||||
|
|
||||||
### \_
|
### \_
|
||||||
|
|
||||||
- \_C0
|
- \_C0
|
||||||
- \_QSTART
|
|
||||||
- \_QSTOP
|
|
||||||
- \_QDURATION
|
- \_QDURATION
|
||||||
- \_WSTART
|
- \_QEND
|
||||||
- \_WSTOP
|
- \_QSTART
|
||||||
|
- \_ROWTS
|
||||||
- \_WDURATION
|
- \_WDURATION
|
||||||
|
- \_WEND
|
||||||
|
- \_WSTART
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
---
|
---
|
||||||
sidebar_label: 元数据
|
sidebar_label: 元数据
|
||||||
title: 存储元数据的 Information_Schema 数据库
|
title: 元数据
|
||||||
description: Information_Schema 数据库中存储了系统中所有的元数据信息
|
description: Information_Schema 数据库中存储了系统中所有的元数据信息
|
||||||
---
|
---
|
||||||
|
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
---
|
---
|
||||||
sidebar_label: 统计数据
|
sidebar_label: 统计数据
|
||||||
title: 存储统计数据的 Performance_Schema 数据库
|
title: 统计数据
|
||||||
description: Performance_Schema 数据库中存储了系统中的各种统计信息
|
description: Performance_Schema 数据库中存储了系统中的各种统计信息
|
||||||
---
|
---
|
||||||
|
|
||||||
|
|
|
@ -1,21 +1,11 @@
|
||||||
---
|
---
|
||||||
sidebar_label: SHOW 命令
|
sidebar_label: SHOW 命令
|
||||||
title: 使用 SHOW 命令查看系统元数据
|
title: SHOW 命令
|
||||||
description: SHOW 命令的完整列表
|
description: SHOW 命令的完整列表
|
||||||
---
|
---
|
||||||
|
|
||||||
SHOW 命令可以用来获取简要的系统信息。若想获取系统中详细的各种元数据、系统信息和状态,请使用 select 语句查询 INFORMATION_SCHEMA 数据库中的表。
|
SHOW 命令可以用来获取简要的系统信息。若想获取系统中详细的各种元数据、系统信息和状态,请使用 select 语句查询 INFORMATION_SCHEMA 数据库中的表。
|
||||||
|
|
||||||
## SHOW ACCOUNTS
|
|
||||||
|
|
||||||
```sql
|
|
||||||
SHOW ACCOUNTS;
|
|
||||||
```
|
|
||||||
|
|
||||||
显示当前系统中所有租户的信息。
|
|
||||||
|
|
||||||
注:企业版独有
|
|
||||||
|
|
||||||
## SHOW APPS
|
## SHOW APPS
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
---
|
---
|
||||||
sidebar_label: 自定义函数
|
sidebar_label: 自定义函数
|
||||||
title: 用户自定义函数
|
title: 自定义函数
|
||||||
description: 使用 UDF 的详细指南
|
description: 使用 UDF 的详细指南
|
||||||
---
|
---
|
||||||
|
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
---
|
---
|
||||||
sidebar_label: 索引
|
sidebar_label: 索引
|
||||||
title: 使用索引
|
title: 索引
|
||||||
description: 索引功能的使用细节
|
description: 索引功能的使用细节
|
||||||
---
|
---
|
||||||
|
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
---
|
---
|
||||||
sidebar_label: 3.0 版本语法变更
|
sidebar_label: 语法变更
|
||||||
title: 3.0 版本语法变更
|
title: 语法变更
|
||||||
description: "TDengine 3.0 版本的语法变更说明"
|
description: "TDengine 3.0 版本的语法变更说明"
|
||||||
---
|
---
|
||||||
|
|
||||||
|
|
|
@ -79,7 +79,7 @@ password = "taosdata"
|
||||||
|
|
||||||
# 需要被监控的 taosAdapter
|
# 需要被监控的 taosAdapter
|
||||||
[taosAdapter]
|
[taosAdapter]
|
||||||
address = ["127.0.0.1:6041","192.168.1.95:6041"]
|
address = ["127.0.0.1:6041"]
|
||||||
|
|
||||||
[metrics]
|
[metrics]
|
||||||
# 监控指标前缀
|
# 监控指标前缀
|
||||||
|
@ -92,7 +92,7 @@ cluster = "production"
|
||||||
database = "log"
|
database = "log"
|
||||||
|
|
||||||
# 指定需要监控的普通表
|
# 指定需要监控的普通表
|
||||||
tables = ["normal_table"]
|
tables = []
|
||||||
```
|
```
|
||||||
|
|
||||||
### 获取监控指标
|
### 获取监控指标
|
||||||
|
@ -141,4 +141,4 @@ taos_cluster_info_dnodes_total{cluster_id="5981392874047724755"} 1
|
||||||
# HELP taos_cluster_info_first_ep
|
# HELP taos_cluster_info_first_ep
|
||||||
# TYPE taos_cluster_info_first_ep gauge
|
# TYPE taos_cluster_info_first_ep gauge
|
||||||
taos_cluster_info_first_ep{cluster_id="5981392874047724755",value="hlb:6030"} 1
|
taos_cluster_info_first_ep{cluster_id="5981392874047724755",value="hlb:6030"} 1
|
||||||
```
|
```
|
||||||
|
|
|
@ -26,7 +26,7 @@ TDengine 分布式架构的逻辑结构图如下:
|
||||||
|
|
||||||
**管理节点(mnode):** 一个虚拟的逻辑单元,负责所有数据节点运行状态的监控和维护,以及节点之间的负载均衡(图中 M)。同时,管理节点也负责元数据(包括用户、数据库、超级表等)的存储和管理,因此也称为 Meta Node。TDengine 集群中可配置多个(最多不超过 3 个)mnode,它们自动构建成为一个虚拟管理节点组(图中 M1,M2,M3)。mnode 支持多副本,采用 RAFT 一致性协议,保证系统的高可用与高可靠,任何数据更新操作只能在 Leader 上进行。mnode 集群的第一个节点在集群部署时自动完成,其他节点的创建与删除由用户通过 SQL 命令完成。每个 dnode 上至多有一个 mnode,由所属的数据节点的 EP 来唯一标识。每个 dnode 通过内部消息交互自动获取整个集群中所有 mnode 所在的 dnode 的 EP。
|
**管理节点(mnode):** 一个虚拟的逻辑单元,负责所有数据节点运行状态的监控和维护,以及节点之间的负载均衡(图中 M)。同时,管理节点也负责元数据(包括用户、数据库、超级表等)的存储和管理,因此也称为 Meta Node。TDengine 集群中可配置多个(最多不超过 3 个)mnode,它们自动构建成为一个虚拟管理节点组(图中 M1,M2,M3)。mnode 支持多副本,采用 RAFT 一致性协议,保证系统的高可用与高可靠,任何数据更新操作只能在 Leader 上进行。mnode 集群的第一个节点在集群部署时自动完成,其他节点的创建与删除由用户通过 SQL 命令完成。每个 dnode 上至多有一个 mnode,由所属的数据节点的 EP 来唯一标识。每个 dnode 通过内部消息交互自动获取整个集群中所有 mnode 所在的 dnode 的 EP。
|
||||||
|
|
||||||
**弹性计算节点(qnode):** 一个虚拟的逻辑单元,运行查询计算任务,也包括基于系统表来实现的 show 命令(图中 Q)。集群中可配置多个 qnode,在整个集群内部共享使用(图中 Q1,Q2,Q3)。qnode 不与具体的 DB 绑定,即一个 qnode 可以同时执行多个 DB 的查询任务。每个 dnode 上至多有一个 qnode,由所属的数据节点的 EP 来唯一标识。客户端通过与 mnode 交互,获取可用的 qnode 列表,当没有可用的 qnode 时,计算任务在 vnode 中执行。
|
**计算节点(qnode):** 一个虚拟的逻辑单元,运行查询计算任务,也包括基于系统表来实现的 show 命令(图中 Q)。集群中可配置多个 qnode,在整个集群内部共享使用(图中 Q1,Q2,Q3)。qnode 不与具体的 DB 绑定,即一个 qnode 可以同时执行多个 DB 的查询任务。每个 dnode 上至多有一个 qnode,由所属的数据节点的 EP 来唯一标识。客户端通过与 mnode 交互,获取可用的 qnode 列表,当没有可用的 qnode 时,计算任务在 vnode 中执行。当一个查询执行时,依赖执行计划,调度器会安排一个或多个 qnode 来一起执行。qnode 能从 vnode 获取数据,也可以将自己的计算结果发给其他 qnode 做进一步的处理。通过引入独立的计算节点,TDengine 实现了存储和计算分离。
|
||||||
|
|
||||||
**流计算节点(snode):** 一个虚拟的逻辑单元,只运行流计算任务(图中 S)。集群中可配置多个 snode,在整个集群内部共享使用(图中 S1,S2,S3)。snode 不与具体的 stream 绑定,即一个 snode 可以同时执行多个 stream 的计算任务。每个 dnode 上至多有一个 snode,由所属的数据节点的 EP 来唯一标识。由 mnode 调度可用的 snode 完成流计算任务,当没有可用的 snode 时,流计算任务在 vnode 中执行。
|
**流计算节点(snode):** 一个虚拟的逻辑单元,只运行流计算任务(图中 S)。集群中可配置多个 snode,在整个集群内部共享使用(图中 S1,S2,S3)。snode 不与具体的 stream 绑定,即一个 snode 可以同时执行多个 stream 的计算任务。每个 dnode 上至多有一个 snode,由所属的数据节点的 EP 来唯一标识。由 mnode 调度可用的 snode 完成流计算任务,当没有可用的 snode 时,流计算任务在 vnode 中执行。
|
||||||
|
|
||||||
|
|
|
@ -6,11 +6,7 @@ description: TDengine 发布历史、Release Notes 及下载链接
|
||||||
|
|
||||||
import Release from "/components/ReleaseV3";
|
import Release from "/components/ReleaseV3";
|
||||||
|
|
||||||
## 3.0.0.1
|
## 3.0.1.0
|
||||||
|
|
||||||
<Release type="tdengine" version="3.0.0.1" />
|
<Release type="tdengine" version="3.0.1.0" />
|
||||||
|
|
||||||
<!-- ## 3.0.0.0
|
|
||||||
|
|
||||||
<Release type="tdengine" version="3.0.0.0" /> -->
|
|
||||||
|
|
||||||
|
|
|
@ -6,6 +6,6 @@ description: taosTools 的发布历史、Release Notes 和下载链接
|
||||||
|
|
||||||
import Release from "/components/ReleaseV3";
|
import Release from "/components/ReleaseV3";
|
||||||
|
|
||||||
## 2.1.2
|
## 2.1.3
|
||||||
|
|
||||||
<Release type="tools" version="2.1.2" />
|
<Release type="tools" version="2.1.3" />
|
||||||
|
|
|
@ -65,13 +65,6 @@ typedef enum {
|
||||||
TSDB_STATIS_NONE = 1, // statis part not exist
|
TSDB_STATIS_NONE = 1, // statis part not exist
|
||||||
} ETsdbStatisStatus;
|
} ETsdbStatisStatus;
|
||||||
|
|
||||||
typedef enum {
|
|
||||||
TSDB_SMA_STAT_UNKNOWN = -1, // unknown
|
|
||||||
TSDB_SMA_STAT_OK = 0, // ready to provide service
|
|
||||||
TSDB_SMA_STAT_EXPIRED = 1, // not ready or expired
|
|
||||||
TSDB_SMA_STAT_DROPPED = 2, // sma dropped
|
|
||||||
} ETsdbSmaStat; // bit operation
|
|
||||||
|
|
||||||
typedef enum {
|
typedef enum {
|
||||||
TSDB_SMA_TYPE_BLOCK = 0, // Block-wise SMA
|
TSDB_SMA_TYPE_BLOCK = 0, // Block-wise SMA
|
||||||
TSDB_SMA_TYPE_TIME_RANGE = 1, // Time-range-wise SMA
|
TSDB_SMA_TYPE_TIME_RANGE = 1, // Time-range-wise SMA
|
||||||
|
|
|
@ -45,8 +45,8 @@ enum {
|
||||||
// clang-format on
|
// clang-format on
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
TSKEY ts;
|
|
||||||
uint64_t groupId;
|
uint64_t groupId;
|
||||||
|
TSKEY ts;
|
||||||
} SWinKey;
|
} SWinKey;
|
||||||
|
|
||||||
static inline int SWinKeyCmpr(const void* pKey1, int kLen1, const void* pKey2, int kLen2) {
|
static inline int SWinKeyCmpr(const void* pKey1, int kLen1, const void* pKey2, int kLen2) {
|
||||||
|
@ -68,6 +68,37 @@ static inline int SWinKeyCmpr(const void* pKey1, int kLen1, const void* pKey2, i
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
uint64_t groupId;
|
||||||
|
TSKEY ts;
|
||||||
|
int32_t exprIdx;
|
||||||
|
} STupleKey;
|
||||||
|
|
||||||
|
static inline int STupleKeyCmpr(const void* pKey1, int kLen1, const void* pKey2, int kLen2) {
|
||||||
|
STupleKey* pTuple1 = (STupleKey*)pKey1;
|
||||||
|
STupleKey* pTuple2 = (STupleKey*)pKey2;
|
||||||
|
|
||||||
|
if (pTuple1->groupId > pTuple2->groupId) {
|
||||||
|
return 1;
|
||||||
|
} else if (pTuple1->groupId < pTuple2->groupId) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (pTuple1->ts > pTuple2->ts) {
|
||||||
|
return 1;
|
||||||
|
} else if (pTuple1->ts < pTuple2->ts) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (pTuple1->exprIdx > pTuple2->exprIdx) {
|
||||||
|
return 1;
|
||||||
|
} else if (pTuple1->exprIdx < pTuple2->exprIdx) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
enum {
|
enum {
|
||||||
TMQ_MSG_TYPE__DUMMY = 0,
|
TMQ_MSG_TYPE__DUMMY = 0,
|
||||||
TMQ_MSG_TYPE__POLL_RSP,
|
TMQ_MSG_TYPE__POLL_RSP,
|
||||||
|
@ -184,7 +215,6 @@ typedef struct SQueryTableDataCond {
|
||||||
STimeWindow twindows;
|
STimeWindow twindows;
|
||||||
int64_t startVersion;
|
int64_t startVersion;
|
||||||
int64_t endVersion;
|
int64_t endVersion;
|
||||||
int64_t schemaVersion;
|
|
||||||
} SQueryTableDataCond;
|
} SQueryTableDataCond;
|
||||||
|
|
||||||
int32_t tEncodeDataBlock(void** buf, const SSDataBlock* pBlock);
|
int32_t tEncodeDataBlock(void** buf, const SSDataBlock* pBlock);
|
||||||
|
|
|
@ -184,7 +184,8 @@ static FORCE_INLINE void colDataAppendDouble(SColumnInfoData* pColumnInfoData, u
|
||||||
int32_t getJsonValueLen(const char* data);
|
int32_t getJsonValueLen(const char* data);
|
||||||
|
|
||||||
int32_t colDataAppend(SColumnInfoData* pColumnInfoData, uint32_t currentRow, const char* pData, bool isNull);
|
int32_t colDataAppend(SColumnInfoData* pColumnInfoData, uint32_t currentRow, const char* pData, bool isNull);
|
||||||
int32_t colDataAppendNItems(SColumnInfoData* pColumnInfoData, uint32_t currentRow, const char* pData, uint32_t numOfRows);
|
int32_t colDataAppendNItems(SColumnInfoData* pColumnInfoData, uint32_t currentRow, const char* pData,
|
||||||
|
uint32_t numOfRows);
|
||||||
int32_t colDataMergeCol(SColumnInfoData* pColumnInfoData, int32_t numOfRow1, int32_t* capacity,
|
int32_t colDataMergeCol(SColumnInfoData* pColumnInfoData, int32_t numOfRow1, int32_t* capacity,
|
||||||
const SColumnInfoData* pSource, int32_t numOfRow2);
|
const SColumnInfoData* pSource, int32_t numOfRow2);
|
||||||
int32_t colDataAssign(SColumnInfoData* pColumnInfoData, const SColumnInfoData* pSource, int32_t numOfRows,
|
int32_t colDataAssign(SColumnInfoData* pColumnInfoData, const SColumnInfoData* pSource, int32_t numOfRows,
|
||||||
|
@ -225,15 +226,16 @@ size_t blockDataGetCapacityInRow(const SSDataBlock* pBlock, size_t pageSize);
|
||||||
int32_t blockDataTrimFirstNRows(SSDataBlock* pBlock, size_t n);
|
int32_t blockDataTrimFirstNRows(SSDataBlock* pBlock, size_t n);
|
||||||
int32_t blockDataKeepFirstNRows(SSDataBlock* pBlock, size_t n);
|
int32_t blockDataKeepFirstNRows(SSDataBlock* pBlock, size_t n);
|
||||||
|
|
||||||
int32_t assignOneDataBlock(SSDataBlock* dst, const SSDataBlock* src);
|
int32_t assignOneDataBlock(SSDataBlock* dst, const SSDataBlock* src);
|
||||||
int32_t copyDataBlock(SSDataBlock* dst, const SSDataBlock* src);
|
int32_t copyDataBlock(SSDataBlock* dst, const SSDataBlock* src);
|
||||||
|
|
||||||
SSDataBlock* createDataBlock();
|
SSDataBlock* createDataBlock();
|
||||||
void* blockDataDestroy(SSDataBlock* pBlock);
|
void* blockDataDestroy(SSDataBlock* pBlock);
|
||||||
void blockDataFreeRes(SSDataBlock* pBlock);
|
void blockDataFreeRes(SSDataBlock* pBlock);
|
||||||
SSDataBlock* createOneDataBlock(const SSDataBlock* pDataBlock, bool copyData);
|
SSDataBlock* createOneDataBlock(const SSDataBlock* pDataBlock, bool copyData);
|
||||||
|
SSDataBlock* createSpecialDataBlock(EStreamType type);
|
||||||
|
|
||||||
int32_t blockDataAppendColInfo(SSDataBlock* pBlock, SColumnInfoData* pColInfoData);
|
int32_t blockDataAppendColInfo(SSDataBlock* pBlock, SColumnInfoData* pColInfoData);
|
||||||
|
|
||||||
SColumnInfoData createColumnInfoData(int16_t type, int32_t bytes, int16_t colId);
|
SColumnInfoData createColumnInfoData(int16_t type, int32_t bytes, int16_t colId);
|
||||||
SColumnInfoData* bdGetColumnInfoData(const SSDataBlock* pBlock, int32_t index);
|
SColumnInfoData* bdGetColumnInfoData(const SSDataBlock* pBlock, int32_t index);
|
||||||
|
@ -249,7 +251,6 @@ char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** dumpBuf);
|
||||||
int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SSDataBlock* pDataBlocks, STSchema* pTSchema, int32_t vgId,
|
int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SSDataBlock* pDataBlocks, STSchema* pTSchema, int32_t vgId,
|
||||||
tb_uid_t suid);
|
tb_uid_t suid);
|
||||||
|
|
||||||
|
|
||||||
char* buildCtbNameByGroupId(const char* stbName, uint64_t groupId);
|
char* buildCtbNameByGroupId(const char* stbName, uint64_t groupId);
|
||||||
|
|
||||||
static FORCE_INLINE int32_t blockGetEncodeSize(const SSDataBlock* pBlock) {
|
static FORCE_INLINE int32_t blockGetEncodeSize(const SSDataBlock* pBlock) {
|
||||||
|
|
|
@ -36,8 +36,13 @@ typedef struct STSRow2 STSRow2;
|
||||||
typedef struct STSRowBuilder STSRowBuilder;
|
typedef struct STSRowBuilder STSRowBuilder;
|
||||||
typedef struct STagVal STagVal;
|
typedef struct STagVal STagVal;
|
||||||
typedef struct STag STag;
|
typedef struct STag STag;
|
||||||
|
typedef struct SColData SColData;
|
||||||
|
|
||||||
// bitmap
|
#define HAS_NONE ((uint8_t)0x1)
|
||||||
|
#define HAS_NULL ((uint8_t)0x2)
|
||||||
|
#define HAS_VALUE ((uint8_t)0x4)
|
||||||
|
|
||||||
|
// bitmap ================================
|
||||||
const static uint8_t BIT2_MAP[4][4] = {{0b00000000, 0b00000001, 0b00000010, 0},
|
const static uint8_t BIT2_MAP[4][4] = {{0b00000000, 0b00000001, 0b00000010, 0},
|
||||||
{0b00000000, 0b00000100, 0b00001000, 2},
|
{0b00000000, 0b00000100, 0b00001000, 2},
|
||||||
{0b00000000, 0b00010000, 0b00100000, 4},
|
{0b00000000, 0b00010000, 0b00100000, 4},
|
||||||
|
@ -51,21 +56,21 @@ const static uint8_t BIT2_MAP[4][4] = {{0b00000000, 0b00000001, 0b00000010, 0},
|
||||||
#define SET_BIT2(p, i, v) ((p)[(i) >> 2] = (p)[(i) >> 2] & N1(BIT2_MAP[(i)&3][3]) | BIT2_MAP[(i)&3][(v)])
|
#define SET_BIT2(p, i, v) ((p)[(i) >> 2] = (p)[(i) >> 2] & N1(BIT2_MAP[(i)&3][3]) | BIT2_MAP[(i)&3][(v)])
|
||||||
#define GET_BIT2(p, i) (((p)[(i) >> 2] >> BIT2_MAP[(i)&3][3]) & ((uint8_t)3))
|
#define GET_BIT2(p, i) (((p)[(i) >> 2] >> BIT2_MAP[(i)&3][3]) & ((uint8_t)3))
|
||||||
|
|
||||||
// STSchema
|
// STSchema ================================
|
||||||
int32_t tTSchemaCreate(int32_t sver, SSchema *pSchema, int32_t nCols, STSchema **ppTSchema);
|
int32_t tTSchemaCreate(int32_t sver, SSchema *pSchema, int32_t nCols, STSchema **ppTSchema);
|
||||||
void tTSchemaDestroy(STSchema *pTSchema);
|
void tTSchemaDestroy(STSchema *pTSchema);
|
||||||
|
|
||||||
// SValue
|
// SValue ================================
|
||||||
int32_t tPutValue(uint8_t *p, SValue *pValue, int8_t type);
|
int32_t tPutValue(uint8_t *p, SValue *pValue, int8_t type);
|
||||||
int32_t tGetValue(uint8_t *p, SValue *pValue, int8_t type);
|
int32_t tGetValue(uint8_t *p, SValue *pValue, int8_t type);
|
||||||
int tValueCmprFn(const SValue *pValue1, const SValue *pValue2, int8_t type);
|
int tValueCmprFn(const SValue *pValue1, const SValue *pValue2, int8_t type);
|
||||||
|
|
||||||
// SColVal
|
// SColVal ================================
|
||||||
#define COL_VAL_NONE(CID, TYPE) ((SColVal){.cid = (CID), .type = (TYPE), .isNone = 1})
|
#define COL_VAL_NONE(CID, TYPE) ((SColVal){.cid = (CID), .type = (TYPE), .isNone = 1})
|
||||||
#define COL_VAL_NULL(CID, TYPE) ((SColVal){.cid = (CID), .type = (TYPE), .isNull = 1})
|
#define COL_VAL_NULL(CID, TYPE) ((SColVal){.cid = (CID), .type = (TYPE), .isNull = 1})
|
||||||
#define COL_VAL_VALUE(CID, TYPE, V) ((SColVal){.cid = (CID), .type = (TYPE), .value = (V)})
|
#define COL_VAL_VALUE(CID, TYPE, V) ((SColVal){.cid = (CID), .type = (TYPE), .value = (V)})
|
||||||
|
|
||||||
// STSRow2
|
// STSRow2 ================================
|
||||||
#define TSROW_LEN(PROW, V) tGetI32v((uint8_t *)(PROW)->data, (V) ? &(V) : NULL)
|
#define TSROW_LEN(PROW, V) tGetI32v((uint8_t *)(PROW)->data, (V) ? &(V) : NULL)
|
||||||
#define TSROW_SVER(PROW, V) tGetI32v((PROW)->data + TSROW_LEN(PROW, NULL), (V) ? &(V) : NULL)
|
#define TSROW_SVER(PROW, V) tGetI32v((PROW)->data + TSROW_LEN(PROW, NULL), (V) ? &(V) : NULL)
|
||||||
|
|
||||||
|
@ -77,7 +82,7 @@ int32_t tTSRowToArray(STSRow2 *pRow, STSchema *pTSchema, SArray **ppArray);
|
||||||
int32_t tPutTSRow(uint8_t *p, STSRow2 *pRow);
|
int32_t tPutTSRow(uint8_t *p, STSRow2 *pRow);
|
||||||
int32_t tGetTSRow(uint8_t *p, STSRow2 **ppRow);
|
int32_t tGetTSRow(uint8_t *p, STSRow2 **ppRow);
|
||||||
|
|
||||||
// STSRowBuilder
|
// STSRowBuilder ================================
|
||||||
#define tsRowBuilderInit() ((STSRowBuilder){0})
|
#define tsRowBuilderInit() ((STSRowBuilder){0})
|
||||||
#define tsRowBuilderClear(B) \
|
#define tsRowBuilderClear(B) \
|
||||||
do { \
|
do { \
|
||||||
|
@ -86,7 +91,7 @@ int32_t tGetTSRow(uint8_t *p, STSRow2 **ppRow);
|
||||||
} \
|
} \
|
||||||
} while (0)
|
} while (0)
|
||||||
|
|
||||||
// STag
|
// STag ================================
|
||||||
int32_t tTagNew(SArray *pArray, int32_t version, int8_t isJson, STag **ppTag);
|
int32_t tTagNew(SArray *pArray, int32_t version, int8_t isJson, STag **ppTag);
|
||||||
void tTagFree(STag *pTag);
|
void tTagFree(STag *pTag);
|
||||||
bool tTagIsJson(const void *pTag);
|
bool tTagIsJson(const void *pTag);
|
||||||
|
@ -100,7 +105,16 @@ void tTagSetCid(const STag *pTag, int16_t iTag, int16_t cid);
|
||||||
void debugPrintSTag(STag *pTag, const char *tag, int32_t ln); // TODO: remove
|
void debugPrintSTag(STag *pTag, const char *tag, int32_t ln); // TODO: remove
|
||||||
int32_t parseJsontoTagData(const char *json, SArray *pTagVals, STag **ppTag, void *pMsgBuf);
|
int32_t parseJsontoTagData(const char *json, SArray *pTagVals, STag **ppTag, void *pMsgBuf);
|
||||||
|
|
||||||
// STRUCT =================
|
// SColData ================================
|
||||||
|
void tColDataDestroy(void *ph);
|
||||||
|
void tColDataInit(SColData *pColData, int16_t cid, int8_t type, int8_t smaOn);
|
||||||
|
void tColDataClear(SColData *pColData);
|
||||||
|
int32_t tColDataAppendValue(SColData *pColData, SColVal *pColVal);
|
||||||
|
void tColDataGetValue(SColData *pColData, int32_t iVal, SColVal *pColVal);
|
||||||
|
uint8_t tColDataGetBitValue(SColData *pColData, int32_t iVal);
|
||||||
|
int32_t tColDataCopy(SColData *pColDataSrc, SColData *pColDataDest);
|
||||||
|
|
||||||
|
// STRUCT ================================
|
||||||
struct STColumn {
|
struct STColumn {
|
||||||
col_id_t colId;
|
col_id_t colId;
|
||||||
int8_t type;
|
int8_t type;
|
||||||
|
@ -166,6 +180,18 @@ struct SColVal {
|
||||||
SValue value;
|
SValue value;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
struct SColData {
|
||||||
|
int16_t cid;
|
||||||
|
int8_t type;
|
||||||
|
int8_t smaOn;
|
||||||
|
int32_t nVal;
|
||||||
|
uint8_t flag;
|
||||||
|
uint8_t *pBitMap;
|
||||||
|
int32_t *aOffset;
|
||||||
|
int32_t nData;
|
||||||
|
uint8_t *pData;
|
||||||
|
};
|
||||||
|
|
||||||
#pragma pack(push, 1)
|
#pragma pack(push, 1)
|
||||||
struct STagVal {
|
struct STagVal {
|
||||||
// char colName[TSDB_COL_NAME_LEN]; // only used for tmq_get_meta
|
// char colName[TSDB_COL_NAME_LEN]; // only used for tmq_get_meta
|
||||||
|
|
|
@ -784,6 +784,10 @@ typedef struct {
|
||||||
int64_t walRetentionSize;
|
int64_t walRetentionSize;
|
||||||
int32_t walRollPeriod;
|
int32_t walRollPeriod;
|
||||||
int64_t walSegmentSize;
|
int64_t walSegmentSize;
|
||||||
|
int32_t sstTrigger;
|
||||||
|
int16_t hashPrefix;
|
||||||
|
int16_t hashSuffix;
|
||||||
|
int32_t tsdbPageSize;
|
||||||
} SCreateDbReq;
|
} SCreateDbReq;
|
||||||
|
|
||||||
int32_t tSerializeSCreateDbReq(void* buf, int32_t bufLen, SCreateDbReq* pReq);
|
int32_t tSerializeSCreateDbReq(void* buf, int32_t bufLen, SCreateDbReq* pReq);
|
||||||
|
@ -805,6 +809,7 @@ typedef struct {
|
||||||
int8_t strict;
|
int8_t strict;
|
||||||
int8_t cacheLast;
|
int8_t cacheLast;
|
||||||
int8_t replications;
|
int8_t replications;
|
||||||
|
int32_t sstTrigger;
|
||||||
} SAlterDbReq;
|
} SAlterDbReq;
|
||||||
|
|
||||||
int32_t tSerializeSAlterDbReq(void* buf, int32_t bufLen, SAlterDbReq* pReq);
|
int32_t tSerializeSAlterDbReq(void* buf, int32_t bufLen, SAlterDbReq* pReq);
|
||||||
|
@ -841,6 +846,8 @@ typedef struct {
|
||||||
int64_t uid;
|
int64_t uid;
|
||||||
int32_t vgVersion;
|
int32_t vgVersion;
|
||||||
int32_t vgNum;
|
int32_t vgNum;
|
||||||
|
int16_t hashPrefix;
|
||||||
|
int16_t hashSuffix;
|
||||||
int8_t hashMethod;
|
int8_t hashMethod;
|
||||||
SArray* pVgroupInfos; // Array of SVgroupInfo
|
SArray* pVgroupInfos; // Array of SVgroupInfo
|
||||||
} SUseDbRsp;
|
} SUseDbRsp;
|
||||||
|
@ -1066,6 +1073,7 @@ typedef struct {
|
||||||
typedef struct {
|
typedef struct {
|
||||||
int32_t vgId;
|
int32_t vgId;
|
||||||
int32_t syncState;
|
int32_t syncState;
|
||||||
|
int64_t cacheUsage;
|
||||||
int64_t numOfTables;
|
int64_t numOfTables;
|
||||||
int64_t numOfTimeSeries;
|
int64_t numOfTimeSeries;
|
||||||
int64_t totalStorage;
|
int64_t totalStorage;
|
||||||
|
@ -1190,6 +1198,10 @@ typedef struct {
|
||||||
int64_t walRetentionSize;
|
int64_t walRetentionSize;
|
||||||
int32_t walRollPeriod;
|
int32_t walRollPeriod;
|
||||||
int64_t walSegmentSize;
|
int64_t walSegmentSize;
|
||||||
|
int16_t sstTrigger;
|
||||||
|
int16_t hashPrefix;
|
||||||
|
int16_t hashSuffix;
|
||||||
|
int32_t tsdbPageSize;
|
||||||
} SCreateVnodeReq;
|
} SCreateVnodeReq;
|
||||||
|
|
||||||
int32_t tSerializeSCreateVnodeReq(void* buf, int32_t bufLen, SCreateVnodeReq* pReq);
|
int32_t tSerializeSCreateVnodeReq(void* buf, int32_t bufLen, SCreateVnodeReq* pReq);
|
||||||
|
@ -2078,9 +2090,9 @@ int32_t tDeserializeSVCreateTbBatchRsp(void* buf, int32_t bufLen, SVCreateTbBatc
|
||||||
|
|
||||||
// TDMT_VND_DROP_TABLE =================
|
// TDMT_VND_DROP_TABLE =================
|
||||||
typedef struct {
|
typedef struct {
|
||||||
char* name;
|
char* name;
|
||||||
uint64_t suid; // for tmq in wal format
|
uint64_t suid; // for tmq in wal format
|
||||||
int8_t igNotExists;
|
int8_t igNotExists;
|
||||||
} SVDropTbReq;
|
} SVDropTbReq;
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
|
|
|
@ -272,6 +272,8 @@ enum {
|
||||||
TD_DEF_MSG_TYPE(TDMT_SYNC_LEADER_TRANSFER, "sync-leader-transfer", NULL, NULL)
|
TD_DEF_MSG_TYPE(TDMT_SYNC_LEADER_TRANSFER, "sync-leader-transfer", NULL, NULL)
|
||||||
TD_DEF_MSG_TYPE(TDMT_SYNC_SET_MNODE_STANDBY, "set-mnode-standby", NULL, NULL)
|
TD_DEF_MSG_TYPE(TDMT_SYNC_SET_MNODE_STANDBY, "set-mnode-standby", NULL, NULL)
|
||||||
TD_DEF_MSG_TYPE(TDMT_SYNC_SET_VNODE_STANDBY, "set-vnode-standby", NULL, NULL)
|
TD_DEF_MSG_TYPE(TDMT_SYNC_SET_VNODE_STANDBY, "set-vnode-standby", NULL, NULL)
|
||||||
|
TD_DEF_MSG_TYPE(TDMT_SYNC_HEARTBEAT, "sync-heartbeat", NULL, NULL)
|
||||||
|
TD_DEF_MSG_TYPE(TDMT_SYNC_HEARTBEAT_REPLY, "sync-heartbeat-reply", NULL, NULL)
|
||||||
TD_DEF_MSG_TYPE(TDMT_SYNC_MAX_MSG, "sync-max", NULL, NULL)
|
TD_DEF_MSG_TYPE(TDMT_SYNC_MAX_MSG, "sync-max", NULL, NULL)
|
||||||
|
|
||||||
#if defined(TD_MSG_NUMBER_)
|
#if defined(TD_MSG_NUMBER_)
|
||||||
|
|
|
@ -89,237 +89,241 @@
|
||||||
#define TK_KEEP 71
|
#define TK_KEEP 71
|
||||||
#define TK_PAGES 72
|
#define TK_PAGES 72
|
||||||
#define TK_PAGESIZE 73
|
#define TK_PAGESIZE 73
|
||||||
#define TK_PRECISION 74
|
#define TK_TSDB_PAGESIZE 74
|
||||||
#define TK_REPLICA 75
|
#define TK_PRECISION 75
|
||||||
#define TK_STRICT 76
|
#define TK_REPLICA 76
|
||||||
#define TK_VGROUPS 77
|
#define TK_STRICT 77
|
||||||
#define TK_SINGLE_STABLE 78
|
#define TK_VGROUPS 78
|
||||||
#define TK_RETENTIONS 79
|
#define TK_SINGLE_STABLE 79
|
||||||
#define TK_SCHEMALESS 80
|
#define TK_RETENTIONS 80
|
||||||
#define TK_WAL_LEVEL 81
|
#define TK_SCHEMALESS 81
|
||||||
#define TK_WAL_FSYNC_PERIOD 82
|
#define TK_WAL_LEVEL 82
|
||||||
#define TK_WAL_RETENTION_PERIOD 83
|
#define TK_WAL_FSYNC_PERIOD 83
|
||||||
#define TK_WAL_RETENTION_SIZE 84
|
#define TK_WAL_RETENTION_PERIOD 84
|
||||||
#define TK_WAL_ROLL_PERIOD 85
|
#define TK_WAL_RETENTION_SIZE 85
|
||||||
#define TK_WAL_SEGMENT_SIZE 86
|
#define TK_WAL_ROLL_PERIOD 86
|
||||||
#define TK_NK_COLON 87
|
#define TK_WAL_SEGMENT_SIZE 87
|
||||||
#define TK_TABLE 88
|
#define TK_STT_TRIGGER 88
|
||||||
#define TK_NK_LP 89
|
#define TK_TABLE_PREFIX 89
|
||||||
#define TK_NK_RP 90
|
#define TK_TABLE_SUFFIX 90
|
||||||
#define TK_STABLE 91
|
#define TK_NK_COLON 91
|
||||||
#define TK_ADD 92
|
#define TK_TABLE 92
|
||||||
#define TK_COLUMN 93
|
#define TK_NK_LP 93
|
||||||
#define TK_MODIFY 94
|
#define TK_NK_RP 94
|
||||||
#define TK_RENAME 95
|
#define TK_STABLE 95
|
||||||
#define TK_TAG 96
|
#define TK_ADD 96
|
||||||
#define TK_SET 97
|
#define TK_COLUMN 97
|
||||||
#define TK_NK_EQ 98
|
#define TK_MODIFY 98
|
||||||
#define TK_USING 99
|
#define TK_RENAME 99
|
||||||
#define TK_TAGS 100
|
#define TK_TAG 100
|
||||||
#define TK_COMMENT 101
|
#define TK_SET 101
|
||||||
#define TK_BOOL 102
|
#define TK_NK_EQ 102
|
||||||
#define TK_TINYINT 103
|
#define TK_USING 103
|
||||||
#define TK_SMALLINT 104
|
#define TK_TAGS 104
|
||||||
#define TK_INT 105
|
#define TK_COMMENT 105
|
||||||
#define TK_INTEGER 106
|
#define TK_BOOL 106
|
||||||
#define TK_BIGINT 107
|
#define TK_TINYINT 107
|
||||||
#define TK_FLOAT 108
|
#define TK_SMALLINT 108
|
||||||
#define TK_DOUBLE 109
|
#define TK_INT 109
|
||||||
#define TK_BINARY 110
|
#define TK_INTEGER 110
|
||||||
#define TK_TIMESTAMP 111
|
#define TK_BIGINT 111
|
||||||
#define TK_NCHAR 112
|
#define TK_FLOAT 112
|
||||||
#define TK_UNSIGNED 113
|
#define TK_DOUBLE 113
|
||||||
#define TK_JSON 114
|
#define TK_BINARY 114
|
||||||
#define TK_VARCHAR 115
|
#define TK_TIMESTAMP 115
|
||||||
#define TK_MEDIUMBLOB 116
|
#define TK_NCHAR 116
|
||||||
#define TK_BLOB 117
|
#define TK_UNSIGNED 117
|
||||||
#define TK_VARBINARY 118
|
#define TK_JSON 118
|
||||||
#define TK_DECIMAL 119
|
#define TK_VARCHAR 119
|
||||||
#define TK_MAX_DELAY 120
|
#define TK_MEDIUMBLOB 120
|
||||||
#define TK_WATERMARK 121
|
#define TK_BLOB 121
|
||||||
#define TK_ROLLUP 122
|
#define TK_VARBINARY 122
|
||||||
#define TK_TTL 123
|
#define TK_DECIMAL 123
|
||||||
#define TK_SMA 124
|
#define TK_MAX_DELAY 124
|
||||||
#define TK_FIRST 125
|
#define TK_WATERMARK 125
|
||||||
#define TK_LAST 126
|
#define TK_ROLLUP 126
|
||||||
#define TK_SHOW 127
|
#define TK_TTL 127
|
||||||
#define TK_DATABASES 128
|
#define TK_SMA 128
|
||||||
#define TK_TABLES 129
|
#define TK_FIRST 129
|
||||||
#define TK_STABLES 130
|
#define TK_LAST 130
|
||||||
#define TK_MNODES 131
|
#define TK_SHOW 131
|
||||||
#define TK_MODULES 132
|
#define TK_DATABASES 132
|
||||||
#define TK_QNODES 133
|
#define TK_TABLES 133
|
||||||
#define TK_FUNCTIONS 134
|
#define TK_STABLES 134
|
||||||
#define TK_INDEXES 135
|
#define TK_MNODES 135
|
||||||
#define TK_ACCOUNTS 136
|
#define TK_MODULES 136
|
||||||
#define TK_APPS 137
|
#define TK_QNODES 137
|
||||||
#define TK_CONNECTIONS 138
|
#define TK_FUNCTIONS 138
|
||||||
#define TK_LICENCES 139
|
#define TK_INDEXES 139
|
||||||
#define TK_GRANTS 140
|
#define TK_ACCOUNTS 140
|
||||||
#define TK_QUERIES 141
|
#define TK_APPS 141
|
||||||
#define TK_SCORES 142
|
#define TK_CONNECTIONS 142
|
||||||
#define TK_TOPICS 143
|
#define TK_LICENCES 143
|
||||||
#define TK_VARIABLES 144
|
#define TK_GRANTS 144
|
||||||
#define TK_BNODES 145
|
#define TK_QUERIES 145
|
||||||
#define TK_SNODES 146
|
#define TK_SCORES 146
|
||||||
#define TK_CLUSTER 147
|
#define TK_TOPICS 147
|
||||||
#define TK_TRANSACTIONS 148
|
#define TK_VARIABLES 148
|
||||||
#define TK_DISTRIBUTED 149
|
#define TK_BNODES 149
|
||||||
#define TK_CONSUMERS 150
|
#define TK_SNODES 150
|
||||||
#define TK_SUBSCRIPTIONS 151
|
#define TK_CLUSTER 151
|
||||||
#define TK_LIKE 152
|
#define TK_TRANSACTIONS 152
|
||||||
#define TK_INDEX 153
|
#define TK_DISTRIBUTED 153
|
||||||
#define TK_FUNCTION 154
|
#define TK_CONSUMERS 154
|
||||||
#define TK_INTERVAL 155
|
#define TK_SUBSCRIPTIONS 155
|
||||||
#define TK_TOPIC 156
|
#define TK_VNODES 156
|
||||||
#define TK_AS 157
|
#define TK_LIKE 157
|
||||||
#define TK_WITH 158
|
#define TK_INDEX 158
|
||||||
#define TK_META 159
|
#define TK_FUNCTION 159
|
||||||
#define TK_CONSUMER 160
|
#define TK_INTERVAL 160
|
||||||
#define TK_GROUP 161
|
#define TK_TOPIC 161
|
||||||
#define TK_DESC 162
|
#define TK_AS 162
|
||||||
#define TK_DESCRIBE 163
|
#define TK_WITH 163
|
||||||
#define TK_RESET 164
|
#define TK_META 164
|
||||||
#define TK_QUERY 165
|
#define TK_CONSUMER 165
|
||||||
#define TK_CACHE 166
|
#define TK_GROUP 166
|
||||||
#define TK_EXPLAIN 167
|
#define TK_DESC 167
|
||||||
#define TK_ANALYZE 168
|
#define TK_DESCRIBE 168
|
||||||
#define TK_VERBOSE 169
|
#define TK_RESET 169
|
||||||
#define TK_NK_BOOL 170
|
#define TK_QUERY 170
|
||||||
#define TK_RATIO 171
|
#define TK_CACHE 171
|
||||||
#define TK_NK_FLOAT 172
|
#define TK_EXPLAIN 172
|
||||||
#define TK_OUTPUTTYPE 173
|
#define TK_ANALYZE 173
|
||||||
#define TK_AGGREGATE 174
|
#define TK_VERBOSE 174
|
||||||
#define TK_BUFSIZE 175
|
#define TK_NK_BOOL 175
|
||||||
#define TK_STREAM 176
|
#define TK_RATIO 176
|
||||||
#define TK_INTO 177
|
#define TK_NK_FLOAT 177
|
||||||
#define TK_TRIGGER 178
|
#define TK_OUTPUTTYPE 178
|
||||||
#define TK_AT_ONCE 179
|
#define TK_AGGREGATE 179
|
||||||
#define TK_WINDOW_CLOSE 180
|
#define TK_BUFSIZE 180
|
||||||
#define TK_IGNORE 181
|
#define TK_STREAM 181
|
||||||
#define TK_EXPIRED 182
|
#define TK_INTO 182
|
||||||
#define TK_KILL 183
|
#define TK_TRIGGER 183
|
||||||
#define TK_CONNECTION 184
|
#define TK_AT_ONCE 184
|
||||||
#define TK_TRANSACTION 185
|
#define TK_WINDOW_CLOSE 185
|
||||||
#define TK_BALANCE 186
|
#define TK_IGNORE 186
|
||||||
#define TK_VGROUP 187
|
#define TK_EXPIRED 187
|
||||||
#define TK_MERGE 188
|
#define TK_KILL 188
|
||||||
#define TK_REDISTRIBUTE 189
|
#define TK_CONNECTION 189
|
||||||
#define TK_SPLIT 190
|
#define TK_TRANSACTION 190
|
||||||
#define TK_DELETE 191
|
#define TK_BALANCE 191
|
||||||
#define TK_INSERT 192
|
#define TK_VGROUP 192
|
||||||
#define TK_NULL 193
|
#define TK_MERGE 193
|
||||||
#define TK_NK_QUESTION 194
|
#define TK_REDISTRIBUTE 194
|
||||||
#define TK_NK_ARROW 195
|
#define TK_SPLIT 195
|
||||||
#define TK_ROWTS 196
|
#define TK_DELETE 196
|
||||||
#define TK_TBNAME 197
|
#define TK_INSERT 197
|
||||||
#define TK_QSTART 198
|
#define TK_NULL 198
|
||||||
#define TK_QEND 199
|
#define TK_NK_QUESTION 199
|
||||||
#define TK_QDURATION 200
|
#define TK_NK_ARROW 200
|
||||||
#define TK_WSTART 201
|
#define TK_ROWTS 201
|
||||||
#define TK_WEND 202
|
#define TK_TBNAME 202
|
||||||
#define TK_WDURATION 203
|
#define TK_QSTART 203
|
||||||
#define TK_CAST 204
|
#define TK_QEND 204
|
||||||
#define TK_NOW 205
|
#define TK_QDURATION 205
|
||||||
#define TK_TODAY 206
|
#define TK_WSTART 206
|
||||||
#define TK_TIMEZONE 207
|
#define TK_WEND 207
|
||||||
#define TK_CLIENT_VERSION 208
|
#define TK_WDURATION 208
|
||||||
#define TK_SERVER_VERSION 209
|
#define TK_CAST 209
|
||||||
#define TK_SERVER_STATUS 210
|
#define TK_NOW 210
|
||||||
#define TK_CURRENT_USER 211
|
#define TK_TODAY 211
|
||||||
#define TK_COUNT 212
|
#define TK_TIMEZONE 212
|
||||||
#define TK_LAST_ROW 213
|
#define TK_CLIENT_VERSION 213
|
||||||
#define TK_BETWEEN 214
|
#define TK_SERVER_VERSION 214
|
||||||
#define TK_IS 215
|
#define TK_SERVER_STATUS 215
|
||||||
#define TK_NK_LT 216
|
#define TK_CURRENT_USER 216
|
||||||
#define TK_NK_GT 217
|
#define TK_COUNT 217
|
||||||
#define TK_NK_LE 218
|
#define TK_LAST_ROW 218
|
||||||
#define TK_NK_GE 219
|
#define TK_BETWEEN 219
|
||||||
#define TK_NK_NE 220
|
#define TK_IS 220
|
||||||
#define TK_MATCH 221
|
#define TK_NK_LT 221
|
||||||
#define TK_NMATCH 222
|
#define TK_NK_GT 222
|
||||||
#define TK_CONTAINS 223
|
#define TK_NK_LE 223
|
||||||
#define TK_IN 224
|
#define TK_NK_GE 224
|
||||||
#define TK_JOIN 225
|
#define TK_NK_NE 225
|
||||||
#define TK_INNER 226
|
#define TK_MATCH 226
|
||||||
#define TK_SELECT 227
|
#define TK_NMATCH 227
|
||||||
#define TK_DISTINCT 228
|
#define TK_CONTAINS 228
|
||||||
#define TK_WHERE 229
|
#define TK_IN 229
|
||||||
#define TK_PARTITION 230
|
#define TK_JOIN 230
|
||||||
#define TK_BY 231
|
#define TK_INNER 231
|
||||||
#define TK_SESSION 232
|
#define TK_SELECT 232
|
||||||
#define TK_STATE_WINDOW 233
|
#define TK_DISTINCT 233
|
||||||
#define TK_SLIDING 234
|
#define TK_WHERE 234
|
||||||
#define TK_FILL 235
|
#define TK_PARTITION 235
|
||||||
#define TK_VALUE 236
|
#define TK_BY 236
|
||||||
#define TK_NONE 237
|
#define TK_SESSION 237
|
||||||
#define TK_PREV 238
|
#define TK_STATE_WINDOW 238
|
||||||
#define TK_LINEAR 239
|
#define TK_SLIDING 239
|
||||||
#define TK_NEXT 240
|
#define TK_FILL 240
|
||||||
#define TK_HAVING 241
|
#define TK_VALUE 241
|
||||||
#define TK_RANGE 242
|
#define TK_NONE 242
|
||||||
#define TK_EVERY 243
|
#define TK_PREV 243
|
||||||
#define TK_ORDER 244
|
#define TK_LINEAR 244
|
||||||
#define TK_SLIMIT 245
|
#define TK_NEXT 245
|
||||||
#define TK_SOFFSET 246
|
#define TK_HAVING 246
|
||||||
#define TK_LIMIT 247
|
#define TK_RANGE 247
|
||||||
#define TK_OFFSET 248
|
#define TK_EVERY 248
|
||||||
#define TK_ASC 249
|
#define TK_ORDER 249
|
||||||
#define TK_NULLS 250
|
#define TK_SLIMIT 250
|
||||||
#define TK_ABORT 251
|
#define TK_SOFFSET 251
|
||||||
#define TK_AFTER 252
|
#define TK_LIMIT 252
|
||||||
#define TK_ATTACH 253
|
#define TK_OFFSET 253
|
||||||
#define TK_BEFORE 254
|
#define TK_ASC 254
|
||||||
#define TK_BEGIN 255
|
#define TK_NULLS 255
|
||||||
#define TK_BITAND 256
|
#define TK_ABORT 256
|
||||||
#define TK_BITNOT 257
|
#define TK_AFTER 257
|
||||||
#define TK_BITOR 258
|
#define TK_ATTACH 258
|
||||||
#define TK_BLOCKS 259
|
#define TK_BEFORE 259
|
||||||
#define TK_CHANGE 260
|
#define TK_BEGIN 260
|
||||||
#define TK_COMMA 261
|
#define TK_BITAND 261
|
||||||
#define TK_COMPACT 262
|
#define TK_BITNOT 262
|
||||||
#define TK_CONCAT 263
|
#define TK_BITOR 263
|
||||||
#define TK_CONFLICT 264
|
#define TK_BLOCKS 264
|
||||||
#define TK_COPY 265
|
#define TK_CHANGE 265
|
||||||
#define TK_DEFERRED 266
|
#define TK_COMMA 266
|
||||||
#define TK_DELIMITERS 267
|
#define TK_COMPACT 267
|
||||||
#define TK_DETACH 268
|
#define TK_CONCAT 268
|
||||||
#define TK_DIVIDE 269
|
#define TK_CONFLICT 269
|
||||||
#define TK_DOT 270
|
#define TK_COPY 270
|
||||||
#define TK_EACH 271
|
#define TK_DEFERRED 271
|
||||||
#define TK_END 272
|
#define TK_DELIMITERS 272
|
||||||
#define TK_FAIL 273
|
#define TK_DETACH 273
|
||||||
#define TK_FILE 274
|
#define TK_DIVIDE 274
|
||||||
#define TK_FOR 275
|
#define TK_DOT 275
|
||||||
#define TK_GLOB 276
|
#define TK_EACH 276
|
||||||
#define TK_ID 277
|
#define TK_END 277
|
||||||
#define TK_IMMEDIATE 278
|
#define TK_FAIL 278
|
||||||
#define TK_IMPORT 279
|
#define TK_FILE 279
|
||||||
#define TK_INITIALLY 280
|
#define TK_FOR 280
|
||||||
#define TK_INSTEAD 281
|
#define TK_GLOB 281
|
||||||
#define TK_ISNULL 282
|
#define TK_ID 282
|
||||||
#define TK_KEY 283
|
#define TK_IMMEDIATE 283
|
||||||
#define TK_NK_BITNOT 284
|
#define TK_IMPORT 284
|
||||||
#define TK_NK_SEMI 285
|
#define TK_INITIALLY 285
|
||||||
#define TK_NOTNULL 286
|
#define TK_INSTEAD 286
|
||||||
#define TK_OF 287
|
#define TK_ISNULL 287
|
||||||
#define TK_PLUS 288
|
#define TK_KEY 288
|
||||||
#define TK_PRIVILEGE 289
|
#define TK_NK_BITNOT 289
|
||||||
#define TK_RAISE 290
|
#define TK_NK_SEMI 290
|
||||||
#define TK_REPLACE 291
|
#define TK_NOTNULL 291
|
||||||
#define TK_RESTRICT 292
|
#define TK_OF 292
|
||||||
#define TK_ROW 293
|
#define TK_PLUS 293
|
||||||
#define TK_SEMI 294
|
#define TK_PRIVILEGE 294
|
||||||
#define TK_STAR 295
|
#define TK_RAISE 295
|
||||||
#define TK_STATEMENT 296
|
#define TK_REPLACE 296
|
||||||
#define TK_STRING 297
|
#define TK_RESTRICT 297
|
||||||
#define TK_TIMES 298
|
#define TK_ROW 298
|
||||||
#define TK_UPDATE 299
|
#define TK_SEMI 299
|
||||||
#define TK_VALUES 300
|
#define TK_STAR 300
|
||||||
#define TK_VARIABLE 301
|
#define TK_STATEMENT 301
|
||||||
#define TK_VIEW 302
|
#define TK_STRING 302
|
||||||
#define TK_VNODES 303
|
#define TK_TIMES 303
|
||||||
#define TK_WAL 304
|
#define TK_UPDATE 304
|
||||||
|
#define TK_VALUES 305
|
||||||
|
#define TK_VARIABLE 306
|
||||||
|
#define TK_VIEW 307
|
||||||
|
#define TK_WAL 308
|
||||||
|
|
||||||
#define TK_NK_SPACE 300
|
#define TK_NK_SPACE 300
|
||||||
#define TK_NK_COMMENT 301
|
#define TK_NK_COMMENT 301
|
||||||
|
|
|
@ -34,66 +34,69 @@ typedef struct SFuncExecEnv {
|
||||||
int32_t calcMemSize;
|
int32_t calcMemSize;
|
||||||
} SFuncExecEnv;
|
} SFuncExecEnv;
|
||||||
|
|
||||||
typedef bool (*FExecGetEnv)(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv);
|
typedef bool (*FExecGetEnv)(struct SFunctionNode *pFunc, SFuncExecEnv *pEnv);
|
||||||
typedef bool (*FExecInit)(struct SqlFunctionCtx *pCtx, struct SResultRowEntryInfo* pResultCellInfo);
|
typedef bool (*FExecInit)(struct SqlFunctionCtx *pCtx, struct SResultRowEntryInfo *pResultCellInfo);
|
||||||
typedef int32_t (*FExecProcess)(struct SqlFunctionCtx *pCtx);
|
typedef int32_t (*FExecProcess)(struct SqlFunctionCtx *pCtx);
|
||||||
typedef int32_t (*FExecFinalize)(struct SqlFunctionCtx *pCtx, SSDataBlock* pBlock);
|
typedef int32_t (*FExecFinalize)(struct SqlFunctionCtx *pCtx, SSDataBlock *pBlock);
|
||||||
typedef int32_t (*FScalarExecProcess)(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
|
typedef int32_t (*FScalarExecProcess)(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
|
||||||
typedef int32_t (*FExecCombine)(struct SqlFunctionCtx *pDestCtx, struct SqlFunctionCtx *pSourceCtx);
|
typedef int32_t (*FExecCombine)(struct SqlFunctionCtx *pDestCtx, struct SqlFunctionCtx *pSourceCtx);
|
||||||
|
|
||||||
typedef struct SScalarFuncExecFuncs {
|
typedef struct SScalarFuncExecFuncs {
|
||||||
FExecGetEnv getEnv;
|
FExecGetEnv getEnv;
|
||||||
FScalarExecProcess process;
|
FScalarExecProcess process;
|
||||||
} SScalarFuncExecFuncs;
|
} SScalarFuncExecFuncs;
|
||||||
|
|
||||||
typedef struct SFuncExecFuncs {
|
typedef struct SFuncExecFuncs {
|
||||||
FExecGetEnv getEnv;
|
FExecGetEnv getEnv;
|
||||||
FExecInit init;
|
FExecInit init;
|
||||||
FExecProcess process;
|
FExecProcess process;
|
||||||
FExecFinalize finalize;
|
FExecFinalize finalize;
|
||||||
FExecCombine combine;
|
FExecCombine combine;
|
||||||
} SFuncExecFuncs;
|
} SFuncExecFuncs;
|
||||||
|
|
||||||
#define MAX_INTERVAL_TIME_WINDOW 1000000 // maximum allowed time windows in final results
|
#define MAX_INTERVAL_TIME_WINDOW 1000000 // maximum allowed time windows in final results
|
||||||
|
|
||||||
#define TOP_BOTTOM_QUERY_LIMIT 100
|
#define TOP_BOTTOM_QUERY_LIMIT 100
|
||||||
#define FUNCTIONS_NAME_MAX_LENGTH 16
|
#define FUNCTIONS_NAME_MAX_LENGTH 16
|
||||||
|
|
||||||
typedef struct SResultRowEntryInfo {
|
typedef struct SResultRowEntryInfo {
|
||||||
bool initialized:1; // output buffer has been initialized
|
bool initialized : 1; // output buffer has been initialized
|
||||||
bool complete:1; // query has completed
|
bool complete : 1; // query has completed
|
||||||
uint8_t isNullRes:6; // the result is null
|
uint8_t isNullRes : 6; // the result is null
|
||||||
uint16_t numOfRes; // num of output result in current buffer. NOT NULL RESULT
|
uint16_t numOfRes; // num of output result in current buffer. NOT NULL RESULT
|
||||||
} SResultRowEntryInfo;
|
} SResultRowEntryInfo;
|
||||||
|
|
||||||
// determine the real data need to calculated the result
|
// determine the real data need to calculated the result
|
||||||
enum {
|
enum {
|
||||||
BLK_DATA_NOT_LOAD = 0x0,
|
BLK_DATA_NOT_LOAD = 0x0,
|
||||||
BLK_DATA_SMA_LOAD = 0x1,
|
BLK_DATA_SMA_LOAD = 0x1,
|
||||||
BLK_DATA_DATA_LOAD = 0x3,
|
BLK_DATA_DATA_LOAD = 0x3,
|
||||||
BLK_DATA_FILTEROUT = 0x4, // discard current data block since it is not qualified for filter
|
BLK_DATA_FILTEROUT = 0x4, // discard current data block since it is not qualified for filter
|
||||||
};
|
};
|
||||||
|
|
||||||
enum {
|
enum {
|
||||||
MAIN_SCAN = 0x0u,
|
MAIN_SCAN = 0x0u,
|
||||||
REVERSE_SCAN = 0x1u, // todo remove it
|
REVERSE_SCAN = 0x1u, // todo remove it
|
||||||
REPEAT_SCAN = 0x2u, //repeat scan belongs to the master scan
|
REPEAT_SCAN = 0x2u, // repeat scan belongs to the master scan
|
||||||
MERGE_STAGE = 0x20u,
|
MERGE_STAGE = 0x20u,
|
||||||
};
|
};
|
||||||
|
|
||||||
typedef struct SPoint1 {
|
typedef struct SPoint1 {
|
||||||
int64_t key;
|
int64_t key;
|
||||||
union{double val; char* ptr;};
|
union {
|
||||||
|
double val;
|
||||||
|
char *ptr;
|
||||||
|
};
|
||||||
} SPoint1;
|
} SPoint1;
|
||||||
|
|
||||||
struct SqlFunctionCtx;
|
struct SqlFunctionCtx;
|
||||||
struct SResultRowEntryInfo;
|
struct SResultRowEntryInfo;
|
||||||
|
|
||||||
//for selectivity query, the corresponding tag value is assigned if the data is qualified
|
// for selectivity query, the corresponding tag value is assigned if the data is qualified
|
||||||
typedef struct SSubsidiaryResInfo {
|
typedef struct SSubsidiaryResInfo {
|
||||||
int16_t num;
|
int16_t num;
|
||||||
int32_t rowLen;
|
int32_t rowLen;
|
||||||
char* buf; // serialize data buffer
|
char *buf; // serialize data buffer
|
||||||
struct SqlFunctionCtx **pCtx;
|
struct SqlFunctionCtx **pCtx;
|
||||||
} SSubsidiaryResInfo;
|
} SSubsidiaryResInfo;
|
||||||
|
|
||||||
|
@ -106,69 +109,70 @@ typedef struct SResultDataInfo {
|
||||||
} SResultDataInfo;
|
} SResultDataInfo;
|
||||||
|
|
||||||
#define GET_RES_INFO(ctx) ((ctx)->resultInfo)
|
#define GET_RES_INFO(ctx) ((ctx)->resultInfo)
|
||||||
#define GET_ROWCELL_INTERBUF(_c) ((void*) ((char*)(_c) + sizeof(SResultRowEntryInfo)))
|
#define GET_ROWCELL_INTERBUF(_c) ((void *)((char *)(_c) + sizeof(SResultRowEntryInfo)))
|
||||||
|
|
||||||
typedef struct SInputColumnInfoData {
|
typedef struct SInputColumnInfoData {
|
||||||
int32_t totalRows; // total rows in current columnar data
|
int32_t totalRows; // total rows in current columnar data
|
||||||
int32_t startRowIndex; // handle started row index
|
int32_t startRowIndex; // handle started row index
|
||||||
int32_t numOfRows; // the number of rows needs to be handled
|
int32_t numOfRows; // the number of rows needs to be handled
|
||||||
int32_t numOfInputCols; // PTS is not included
|
int32_t numOfInputCols; // PTS is not included
|
||||||
bool colDataAggIsSet;// if agg is set or not
|
bool colDataAggIsSet; // if agg is set or not
|
||||||
SColumnInfoData *pPTS; // primary timestamp column
|
SColumnInfoData *pPTS; // primary timestamp column
|
||||||
SColumnInfoData **pData;
|
SColumnInfoData **pData;
|
||||||
SColumnDataAgg **pColumnDataAgg;
|
SColumnDataAgg **pColumnDataAgg;
|
||||||
uint64_t uid; // table uid, used to set the tag value when building the final query result for selectivity functions.
|
uint64_t uid; // table uid, used to set the tag value when building the final query result for selectivity functions.
|
||||||
} SInputColumnInfoData;
|
} SInputColumnInfoData;
|
||||||
|
|
||||||
typedef struct SSerializeDataHandle {
|
typedef struct SSerializeDataHandle {
|
||||||
struct SDiskbasedBuf* pBuf;
|
struct SDiskbasedBuf *pBuf;
|
||||||
int32_t currentPage;
|
int32_t currentPage;
|
||||||
|
void *pState;
|
||||||
} SSerializeDataHandle;
|
} SSerializeDataHandle;
|
||||||
|
|
||||||
// sql function runtime context
|
// sql function runtime context
|
||||||
typedef struct SqlFunctionCtx {
|
typedef struct SqlFunctionCtx {
|
||||||
SInputColumnInfoData input;
|
SInputColumnInfoData input;
|
||||||
SResultDataInfo resDataInfo;
|
SResultDataInfo resDataInfo;
|
||||||
uint32_t order; // data block scanner order: asc|desc
|
uint32_t order; // data block scanner order: asc|desc
|
||||||
uint8_t scanFlag; // record current running step, default: 0
|
uint8_t scanFlag; // record current running step, default: 0
|
||||||
int16_t functionId; // function id
|
int16_t functionId; // function id
|
||||||
char *pOutput; // final result output buffer, point to sdata->data
|
char *pOutput; // final result output buffer, point to sdata->data
|
||||||
int32_t numOfParams;
|
int32_t numOfParams;
|
||||||
SFunctParam *param; // input parameter, e.g., top(k, 20), the number of results for top query is kept in param
|
SFunctParam *param; // input parameter, e.g., top(k, 20), the number of results for top query is kept in param
|
||||||
SColumnInfoData *pTsOutput; // corresponding output buffer for timestamp of each result, e.g., top/bottom*/
|
SColumnInfoData *pTsOutput; // corresponding output buffer for timestamp of each result, e.g., top/bottom*/
|
||||||
int32_t offset;
|
int32_t offset;
|
||||||
struct SResultRowEntryInfo *resultInfo;
|
struct SResultRowEntryInfo *resultInfo;
|
||||||
SSubsidiaryResInfo subsidiaries;
|
SSubsidiaryResInfo subsidiaries;
|
||||||
SPoint1 start;
|
SPoint1 start;
|
||||||
SPoint1 end;
|
SPoint1 end;
|
||||||
SFuncExecFuncs fpSet;
|
SFuncExecFuncs fpSet;
|
||||||
SScalarFuncExecFuncs sfp;
|
SScalarFuncExecFuncs sfp;
|
||||||
struct SExprInfo *pExpr;
|
struct SExprInfo *pExpr;
|
||||||
struct SSDataBlock *pSrcBlock;
|
struct SSDataBlock *pSrcBlock;
|
||||||
struct SSDataBlock *pDstBlock; // used by indefinite rows function to set selectivity
|
struct SSDataBlock *pDstBlock; // used by indefinite rows function to set selectivity
|
||||||
SSerializeDataHandle saveHandle;
|
SSerializeDataHandle saveHandle;
|
||||||
bool isStream;
|
bool isStream;
|
||||||
|
|
||||||
char udfName[TSDB_FUNC_NAME_LEN];
|
char udfName[TSDB_FUNC_NAME_LEN];
|
||||||
} SqlFunctionCtx;
|
} SqlFunctionCtx;
|
||||||
|
|
||||||
enum {
|
enum {
|
||||||
TEXPR_BINARYEXPR_NODE= 0x1,
|
TEXPR_BINARYEXPR_NODE = 0x1,
|
||||||
TEXPR_UNARYEXPR_NODE = 0x2,
|
TEXPR_UNARYEXPR_NODE = 0x2,
|
||||||
};
|
};
|
||||||
|
|
||||||
typedef struct tExprNode {
|
typedef struct tExprNode {
|
||||||
int32_t nodeType;
|
int32_t nodeType;
|
||||||
union {
|
union {
|
||||||
struct {// function node
|
struct { // function node
|
||||||
char functionName[FUNCTIONS_NAME_MAX_LENGTH]; // todo refactor
|
char functionName[FUNCTIONS_NAME_MAX_LENGTH]; // todo refactor
|
||||||
int32_t functionId;
|
int32_t functionId;
|
||||||
int32_t num;
|
int32_t num;
|
||||||
struct SFunctionNode *pFunctNode;
|
struct SFunctionNode *pFunctNode;
|
||||||
} _function;
|
} _function;
|
||||||
|
|
||||||
struct {
|
struct {
|
||||||
struct SNode* pRootNode;
|
struct SNode *pRootNode;
|
||||||
} _optrRoot;
|
} _optrRoot;
|
||||||
};
|
};
|
||||||
} tExprNode;
|
} tExprNode;
|
||||||
|
@ -182,17 +186,18 @@ struct SScalarParam {
|
||||||
int32_t numOfRows;
|
int32_t numOfRows;
|
||||||
};
|
};
|
||||||
|
|
||||||
void cleanupResultRowEntry(struct SResultRowEntryInfo* pCell);
|
void cleanupResultRowEntry(struct SResultRowEntryInfo *pCell);
|
||||||
int32_t getNumOfResult(SqlFunctionCtx* pCtx, int32_t num, SSDataBlock* pResBlock);
|
int32_t getNumOfResult(SqlFunctionCtx *pCtx, int32_t num, SSDataBlock *pResBlock);
|
||||||
bool isRowEntryCompleted(struct SResultRowEntryInfo* pEntry);
|
bool isRowEntryCompleted(struct SResultRowEntryInfo *pEntry);
|
||||||
bool isRowEntryInitialized(struct SResultRowEntryInfo* pEntry);
|
bool isRowEntryInitialized(struct SResultRowEntryInfo *pEntry);
|
||||||
|
|
||||||
typedef struct SPoint {
|
typedef struct SPoint {
|
||||||
int64_t key;
|
int64_t key;
|
||||||
void * val;
|
void *val;
|
||||||
} SPoint;
|
} SPoint;
|
||||||
|
|
||||||
int32_t taosGetLinearInterpolationVal(SPoint* point, int32_t outputType, SPoint* point1, SPoint* point2, int32_t inputType);
|
int32_t taosGetLinearInterpolationVal(SPoint *point, int32_t outputType, SPoint *point1, SPoint *point2,
|
||||||
|
int32_t inputType);
|
||||||
|
|
||||||
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||||
// udf api
|
// udf api
|
||||||
|
|
|
@ -64,6 +64,7 @@ typedef struct SDatabaseOptions {
|
||||||
int64_t keep[3];
|
int64_t keep[3];
|
||||||
int32_t pages;
|
int32_t pages;
|
||||||
int32_t pagesize;
|
int32_t pagesize;
|
||||||
|
int32_t tsdbPageSize;
|
||||||
char precisionStr[3];
|
char precisionStr[3];
|
||||||
int8_t precision;
|
int8_t precision;
|
||||||
int8_t replica;
|
int8_t replica;
|
||||||
|
@ -78,6 +79,12 @@ typedef struct SDatabaseOptions {
|
||||||
int32_t walRetentionSize;
|
int32_t walRetentionSize;
|
||||||
int32_t walRollPeriod;
|
int32_t walRollPeriod;
|
||||||
int32_t walSegmentSize;
|
int32_t walSegmentSize;
|
||||||
|
bool walRetentionPeriodIsSet;
|
||||||
|
bool walRetentionSizeIsSet;
|
||||||
|
bool walRollPeriodIsSet;
|
||||||
|
int32_t sstTrigger;
|
||||||
|
int32_t tablePrefix;
|
||||||
|
int32_t tableSuffix;
|
||||||
} SDatabaseOptions;
|
} SDatabaseOptions;
|
||||||
|
|
||||||
typedef struct SCreateDatabaseStmt {
|
typedef struct SCreateDatabaseStmt {
|
||||||
|
@ -268,6 +275,12 @@ typedef struct SShowDnodeVariablesStmt {
|
||||||
SNode* pDnodeId;
|
SNode* pDnodeId;
|
||||||
} SShowDnodeVariablesStmt;
|
} SShowDnodeVariablesStmt;
|
||||||
|
|
||||||
|
typedef struct SShowVnodesStmt {
|
||||||
|
ENodeType type;
|
||||||
|
SNode* pDnodeId;
|
||||||
|
SNode* pDnodeEndpoint;
|
||||||
|
} SShowVnodesStmt;
|
||||||
|
|
||||||
typedef enum EIndexType { INDEX_TYPE_SMA = 1, INDEX_TYPE_FULLTEXT } EIndexType;
|
typedef enum EIndexType { INDEX_TYPE_SMA = 1, INDEX_TYPE_FULLTEXT } EIndexType;
|
||||||
|
|
||||||
typedef struct SIndexOptions {
|
typedef struct SIndexOptions {
|
||||||
|
|
|
@ -183,12 +183,12 @@ typedef enum ENodeType {
|
||||||
QUERY_NODE_SHOW_DNODE_VARIABLES_STMT,
|
QUERY_NODE_SHOW_DNODE_VARIABLES_STMT,
|
||||||
QUERY_NODE_SHOW_TRANSACTIONS_STMT,
|
QUERY_NODE_SHOW_TRANSACTIONS_STMT,
|
||||||
QUERY_NODE_SHOW_SUBSCRIPTIONS_STMT,
|
QUERY_NODE_SHOW_SUBSCRIPTIONS_STMT,
|
||||||
|
QUERY_NODE_SHOW_VNODES_STMT,
|
||||||
QUERY_NODE_SHOW_CREATE_DATABASE_STMT,
|
QUERY_NODE_SHOW_CREATE_DATABASE_STMT,
|
||||||
QUERY_NODE_SHOW_CREATE_TABLE_STMT,
|
QUERY_NODE_SHOW_CREATE_TABLE_STMT,
|
||||||
QUERY_NODE_SHOW_CREATE_STABLE_STMT,
|
QUERY_NODE_SHOW_CREATE_STABLE_STMT,
|
||||||
QUERY_NODE_SHOW_TABLE_DISTRIBUTED_STMT,
|
QUERY_NODE_SHOW_TABLE_DISTRIBUTED_STMT,
|
||||||
QUERY_NODE_SHOW_LOCAL_VARIABLES_STMT,
|
QUERY_NODE_SHOW_LOCAL_VARIABLES_STMT,
|
||||||
QUERY_NODE_SHOW_VNODES_STMT,
|
|
||||||
QUERY_NODE_SHOW_SCORES_STMT,
|
QUERY_NODE_SHOW_SCORES_STMT,
|
||||||
QUERY_NODE_KILL_CONNECTION_STMT,
|
QUERY_NODE_KILL_CONNECTION_STMT,
|
||||||
QUERY_NODE_KILL_QUERY_STMT,
|
QUERY_NODE_KILL_QUERY_STMT,
|
||||||
|
@ -244,6 +244,7 @@ typedef enum ENodeType {
|
||||||
QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE,
|
QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE,
|
||||||
QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE,
|
QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE,
|
||||||
QUERY_NODE_PHYSICAL_PLAN_PARTITION,
|
QUERY_NODE_PHYSICAL_PLAN_PARTITION,
|
||||||
|
QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION,
|
||||||
QUERY_NODE_PHYSICAL_PLAN_INDEF_ROWS_FUNC,
|
QUERY_NODE_PHYSICAL_PLAN_INDEF_ROWS_FUNC,
|
||||||
QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC,
|
QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC,
|
||||||
QUERY_NODE_PHYSICAL_PLAN_DISPATCH,
|
QUERY_NODE_PHYSICAL_PLAN_DISPATCH,
|
||||||
|
@ -319,6 +320,9 @@ int32_t nodesStringToNode(const char* pStr, SNode** pNode);
|
||||||
int32_t nodesListToString(const SNodeList* pList, bool format, char** pStr, int32_t* pLen);
|
int32_t nodesListToString(const SNodeList* pList, bool format, char** pStr, int32_t* pLen);
|
||||||
int32_t nodesStringToList(const char* pStr, SNodeList** pList);
|
int32_t nodesStringToList(const char* pStr, SNodeList** pList);
|
||||||
|
|
||||||
|
int32_t nodesNodeToMsg(const SNode* pNode, char** pMsg, int32_t* pLen);
|
||||||
|
int32_t nodesMsgToNode(const char* pStr, int32_t len, SNode** pNode);
|
||||||
|
|
||||||
int32_t nodesNodeToSQL(SNode* pNode, char* buf, int32_t bufSize, int32_t* len);
|
int32_t nodesNodeToSQL(SNode* pNode, char* buf, int32_t bufSize, int32_t* len);
|
||||||
char* nodesGetNameFromColumnNode(SNode* pNode);
|
char* nodesGetNameFromColumnNode(SNode* pNode);
|
||||||
int32_t nodesGetOutputNumFromSlotList(SNodeList* pSlots);
|
int32_t nodesGetOutputNumFromSlotList(SNodeList* pSlots);
|
||||||
|
|
|
@ -151,6 +151,8 @@ typedef struct SVnodeModifyLogicNode {
|
||||||
SArray* pDataBlocks;
|
SArray* pDataBlocks;
|
||||||
SVgDataBlocks* pVgDataBlocks;
|
SVgDataBlocks* pVgDataBlocks;
|
||||||
SNode* pAffectedRows; // SColumnNode
|
SNode* pAffectedRows; // SColumnNode
|
||||||
|
SNode* pStartTs; // SColumnNode
|
||||||
|
SNode* pEndTs; // SColumnNode
|
||||||
uint64_t tableId;
|
uint64_t tableId;
|
||||||
uint64_t stableId;
|
uint64_t stableId;
|
||||||
int8_t tableType; // table type
|
int8_t tableType; // table type
|
||||||
|
@ -489,6 +491,8 @@ typedef struct SPartitionPhysiNode {
|
||||||
SNodeList* pTargets;
|
SNodeList* pTargets;
|
||||||
} SPartitionPhysiNode;
|
} SPartitionPhysiNode;
|
||||||
|
|
||||||
|
typedef SPartitionPhysiNode SStreamPartitionPhysiNode;
|
||||||
|
|
||||||
typedef struct SDataSinkNode {
|
typedef struct SDataSinkNode {
|
||||||
ENodeType type;
|
ENodeType type;
|
||||||
SDataBlockDescNode* pInputDataBlockDesc;
|
SDataBlockDescNode* pInputDataBlockDesc;
|
||||||
|
@ -524,6 +528,8 @@ typedef struct SDataDeleterNode {
|
||||||
char tsColName[TSDB_COL_NAME_LEN];
|
char tsColName[TSDB_COL_NAME_LEN];
|
||||||
STimeWindow deleteTimeRange;
|
STimeWindow deleteTimeRange;
|
||||||
SNode* pAffectedRows;
|
SNode* pAffectedRows;
|
||||||
|
SNode* pStartTs;
|
||||||
|
SNode* pEndTs;
|
||||||
} SDataDeleterNode;
|
} SDataDeleterNode;
|
||||||
|
|
||||||
typedef struct SSubplan {
|
typedef struct SSubplan {
|
||||||
|
|
|
@ -315,6 +315,8 @@ typedef struct SDeleteStmt {
|
||||||
SNode* pFromTable; // FROM clause
|
SNode* pFromTable; // FROM clause
|
||||||
SNode* pWhere; // WHERE clause
|
SNode* pWhere; // WHERE clause
|
||||||
SNode* pCountFunc; // count the number of rows affected
|
SNode* pCountFunc; // count the number of rows affected
|
||||||
|
SNode* pFirstFunc; // the start timestamp when the data was actually deleted
|
||||||
|
SNode* pLastFunc; // the end timestamp when the data was actually deleted
|
||||||
SNode* pTagCond; // pWhere divided into pTagCond and timeRange
|
SNode* pTagCond; // pWhere divided into pTagCond and timeRange
|
||||||
STimeWindow timeRange;
|
STimeWindow timeRange;
|
||||||
uint8_t precision;
|
uint8_t precision;
|
||||||
|
|
|
@ -52,10 +52,14 @@ int32_t qSetSubplanExecutionNode(SSubplan* pSubplan, int32_t groupId, SDownstrea
|
||||||
|
|
||||||
void qClearSubplanExecutionNode(SSubplan* pSubplan);
|
void qClearSubplanExecutionNode(SSubplan* pSubplan);
|
||||||
|
|
||||||
// Convert to subplan to string for the scheduler to send to the executor
|
// Convert to subplan to display string for the scheduler to send to the executor
|
||||||
int32_t qSubPlanToString(const SSubplan* pSubplan, char** pStr, int32_t* pLen);
|
int32_t qSubPlanToString(const SSubplan* pSubplan, char** pStr, int32_t* pLen);
|
||||||
int32_t qStringToSubplan(const char* pStr, SSubplan** pSubplan);
|
int32_t qStringToSubplan(const char* pStr, SSubplan** pSubplan);
|
||||||
|
|
||||||
|
// Convert to subplan to msg for the scheduler to send to the executor
|
||||||
|
int32_t qSubPlanToMsg(const SSubplan* pSubplan, char** pStr, int32_t* pLen);
|
||||||
|
int32_t qMsgToSubplan(const char* pStr, int32_t len, SSubplan** pSubplan);
|
||||||
|
|
||||||
char* qQueryPlanToString(const SQueryPlan* pPlan);
|
char* qQueryPlanToString(const SQueryPlan* pPlan);
|
||||||
SQueryPlan* qStringToQueryPlan(const char* pStr);
|
SQueryPlan* qStringToQueryPlan(const char* pStr);
|
||||||
|
|
||||||
|
|
|
@ -117,6 +117,8 @@ typedef struct STableMeta {
|
||||||
|
|
||||||
typedef struct SDBVgInfo {
|
typedef struct SDBVgInfo {
|
||||||
int32_t vgVersion;
|
int32_t vgVersion;
|
||||||
|
int16_t hashPrefix;
|
||||||
|
int16_t hashSuffix;
|
||||||
int8_t hashMethod;
|
int8_t hashMethod;
|
||||||
int32_t numOfTable; // DB's table num, unit is TSDB_TABLE_NUM_UNIT
|
int32_t numOfTable; // DB's table num, unit is TSDB_TABLE_NUM_UNIT
|
||||||
SHashObj* vgHash; // key:vgId, value:SVgroupInfo
|
SHashObj* vgHash; // key:vgId, value:SVgroupInfo
|
||||||
|
|
|
@ -0,0 +1,78 @@
|
||||||
|
/*
|
||||||
|
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||||
|
*
|
||||||
|
* This program is free software: you can use, redistribute, and/or modify
|
||||||
|
* it under the terms of the GNU Affero General Public License, version 3
|
||||||
|
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||||
|
*
|
||||||
|
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||||
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU Affero General Public License
|
||||||
|
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include "tdatablock.h"
|
||||||
|
#include "tdbInt.h"
|
||||||
|
|
||||||
|
#ifdef __cplusplus
|
||||||
|
extern "C" {
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifndef _STREAM_STATE_H_
|
||||||
|
#define _STREAM_STATE_H_
|
||||||
|
|
||||||
|
typedef struct SStreamTask SStreamTask;
|
||||||
|
|
||||||
|
// incremental state storage
|
||||||
|
typedef struct {
|
||||||
|
SStreamTask* pOwner;
|
||||||
|
TDB* db;
|
||||||
|
TTB* pStateDb;
|
||||||
|
TTB* pFuncStateDb;
|
||||||
|
TXN txn;
|
||||||
|
} SStreamState;
|
||||||
|
|
||||||
|
SStreamState* streamStateOpen(char* path, SStreamTask* pTask);
|
||||||
|
void streamStateClose(SStreamState* pState);
|
||||||
|
int32_t streamStateBegin(SStreamState* pState);
|
||||||
|
int32_t streamStateCommit(SStreamState* pState);
|
||||||
|
int32_t streamStateAbort(SStreamState* pState);
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
TBC* pCur;
|
||||||
|
} SStreamStateCur;
|
||||||
|
|
||||||
|
#if 1
|
||||||
|
int32_t streamStateFuncPut(SStreamState* pState, const STupleKey* key, const void* value, int32_t vLen);
|
||||||
|
int32_t streamStateFuncGet(SStreamState* pState, const STupleKey* key, void** pVal, int32_t* pVLen);
|
||||||
|
int32_t streamStateFuncDel(SStreamState* pState, const STupleKey* key);
|
||||||
|
|
||||||
|
int32_t streamStatePut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen);
|
||||||
|
int32_t streamStateGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);
|
||||||
|
int32_t streamStateDel(SStreamState* pState, const SWinKey* key);
|
||||||
|
int32_t streamStateAddIfNotExist(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);
|
||||||
|
int32_t streamStateReleaseBuf(SStreamState* pState, const SWinKey* key, void* pVal);
|
||||||
|
void streamFreeVal(void* val);
|
||||||
|
|
||||||
|
SStreamStateCur* streamStateGetCur(SStreamState* pState, const SWinKey* key);
|
||||||
|
SStreamStateCur* streamStateSeekKeyNext(SStreamState* pState, const SWinKey* key);
|
||||||
|
SStreamStateCur* streamStateSeekKeyPrev(SStreamState* pState, const SWinKey* key);
|
||||||
|
void streamStateFreeCur(SStreamStateCur* pCur);
|
||||||
|
|
||||||
|
int32_t streamStateGetKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen);
|
||||||
|
|
||||||
|
int32_t streamStateSeekFirst(SStreamState* pState, SStreamStateCur* pCur);
|
||||||
|
int32_t streamStateSeekLast(SStreamState* pState, SStreamStateCur* pCur);
|
||||||
|
|
||||||
|
int32_t streamStateCurNext(SStreamState* pState, SStreamStateCur* pCur);
|
||||||
|
int32_t streamStateCurPrev(SStreamState* pState, SStreamStateCur* pCur);
|
||||||
|
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef __cplusplus
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#endif /* ifndef _STREAM_STATE_H_ */
|
|
@ -16,6 +16,7 @@
|
||||||
#include "executor.h"
|
#include "executor.h"
|
||||||
#include "os.h"
|
#include "os.h"
|
||||||
#include "query.h"
|
#include "query.h"
|
||||||
|
#include "streamState.h"
|
||||||
#include "tdatablock.h"
|
#include "tdatablock.h"
|
||||||
#include "tdbInt.h"
|
#include "tdbInt.h"
|
||||||
#include "tmsg.h"
|
#include "tmsg.h"
|
||||||
|
@ -263,14 +264,6 @@ typedef struct {
|
||||||
SArray* checkpointVer;
|
SArray* checkpointVer;
|
||||||
} SStreamRecoveringState;
|
} SStreamRecoveringState;
|
||||||
|
|
||||||
// incremental state storage
|
|
||||||
typedef struct {
|
|
||||||
SStreamTask* pOwner;
|
|
||||||
TDB* db;
|
|
||||||
TTB* pStateDb;
|
|
||||||
TXN txn;
|
|
||||||
} SStreamState;
|
|
||||||
|
|
||||||
typedef struct SStreamTask {
|
typedef struct SStreamTask {
|
||||||
int64_t streamId;
|
int64_t streamId;
|
||||||
int32_t taskId;
|
int32_t taskId;
|
||||||
|
@ -540,37 +533,6 @@ int32_t streamMetaCommit(SStreamMeta* pMeta);
|
||||||
int32_t streamMetaRollBack(SStreamMeta* pMeta);
|
int32_t streamMetaRollBack(SStreamMeta* pMeta);
|
||||||
int32_t streamLoadTasks(SStreamMeta* pMeta);
|
int32_t streamLoadTasks(SStreamMeta* pMeta);
|
||||||
|
|
||||||
SStreamState* streamStateOpen(char* path, SStreamTask* pTask);
|
|
||||||
void streamStateClose(SStreamState* pState);
|
|
||||||
int32_t streamStateBegin(SStreamState* pState);
|
|
||||||
int32_t streamStateCommit(SStreamState* pState);
|
|
||||||
int32_t streamStateAbort(SStreamState* pState);
|
|
||||||
|
|
||||||
typedef struct {
|
|
||||||
TBC* pCur;
|
|
||||||
} SStreamStateCur;
|
|
||||||
|
|
||||||
#if 1
|
|
||||||
int32_t streamStatePut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen);
|
|
||||||
int32_t streamStateGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);
|
|
||||||
int32_t streamStateDel(SStreamState* pState, const SWinKey* key);
|
|
||||||
void streamFreeVal(void* val);
|
|
||||||
|
|
||||||
SStreamStateCur* streamStateGetCur(SStreamState* pState, const SWinKey* key);
|
|
||||||
SStreamStateCur* streamStateSeekKeyNext(SStreamState* pState, const SWinKey* key);
|
|
||||||
SStreamStateCur* streamStateSeekKeyPrev(SStreamState* pState, const SWinKey* key);
|
|
||||||
void streamStateFreeCur(SStreamStateCur* pCur);
|
|
||||||
|
|
||||||
int32_t streamStateGetKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen);
|
|
||||||
|
|
||||||
int32_t streamStateSeekFirst(SStreamState* pState, SStreamStateCur* pCur);
|
|
||||||
int32_t streamStateSeekLast(SStreamState* pState, SStreamStateCur* pCur);
|
|
||||||
|
|
||||||
int32_t streamStateCurNext(SStreamState* pState, SStreamStateCur* pCur);
|
|
||||||
int32_t streamStateCurPrev(SStreamState* pState, SStreamStateCur* pCur);
|
|
||||||
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#ifdef __cplusplus
|
#ifdef __cplusplus
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
|
@ -444,6 +444,70 @@ void syncAppendEntriesReplyPrint2(char* s, const SyncAppendEntriesReply* pMsg);
|
||||||
void syncAppendEntriesReplyLog(const SyncAppendEntriesReply* pMsg);
|
void syncAppendEntriesReplyLog(const SyncAppendEntriesReply* pMsg);
|
||||||
void syncAppendEntriesReplyLog2(char* s, const SyncAppendEntriesReply* pMsg);
|
void syncAppendEntriesReplyLog2(char* s, const SyncAppendEntriesReply* pMsg);
|
||||||
|
|
||||||
|
// ---------------------------------------------
|
||||||
|
typedef struct SyncHeartbeat {
|
||||||
|
uint32_t bytes;
|
||||||
|
int32_t vgId;
|
||||||
|
uint32_t msgType;
|
||||||
|
SRaftId srcId;
|
||||||
|
SRaftId destId;
|
||||||
|
|
||||||
|
// private data
|
||||||
|
SyncTerm term;
|
||||||
|
SyncIndex commitIndex;
|
||||||
|
SyncTerm privateTerm;
|
||||||
|
} SyncHeartbeat;
|
||||||
|
|
||||||
|
SyncHeartbeat* syncHeartbeatBuild(int32_t vgId);
|
||||||
|
void syncHeartbeatDestroy(SyncHeartbeat* pMsg);
|
||||||
|
void syncHeartbeatSerialize(const SyncHeartbeat* pMsg, char* buf, uint32_t bufLen);
|
||||||
|
void syncHeartbeatDeserialize(const char* buf, uint32_t len, SyncHeartbeat* pMsg);
|
||||||
|
char* syncHeartbeatSerialize2(const SyncHeartbeat* pMsg, uint32_t* len);
|
||||||
|
SyncHeartbeat* syncHeartbeatDeserialize2(const char* buf, uint32_t len);
|
||||||
|
void syncHeartbeat2RpcMsg(const SyncHeartbeat* pMsg, SRpcMsg* pRpcMsg);
|
||||||
|
void syncHeartbeatFromRpcMsg(const SRpcMsg* pRpcMsg, SyncHeartbeat* pMsg);
|
||||||
|
SyncHeartbeat* syncHeartbeatFromRpcMsg2(const SRpcMsg* pRpcMsg);
|
||||||
|
cJSON* syncHeartbeat2Json(const SyncHeartbeat* pMsg);
|
||||||
|
char* syncHeartbeat2Str(const SyncHeartbeat* pMsg);
|
||||||
|
|
||||||
|
// for debug ----------------------
|
||||||
|
void syncHeartbeatPrint(const SyncHeartbeat* pMsg);
|
||||||
|
void syncHeartbeatPrint2(char* s, const SyncHeartbeat* pMsg);
|
||||||
|
void syncHeartbeatLog(const SyncHeartbeat* pMsg);
|
||||||
|
void syncHeartbeatLog2(char* s, const SyncHeartbeat* pMsg);
|
||||||
|
|
||||||
|
// ---------------------------------------------
|
||||||
|
typedef struct SyncHeartbeatReply {
|
||||||
|
uint32_t bytes;
|
||||||
|
int32_t vgId;
|
||||||
|
uint32_t msgType;
|
||||||
|
SRaftId srcId;
|
||||||
|
SRaftId destId;
|
||||||
|
|
||||||
|
// private data
|
||||||
|
SyncTerm term;
|
||||||
|
SyncTerm privateTerm;
|
||||||
|
int64_t startTime;
|
||||||
|
} SyncHeartbeatReply;
|
||||||
|
|
||||||
|
SyncHeartbeatReply* syncHeartbeatReplyBuild(int32_t vgId);
|
||||||
|
void syncHeartbeatReplyDestroy(SyncHeartbeatReply* pMsg);
|
||||||
|
void syncHeartbeatReplySerialize(const SyncHeartbeatReply* pMsg, char* buf, uint32_t bufLen);
|
||||||
|
void syncHeartbeatReplyDeserialize(const char* buf, uint32_t len, SyncHeartbeatReply* pMsg);
|
||||||
|
char* syncHeartbeatReplySerialize2(const SyncHeartbeatReply* pMsg, uint32_t* len);
|
||||||
|
SyncHeartbeatReply* syncHeartbeatReplyDeserialize2(const char* buf, uint32_t len);
|
||||||
|
void syncHeartbeatReply2RpcMsg(const SyncHeartbeatReply* pMsg, SRpcMsg* pRpcMsg);
|
||||||
|
void syncHeartbeatReplyFromRpcMsg(const SRpcMsg* pRpcMsg, SyncHeartbeatReply* pMsg);
|
||||||
|
SyncHeartbeatReply* syncHeartbeatReplyFromRpcMsg2(const SRpcMsg* pRpcMsg);
|
||||||
|
cJSON* syncHeartbeatReply2Json(const SyncHeartbeatReply* pMsg);
|
||||||
|
char* syncHeartbeatReply2Str(const SyncHeartbeatReply* pMsg);
|
||||||
|
|
||||||
|
// for debug ----------------------
|
||||||
|
void syncHeartbeatReplyPrint(const SyncHeartbeatReply* pMsg);
|
||||||
|
void syncHeartbeatReplyPrint2(char* s, const SyncHeartbeatReply* pMsg);
|
||||||
|
void syncHeartbeatReplyLog(const SyncHeartbeatReply* pMsg);
|
||||||
|
void syncHeartbeatReplyLog2(char* s, const SyncHeartbeatReply* pMsg);
|
||||||
|
|
||||||
// ---------------------------------------------
|
// ---------------------------------------------
|
||||||
typedef struct SyncApplyMsg {
|
typedef struct SyncApplyMsg {
|
||||||
uint32_t bytes;
|
uint32_t bytes;
|
||||||
|
|
|
@ -69,6 +69,14 @@ void tfsUpdateSize(STfs *pTfs);
|
||||||
*/
|
*/
|
||||||
SDiskSize tfsGetSize(STfs *pTfs);
|
SDiskSize tfsGetSize(STfs *pTfs);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @brief Get level of multi-tier storage.
|
||||||
|
*
|
||||||
|
* @param pTfs
|
||||||
|
* @return int32_t
|
||||||
|
*/
|
||||||
|
int32_t tfsGetLevel(STfs *pTfs);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief Allocate an existing available tier level from fs.
|
* @brief Allocate an existing available tier level from fs.
|
||||||
*
|
*
|
||||||
|
|
|
@ -56,6 +56,7 @@ void taosRemoveDir(const char *dirname);
|
||||||
bool taosDirExist(const char *dirname);
|
bool taosDirExist(const char *dirname);
|
||||||
int32_t taosMkDir(const char *dirname);
|
int32_t taosMkDir(const char *dirname);
|
||||||
int32_t taosMulMkDir(const char *dirname);
|
int32_t taosMulMkDir(const char *dirname);
|
||||||
|
int32_t taosMulModeMkDir(const char *dirname, int mode);
|
||||||
void taosRemoveOldFiles(const char *dirname, int32_t keepDays);
|
void taosRemoveOldFiles(const char *dirname, int32_t keepDays);
|
||||||
int32_t taosExpandDir(const char *dirname, char *outname, int32_t maxlen);
|
int32_t taosExpandDir(const char *dirname, char *outname, int32_t maxlen);
|
||||||
int32_t taosRealPath(char *dirname, char *realPath, int32_t maxlen);
|
int32_t taosRealPath(char *dirname, char *realPath, int32_t maxlen);
|
||||||
|
|
|
@ -285,6 +285,7 @@ int32_t* taosGetErrno();
|
||||||
#define TSDB_CODE_MND_TOPIC_SUBSCRIBED TAOS_DEF_ERROR_CODE(0, 0x03EB)
|
#define TSDB_CODE_MND_TOPIC_SUBSCRIBED TAOS_DEF_ERROR_CODE(0, 0x03EB)
|
||||||
#define TSDB_CODE_MND_CGROUP_USED TAOS_DEF_ERROR_CODE(0, 0x03EC)
|
#define TSDB_CODE_MND_CGROUP_USED TAOS_DEF_ERROR_CODE(0, 0x03EC)
|
||||||
#define TSDB_CODE_MND_TOPIC_MUST_BE_DELETED TAOS_DEF_ERROR_CODE(0, 0x03ED)
|
#define TSDB_CODE_MND_TOPIC_MUST_BE_DELETED TAOS_DEF_ERROR_CODE(0, 0x03ED)
|
||||||
|
#define TSDB_CODE_MND_IN_REBALANCE TAOS_DEF_ERROR_CODE(0, 0x03EF)
|
||||||
|
|
||||||
// mnode-stream
|
// mnode-stream
|
||||||
#define TSDB_CODE_MND_STREAM_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x03F0)
|
#define TSDB_CODE_MND_STREAM_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x03F0)
|
||||||
|
@ -577,6 +578,7 @@ int32_t* taosGetErrno();
|
||||||
#define TSDB_CODE_FUNC_FUNTION_PARA_TYPE TAOS_DEF_ERROR_CODE(0, 0x2802)
|
#define TSDB_CODE_FUNC_FUNTION_PARA_TYPE TAOS_DEF_ERROR_CODE(0, 0x2802)
|
||||||
#define TSDB_CODE_FUNC_FUNTION_PARA_VALUE TAOS_DEF_ERROR_CODE(0, 0x2803)
|
#define TSDB_CODE_FUNC_FUNTION_PARA_VALUE TAOS_DEF_ERROR_CODE(0, 0x2803)
|
||||||
#define TSDB_CODE_FUNC_NOT_BUILTIN_FUNTION TAOS_DEF_ERROR_CODE(0, 0x2804)
|
#define TSDB_CODE_FUNC_NOT_BUILTIN_FUNTION TAOS_DEF_ERROR_CODE(0, 0x2804)
|
||||||
|
#define TSDB_CODE_FUNC_DUP_TIMESTAMP TAOS_DEF_ERROR_CODE(0, 0x2805)
|
||||||
|
|
||||||
//udf
|
//udf
|
||||||
#define TSDB_CODE_UDF_STOPPING TAOS_DEF_ERROR_CODE(0, 0x2901)
|
#define TSDB_CODE_UDF_STOPPING TAOS_DEF_ERROR_CODE(0, 0x2901)
|
||||||
|
@ -616,6 +618,7 @@ int32_t* taosGetErrno();
|
||||||
#define TSDB_CODE_RSMA_FETCH_MSG_MSSED_UP TAOS_DEF_ERROR_CODE(0, 0x3155)
|
#define TSDB_CODE_RSMA_FETCH_MSG_MSSED_UP TAOS_DEF_ERROR_CODE(0, 0x3155)
|
||||||
#define TSDB_CODE_RSMA_EMPTY_INFO TAOS_DEF_ERROR_CODE(0, 0x3156)
|
#define TSDB_CODE_RSMA_EMPTY_INFO TAOS_DEF_ERROR_CODE(0, 0x3156)
|
||||||
#define TSDB_CODE_RSMA_INVALID_SCHEMA TAOS_DEF_ERROR_CODE(0, 0x3157)
|
#define TSDB_CODE_RSMA_INVALID_SCHEMA TAOS_DEF_ERROR_CODE(0, 0x3157)
|
||||||
|
#define TSDB_CODE_RSMA_REGEX_MATCH TAOS_DEF_ERROR_CODE(0, 0x3158)
|
||||||
|
|
||||||
//index
|
//index
|
||||||
#define TSDB_CODE_INDEX_REBUILDING TAOS_DEF_ERROR_CODE(0, 0x3200)
|
#define TSDB_CODE_INDEX_REBUILDING TAOS_DEF_ERROR_CODE(0, 0x3200)
|
||||||
|
|
|
@ -300,6 +300,9 @@ typedef enum ELogicConditionType {
|
||||||
#define TSDB_DEFAULT_PAGES_PER_VNODE 256
|
#define TSDB_DEFAULT_PAGES_PER_VNODE 256
|
||||||
#define TSDB_MIN_PAGESIZE_PER_VNODE 1 // unit KB
|
#define TSDB_MIN_PAGESIZE_PER_VNODE 1 // unit KB
|
||||||
#define TSDB_MAX_PAGESIZE_PER_VNODE 16384
|
#define TSDB_MAX_PAGESIZE_PER_VNODE 16384
|
||||||
|
#define TSDB_DEFAULT_TSDB_PAGESIZE 4
|
||||||
|
#define TSDB_MIN_TSDB_PAGESIZE 1 // unit KB
|
||||||
|
#define TSDB_MAX_TSDB_PAGESIZE 16384
|
||||||
#define TSDB_DEFAULT_PAGESIZE_PER_VNODE 4
|
#define TSDB_DEFAULT_PAGESIZE_PER_VNODE 4
|
||||||
#define TSDB_MIN_DAYS_PER_FILE 60 // unit minute
|
#define TSDB_MIN_DAYS_PER_FILE 60 // unit minute
|
||||||
#define TSDB_MAX_DAYS_PER_FILE (3650 * 1440)
|
#define TSDB_MAX_DAYS_PER_FILE (3650 * 1440)
|
||||||
|
@ -359,15 +362,27 @@ typedef enum ELogicConditionType {
|
||||||
#define TSDB_DB_SCHEMALESS_ON 1
|
#define TSDB_DB_SCHEMALESS_ON 1
|
||||||
#define TSDB_DB_SCHEMALESS_OFF 0
|
#define TSDB_DB_SCHEMALESS_OFF 0
|
||||||
#define TSDB_DEFAULT_DB_SCHEMALESS TSDB_DB_SCHEMALESS_OFF
|
#define TSDB_DEFAULT_DB_SCHEMALESS TSDB_DB_SCHEMALESS_OFF
|
||||||
|
#define TSDB_MIN_STT_TRIGGER 1
|
||||||
|
#define TSDB_MAX_STT_TRIGGER 16
|
||||||
|
#define TSDB_DEFAULT_SST_TRIGGER 8
|
||||||
|
#define TSDB_MIN_HASH_PREFIX 0
|
||||||
|
#define TSDB_MAX_HASH_PREFIX 128
|
||||||
|
#define TSDB_DEFAULT_HASH_PREFIX 0
|
||||||
|
#define TSDB_MIN_HASH_SUFFIX 0
|
||||||
|
#define TSDB_MAX_HASH_SUFFIX 128
|
||||||
|
#define TSDB_DEFAULT_HASH_SUFFIX 0
|
||||||
|
|
||||||
#define TSDB_DB_MIN_WAL_RETENTION_PERIOD -1
|
#define TSDB_DB_MIN_WAL_RETENTION_PERIOD -1
|
||||||
#define TSDB_DEFAULT_DB_WAL_RETENTION_PERIOD (24 * 60 * 60 * 4)
|
#define TSDB_REP_DEF_DB_WAL_RET_PERIOD 0
|
||||||
#define TSDB_DB_MIN_WAL_RETENTION_SIZE -1
|
#define TSDB_REPS_DEF_DB_WAL_RET_PERIOD (24 * 60 * 60 * 4)
|
||||||
#define TSDB_DEFAULT_DB_WAL_RETENTION_SIZE -1
|
#define TSDB_DB_MIN_WAL_RETENTION_SIZE -1
|
||||||
#define TSDB_DB_MIN_WAL_ROLL_PERIOD 0
|
#define TSDB_REP_DEF_DB_WAL_RET_SIZE 0
|
||||||
#define TSDB_DEFAULT_DB_WAL_ROLL_PERIOD (24 * 60 * 60 * 1)
|
#define TSDB_REPS_DEF_DB_WAL_RET_SIZE -1
|
||||||
#define TSDB_DB_MIN_WAL_SEGMENT_SIZE 0
|
#define TSDB_DB_MIN_WAL_ROLL_PERIOD 0
|
||||||
#define TSDB_DEFAULT_DB_WAL_SEGMENT_SIZE 0
|
#define TSDB_REP_DEF_DB_WAL_ROLL_PERIOD 0
|
||||||
|
#define TSDB_REPS_DEF_DB_WAL_ROLL_PERIOD (24 * 60 * 60 * 1)
|
||||||
|
#define TSDB_DB_MIN_WAL_SEGMENT_SIZE 0
|
||||||
|
#define TSDB_DEFAULT_DB_WAL_SEGMENT_SIZE 0
|
||||||
|
|
||||||
#define TSDB_MIN_ROLLUP_MAX_DELAY 1 // unit millisecond
|
#define TSDB_MIN_ROLLUP_MAX_DELAY 1 // unit millisecond
|
||||||
#define TSDB_MAX_ROLLUP_MAX_DELAY (15 * 60 * 1000)
|
#define TSDB_MAX_ROLLUP_MAX_DELAY (15 * 60 * 1000)
|
||||||
|
@ -386,7 +401,7 @@ typedef enum ELogicConditionType {
|
||||||
|
|
||||||
#define TSDB_DEFAULT_EXPLAIN_VERBOSE false
|
#define TSDB_DEFAULT_EXPLAIN_VERBOSE false
|
||||||
|
|
||||||
#define TSDB_EXPLAIN_RESULT_ROW_SIZE (16*1024)
|
#define TSDB_EXPLAIN_RESULT_ROW_SIZE (16 * 1024)
|
||||||
#define TSDB_EXPLAIN_RESULT_COLUMN_NAME "QUERY_PLAN"
|
#define TSDB_EXPLAIN_RESULT_COLUMN_NAME "QUERY_PLAN"
|
||||||
|
|
||||||
#define TSDB_MAX_FIELD_LEN 16384
|
#define TSDB_MAX_FIELD_LEN 16384
|
||||||
|
|
|
@ -0,0 +1,78 @@
|
||||||
|
/*
|
||||||
|
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||||
|
*
|
||||||
|
* This program is free software: you can use, redistribute, and/or modify
|
||||||
|
* it under the terms of the GNU Affero General Public License, version 3
|
||||||
|
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||||
|
*
|
||||||
|
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||||
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU Affero General Public License
|
||||||
|
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef _TD_UTIL_RBTREE_H_
|
||||||
|
#define _TD_UTIL_RBTREE_H_
|
||||||
|
|
||||||
|
#include "os.h"
|
||||||
|
|
||||||
|
#ifdef __cplusplus
|
||||||
|
extern "C" {
|
||||||
|
#endif
|
||||||
|
|
||||||
|
typedef struct SRBTree SRBTree;
|
||||||
|
typedef struct SRBTreeNode SRBTreeNode;
|
||||||
|
typedef struct SRBTreeIter SRBTreeIter;
|
||||||
|
|
||||||
|
typedef int32_t (*tRBTreeCmprFn)(const void *, const void *);
|
||||||
|
|
||||||
|
// SRBTree =============================================
|
||||||
|
#define tRBTreeMin(T) ((T)->min == ((T)->NIL) ? NULL : (T)->min)
|
||||||
|
#define tRBTreeMax(T) ((T)->max == ((T)->NIL) ? NULL : (T)->max)
|
||||||
|
|
||||||
|
void tRBTreeCreate(SRBTree *pTree, tRBTreeCmprFn cmprFn);
|
||||||
|
SRBTreeNode *tRBTreePut(SRBTree *pTree, SRBTreeNode *z);
|
||||||
|
void tRBTreeDrop(SRBTree *pTree, SRBTreeNode *z);
|
||||||
|
SRBTreeNode *tRBTreeDropByKey(SRBTree *pTree, void *pKey);
|
||||||
|
SRBTreeNode *tRBTreeGet(SRBTree *pTree, void *pKey);
|
||||||
|
|
||||||
|
// SRBTreeIter =============================================
|
||||||
|
#define tRBTreeIterCreate(tree, ascend) \
|
||||||
|
(SRBTreeIter) { .asc = (ascend), .pTree = (tree), .pNode = (ascend) ? (tree)->min : (tree)->max }
|
||||||
|
|
||||||
|
SRBTreeNode *tRBTreeIterNext(SRBTreeIter *pIter);
|
||||||
|
|
||||||
|
// STRUCT =============================================
|
||||||
|
typedef enum { RED, BLACK } ECOLOR;
|
||||||
|
struct SRBTreeNode {
|
||||||
|
ECOLOR color;
|
||||||
|
SRBTreeNode *parent;
|
||||||
|
SRBTreeNode *left;
|
||||||
|
SRBTreeNode *right;
|
||||||
|
};
|
||||||
|
|
||||||
|
#define RBTREE_NODE_PAYLOAD(N) ((const void *)&(N)[1])
|
||||||
|
|
||||||
|
struct SRBTree {
|
||||||
|
tRBTreeCmprFn cmprFn;
|
||||||
|
int64_t n;
|
||||||
|
SRBTreeNode *root;
|
||||||
|
SRBTreeNode *min;
|
||||||
|
SRBTreeNode *max;
|
||||||
|
SRBTreeNode *NIL;
|
||||||
|
SRBTreeNode NILNODE;
|
||||||
|
};
|
||||||
|
|
||||||
|
struct SRBTreeIter {
|
||||||
|
int8_t asc;
|
||||||
|
SRBTree *pTree;
|
||||||
|
SRBTreeNode *pNode;
|
||||||
|
};
|
||||||
|
|
||||||
|
#ifdef __cplusplus
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#endif /*_TD_UTIL_RBTREE_H_*/
|
|
@ -31,7 +31,6 @@ typedef struct SSchedMsg {
|
||||||
void *thandle;
|
void *thandle;
|
||||||
} SSchedMsg;
|
} SSchedMsg;
|
||||||
|
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
char label[TSDB_LABEL_LEN];
|
char label[TSDB_LABEL_LEN];
|
||||||
tsem_t emptySem;
|
tsem_t emptySem;
|
||||||
|
@ -48,7 +47,6 @@ typedef struct {
|
||||||
void *pTimer;
|
void *pTimer;
|
||||||
} SSchedQueue;
|
} SSchedQueue;
|
||||||
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Create a thread-safe ring-buffer based task queue and return the instance. A thread
|
* Create a thread-safe ring-buffer based task queue and return the instance. A thread
|
||||||
* pool will be created to consume the messages in the queue.
|
* pool will be created to consume the messages in the queue.
|
||||||
|
@ -57,7 +55,7 @@ typedef struct {
|
||||||
* @param label the label of the queue
|
* @param label the label of the queue
|
||||||
* @return the created queue scheduler
|
* @return the created queue scheduler
|
||||||
*/
|
*/
|
||||||
void *taosInitScheduler(int32_t capacity, int32_t numOfThreads, const char *label, SSchedQueue* pSched);
|
void *taosInitScheduler(int32_t capacity, int32_t numOfThreads, const char *label, SSchedQueue *pSched);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Create a thread-safe ring-buffer based task queue and return the instance.
|
* Create a thread-safe ring-buffer based task queue and return the instance.
|
||||||
|
@ -83,7 +81,7 @@ void taosCleanUpScheduler(void *queueScheduler);
|
||||||
* @param queueScheduler the queue scheduler instance
|
* @param queueScheduler the queue scheduler instance
|
||||||
* @param pMsg the message for the task
|
* @param pMsg the message for the task
|
||||||
*/
|
*/
|
||||||
void taosScheduleTask(void *queueScheduler, SSchedMsg *pMsg);
|
int taosScheduleTask(void *queueScheduler, SSchedMsg *pMsg);
|
||||||
|
|
||||||
#ifdef __cplusplus
|
#ifdef __cplusplus
|
||||||
}
|
}
|
||||||
|
|
|
@ -20,6 +20,7 @@
|
||||||
#include "tcrc32c.h"
|
#include "tcrc32c.h"
|
||||||
#include "tdef.h"
|
#include "tdef.h"
|
||||||
#include "tmd5.h"
|
#include "tmd5.h"
|
||||||
|
#include "thash.h"
|
||||||
|
|
||||||
#ifdef __cplusplus
|
#ifdef __cplusplus
|
||||||
extern "C" {
|
extern "C" {
|
||||||
|
@ -68,6 +69,19 @@ static FORCE_INLINE void taosEncryptPass_c(uint8_t *inBuf, size_t len, char *tar
|
||||||
memcpy(target, buf, TSDB_PASSWORD_LEN);
|
memcpy(target, buf, TSDB_PASSWORD_LEN);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static FORCE_INLINE int32_t taosGetTbHashVal(const char *tbname, int32_t tblen, int32_t method, int32_t prefix,
|
||||||
|
int32_t suffix) {
|
||||||
|
if (prefix == 0 && suffix == 0) {
|
||||||
|
return MurmurHash3_32(tbname, tblen);
|
||||||
|
} else {
|
||||||
|
if (tblen <= (prefix + suffix)) {
|
||||||
|
return MurmurHash3_32(tbname, tblen);
|
||||||
|
} else {
|
||||||
|
return MurmurHash3_32(tbname + prefix, tblen - prefix - suffix);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
#ifdef __cplusplus
|
#ifdef __cplusplus
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
|
@ -5,13 +5,6 @@ def sync_source(branch_name) {
|
||||||
echo ''' + branch_name + '''
|
echo ''' + branch_name + '''
|
||||||
'''
|
'''
|
||||||
sh '''
|
sh '''
|
||||||
cd ${TDINTERNAL_ROOT_DIR}
|
|
||||||
git reset --hard
|
|
||||||
git fetch || git fetch
|
|
||||||
git checkout ''' + branch_name + ''' -f
|
|
||||||
git branch
|
|
||||||
git pull || git pull
|
|
||||||
git log | head -n 20
|
|
||||||
cd ${TDENGINE_ROOT_DIR}
|
cd ${TDENGINE_ROOT_DIR}
|
||||||
git reset --hard
|
git reset --hard
|
||||||
git fetch || git fetch
|
git fetch || git fetch
|
||||||
|
@ -64,17 +57,12 @@ pipeline {
|
||||||
defaultValue:'2.1.2',
|
defaultValue:'2.1.2',
|
||||||
description: 'This number of baseVerison is generally not modified.Now it is 3.0.0.1'
|
description: 'This number of baseVerison is generally not modified.Now it is 3.0.0.1'
|
||||||
)
|
)
|
||||||
string (
|
|
||||||
name:'nasPassword',
|
|
||||||
defaultValue:'password',
|
|
||||||
description: 'the pasword of the NAS server which has installPackage-192.168.1.131'
|
|
||||||
)
|
|
||||||
}
|
}
|
||||||
environment{
|
environment{
|
||||||
WORK_DIR = '/var/lib/jenkins/workspace'
|
WORK_DIR = '/var/lib/jenkins/workspace'
|
||||||
TDINTERNAL_ROOT_DIR = '/var/lib/jenkins/workspace/TDinternal'
|
TDINTERNAL_ROOT_DIR = '/var/lib/jenkins/workspace/TDinternal'
|
||||||
TDENGINE_ROOT_DIR = '/var/lib/jenkins/workspace/TDinternal/community'
|
TDENGINE_ROOT_DIR = '/var/lib/jenkins/workspace/TDinternal/community'
|
||||||
BRANCH_NAME = '3.0'
|
BRANCH_NAME = 'test/chr/TD-14699'
|
||||||
|
|
||||||
TD_SERVER_TAR = "TDengine-server-${version}-Linux-x64.tar.gz"
|
TD_SERVER_TAR = "TDengine-server-${version}-Linux-x64.tar.gz"
|
||||||
BASE_TD_SERVER_TAR = "TDengine-server-${baseVersion}-Linux-x64.tar.gz"
|
BASE_TD_SERVER_TAR = "TDengine-server-${baseVersion}-Linux-x64.tar.gz"
|
||||||
|
@ -107,7 +95,7 @@ pipeline {
|
||||||
|
|
||||||
}
|
}
|
||||||
stages {
|
stages {
|
||||||
stage ('RUN') {
|
stage ('Test Server') {
|
||||||
parallel {
|
parallel {
|
||||||
stage('ubuntu16') {
|
stage('ubuntu16') {
|
||||||
agent{label " ubuntu16 "}
|
agent{label " ubuntu16 "}
|
||||||
|
@ -116,17 +104,17 @@ pipeline {
|
||||||
sync_source("${BRANCH_NAME}")
|
sync_source("${BRANCH_NAME}")
|
||||||
sh '''
|
sh '''
|
||||||
cd ${TDENGINE_ROOT_DIR}/packaging
|
cd ${TDENGINE_ROOT_DIR}/packaging
|
||||||
bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server ${nasPassword}
|
bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server
|
||||||
python3 checkPackageRuning.py
|
python3 checkPackageRuning.py
|
||||||
'''
|
'''
|
||||||
sh '''
|
sh '''
|
||||||
cd ${TDENGINE_ROOT_DIR}/packaging
|
cd ${TDENGINE_ROOT_DIR}/packaging
|
||||||
bash testpackage.sh ${TD_SERVER_LITE_TAR} ${version} ${BASE_TD_SERVER_LITE_TAR} ${baseVersion} server ${nasPassword}
|
bash testpackage.sh ${TD_SERVER_LITE_TAR} ${version} ${BASE_TD_SERVER_LITE_TAR} ${baseVersion} server
|
||||||
python3 checkPackageRuning.py
|
python3 checkPackageRuning.py
|
||||||
'''
|
'''
|
||||||
sh '''
|
sh '''
|
||||||
cd ${TDENGINE_ROOT_DIR}/packaging
|
cd ${TDENGINE_ROOT_DIR}/packaging
|
||||||
bash testpackage.sh ${TD_SERVER_DEB} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server ${nasPassword}
|
bash testpackage.sh ${TD_SERVER_DEB} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server
|
||||||
python3 checkPackageRuning.py
|
python3 checkPackageRuning.py
|
||||||
'''
|
'''
|
||||||
}
|
}
|
||||||
|
@ -139,24 +127,21 @@ pipeline {
|
||||||
sync_source("${BRANCH_NAME}")
|
sync_source("${BRANCH_NAME}")
|
||||||
sh '''
|
sh '''
|
||||||
cd ${TDENGINE_ROOT_DIR}/packaging
|
cd ${TDENGINE_ROOT_DIR}/packaging
|
||||||
bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server ${nasPassword}
|
bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server
|
||||||
python3 checkPackageRuning.py
|
python3 checkPackageRuning.py
|
||||||
'''
|
'''
|
||||||
sh '''
|
sh '''
|
||||||
cd ${TDENGINE_ROOT_DIR}/packaging
|
cd ${TDENGINE_ROOT_DIR}/packaging
|
||||||
bash testpackage.sh ${TD_SERVER_LITE_TAR} ${version} ${BASE_TD_SERVER_LITE_TAR} ${baseVersion} server ${nasPassword}
|
bash testpackage.sh ${TD_SERVER_LITE_TAR} ${version} ${BASE_TD_SERVER_LITE_TAR} ${baseVersion} server
|
||||||
python3 checkPackageRuning.py
|
python3 checkPackageRuning.py
|
||||||
'''
|
'''
|
||||||
sh '''
|
sh '''
|
||||||
cd ${TDENGINE_ROOT_DIR}/packaging
|
cd ${TDENGINE_ROOT_DIR}/packaging
|
||||||
bash testpackage.sh ${TD_SERVER_DEB} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server ${nasPassword}
|
bash testpackage.sh ${TD_SERVER_DEB} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server
|
||||||
python3 checkPackageRuning.py
|
|
||||||
'''
|
|
||||||
sh '''
|
|
||||||
cd ${TDENGINE_ROOT_DIR}/packaging
|
|
||||||
bash testpackage.sh ${TD_CLIENT_TAR} ${version} ${BASE_TD_CLIENT_TAR} ${baseVersion} client ${nasPassword}
|
|
||||||
python3 checkPackageRuning.py
|
python3 checkPackageRuning.py
|
||||||
|
dpkg -r tdengine
|
||||||
'''
|
'''
|
||||||
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -167,17 +152,17 @@ pipeline {
|
||||||
sync_source("${BRANCH_NAME}")
|
sync_source("${BRANCH_NAME}")
|
||||||
sh '''
|
sh '''
|
||||||
cd ${TDENGINE_ROOT_DIR}/packaging
|
cd ${TDENGINE_ROOT_DIR}/packaging
|
||||||
bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server ${nasPassword}
|
bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server
|
||||||
python3 checkPackageRuning.py
|
python3 checkPackageRuning.py
|
||||||
'''
|
'''
|
||||||
sh '''
|
sh '''
|
||||||
cd ${TDENGINE_ROOT_DIR}/packaging
|
cd ${TDENGINE_ROOT_DIR}/packaging
|
||||||
bash testpackage.sh ${TD_SERVER_LITE_TAR} ${version} ${BASE_TD_SERVER_LITE_TAR} ${baseVersion} server ${nasPassword}
|
bash testpackage.sh ${TD_SERVER_LITE_TAR} ${version} ${BASE_TD_SERVER_LITE_TAR} ${baseVersion} server
|
||||||
python3 checkPackageRuning.py
|
python3 checkPackageRuning.py
|
||||||
'''
|
'''
|
||||||
sh '''
|
sh '''
|
||||||
cd ${TDENGINE_ROOT_DIR}/packaging
|
cd ${TDENGINE_ROOT_DIR}/packaging
|
||||||
bash testpackage.sh ${TD_SERVER_RPM} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server ${nasPassword}
|
bash testpackage.sh ${TD_SERVER_RPM} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server
|
||||||
python3 checkPackageRuning.py
|
python3 checkPackageRuning.py
|
||||||
'''
|
'''
|
||||||
}
|
}
|
||||||
|
@ -190,28 +175,23 @@ pipeline {
|
||||||
sync_source("${BRANCH_NAME}")
|
sync_source("${BRANCH_NAME}")
|
||||||
sh '''
|
sh '''
|
||||||
cd ${TDENGINE_ROOT_DIR}/packaging
|
cd ${TDENGINE_ROOT_DIR}/packaging
|
||||||
bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server ${nasPassword}
|
bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server
|
||||||
python3 checkPackageRuning.py
|
python3 checkPackageRuning.py
|
||||||
'''
|
'''
|
||||||
sh '''
|
sh '''
|
||||||
cd ${TDENGINE_ROOT_DIR}/packaging
|
cd ${TDENGINE_ROOT_DIR}/packaging
|
||||||
bash testpackage.sh ${TD_SERVER_LITE_TAR} ${version} ${BASE_TD_SERVER_LITE_TAR} ${baseVersion} server ${nasPassword}
|
bash testpackage.sh ${TD_SERVER_LITE_TAR} ${version} ${BASE_TD_SERVER_LITE_TAR} ${baseVersion} server
|
||||||
python3 checkPackageRuning.py
|
python3 checkPackageRuning.py
|
||||||
'''
|
'''
|
||||||
sh '''
|
sh '''
|
||||||
cd ${TDENGINE_ROOT_DIR}/packaging
|
cd ${TDENGINE_ROOT_DIR}/packaging
|
||||||
bash testpackage.sh ${TD_SERVER_RPM} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server ${nasPassword}
|
bash testpackage.sh ${TD_SERVER_RPM} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server
|
||||||
python3 checkPackageRuning.py
|
python3 checkPackageRuning.py
|
||||||
'''
|
sudo rpm -e tdengine
|
||||||
sh '''
|
'''
|
||||||
cd ${TDENGINE_ROOT_DIR}/packaging
|
|
||||||
bash testpackage.sh ${TD_CLIENT_LITE_TAR} ${version} ${BASE_TD_CLIENT_LITE_TAR} ${baseVersion} client ${nasPassword}
|
|
||||||
python3 checkPackageRuning.py
|
|
||||||
'''
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
stage('arm64') {
|
stage('arm64') {
|
||||||
agent{label 'linux_arm64'}
|
agent{label 'linux_arm64'}
|
||||||
steps {
|
steps {
|
||||||
|
@ -219,18 +199,53 @@ pipeline {
|
||||||
sync_source("${BRANCH_NAME}")
|
sync_source("${BRANCH_NAME}")
|
||||||
sh '''
|
sh '''
|
||||||
cd ${TDENGINE_ROOT_DIR}/packaging
|
cd ${TDENGINE_ROOT_DIR}/packaging
|
||||||
bash testpackage.sh ${TD_SERVER_ARM_TAR} ${version} ${BASE_TD_SERVER_ARM_TAR} ${baseVersion} server ${nasPassword}
|
bash testpackage.sh ${TD_SERVER_ARM_TAR} ${version} ${BASE_TD_SERVER_ARM_TAR} ${baseVersion} server
|
||||||
python3 checkPackageRuning.py
|
python3 checkPackageRuning.py
|
||||||
'''
|
'''
|
||||||
sh '''
|
|
||||||
cd ${TDENGINE_ROOT_DIR}/packaging
|
|
||||||
bash testpackage.sh ${TD_CLIENT_ARM_TAR} ${version} ${BASE_TD_CLIENT_ARM_TAR} ${baseVersion} client ${nasPassword}
|
|
||||||
python3 checkPackageRuning.py
|
|
||||||
'''
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
stage ('Test Client') {
|
||||||
|
parallel {
|
||||||
|
stage('ubuntu18') {
|
||||||
|
agent{label " ubuntu18 "}
|
||||||
|
steps {
|
||||||
|
timeout(time: 30, unit: 'MINUTES'){
|
||||||
|
sh '''
|
||||||
|
cd ${TDENGINE_ROOT_DIR}/packaging
|
||||||
|
bash testpackage.sh ${TD_CLIENT_TAR} ${version} ${BASE_TD_CLIENT_TAR} ${baseVersion} client
|
||||||
|
python3 checkPackageRuning.py 192.168.0.21
|
||||||
|
'''
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
stage('centos8') {
|
||||||
|
agent{label " centos8_3 "}
|
||||||
|
steps {
|
||||||
|
timeout(time: 30, unit: 'MINUTES'){
|
||||||
|
sh '''
|
||||||
|
cd ${TDENGINE_ROOT_DIR}/packaging
|
||||||
|
bash testpackage.sh ${TD_CLIENT_LITE_TAR} ${version} ${BASE_TD_CLIENT_LITE_TAR} ${baseVersion} client
|
||||||
|
python3 checkPackageRuning.py 192.168.0.24
|
||||||
|
'''
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
stage('arm64-client') {
|
||||||
|
agent{label " linux_arm64 "}
|
||||||
|
steps {
|
||||||
|
timeout(time: 30, unit: 'MINUTES'){
|
||||||
|
sh '''
|
||||||
|
cd ${TDENGINE_ROOT_DIR}/packaging
|
||||||
|
bash testpackage.sh ${TD_CLIENT_ARM_TAR} ${version} ${BASE_TD_CLIENT_ARM_TAR} ${baseVersion} client
|
||||||
|
python3 checkPackageRuning.py 192.168.0.21
|
||||||
|
'''
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
|
@ -19,12 +19,19 @@ import subprocess
|
||||||
# from this import d
|
# from this import d
|
||||||
import time
|
import time
|
||||||
|
|
||||||
|
|
||||||
|
if( len(sys.argv)>1 ):
|
||||||
|
serverHost=sys.argv[1]
|
||||||
|
else:
|
||||||
|
serverHost="localhost"
|
||||||
|
|
||||||
|
|
||||||
# install taospy
|
# install taospy
|
||||||
|
|
||||||
out = subprocess.getoutput("pip3 show taospy|grep Version| awk -F ':' '{print $2}' ")
|
out = subprocess.getoutput("pip3 show taospy|grep Version| awk -F ':' '{print $2}' ")
|
||||||
print("taospy version %s "%out)
|
print("taospy version %s "%out)
|
||||||
if (out == "" ):
|
if (out == "" ):
|
||||||
os.system("pip install git+https://github.com/taosdata/taos-connector-python.git")
|
os.system("pip3 install git+https://github.com/taosdata/taos-connector-python.git")
|
||||||
print("install taos python connector")
|
print("install taos python connector")
|
||||||
else:
|
else:
|
||||||
os.system("pip3 install --upgrade taospy ")
|
os.system("pip3 install --upgrade taospy ")
|
||||||
|
@ -32,19 +39,19 @@ else:
|
||||||
|
|
||||||
|
|
||||||
# start taosd prepare
|
# start taosd prepare
|
||||||
os.system("rm -rf /var/lib/taos/*")
|
# os.system("rm -rf /var/lib/taos/*")
|
||||||
os.system("systemctl restart taosd ")
|
# os.system("systemctl restart taosd ")
|
||||||
|
|
||||||
# wait a moment ,at least 5 seconds
|
# wait a moment ,at least 5 seconds
|
||||||
time.sleep(5)
|
time.sleep(5)
|
||||||
|
|
||||||
# prepare data by taosBenchmark
|
# prepare data by taosBenchmark
|
||||||
|
|
||||||
os.system("taosBenchmark -y -n 100 -t 100")
|
os.system("taosBenchmark -y -n 100 -t 100 -h %s "%serverHost )
|
||||||
|
|
||||||
import taos
|
import taos
|
||||||
|
|
||||||
conn = taos.connect(host="localhost",
|
conn = taos.connect(host="%s"%serverHost,
|
||||||
user="root",
|
user="root",
|
||||||
password="taosdata",
|
password="taosdata",
|
||||||
database="test",
|
database="test",
|
||||||
|
@ -80,15 +87,15 @@ os.system("rm -rf /tmp/dumpdata/*")
|
||||||
# dump data out
|
# dump data out
|
||||||
print("taosdump dump out data")
|
print("taosdump dump out data")
|
||||||
|
|
||||||
os.system("taosdump -o /tmp/dumpdata -D test -y ")
|
os.system("taosdump -o /tmp/dumpdata -D test -y -h %s "%serverHost)
|
||||||
|
|
||||||
# drop database of test
|
# drop database of test
|
||||||
print("drop database test")
|
print("drop database test")
|
||||||
os.system(" taos -s ' drop database test ;' ")
|
os.system(" taos -s ' drop database test ;' -h %s "%serverHost)
|
||||||
|
|
||||||
# dump data in
|
# dump data in
|
||||||
print("taosdump dump data in")
|
print("taosdump dump data in")
|
||||||
os.system("taosdump -i /tmp/dumpdata -y ")
|
os.system("taosdump -i /tmp/dumpdata -y -h %s "%serverHost)
|
||||||
|
|
||||||
result = conn.query("SELECT count(*) from test.meters")
|
result = conn.query("SELECT count(*) from test.meters")
|
||||||
|
|
||||||
|
|
|
@ -45,6 +45,7 @@ mkdir -p ${pkg_dir}${install_home_path}/include
|
||||||
mkdir -p ${pkg_dir}${install_home_path}/script
|
mkdir -p ${pkg_dir}${install_home_path}/script
|
||||||
|
|
||||||
cp ${compile_dir}/../packaging/cfg/taos.cfg ${pkg_dir}${install_home_path}/cfg
|
cp ${compile_dir}/../packaging/cfg/taos.cfg ${pkg_dir}${install_home_path}/cfg
|
||||||
|
cp ${compile_dir}/../packaging/cfg/taosd.service ${pkg_dir}${install_home_path}/cfg
|
||||||
if [ -f "${compile_dir}/test/cfg/taosadapter.toml" ]; then
|
if [ -f "${compile_dir}/test/cfg/taosadapter.toml" ]; then
|
||||||
cp ${compile_dir}/test/cfg/taosadapter.toml ${pkg_dir}${install_home_path}/cfg || :
|
cp ${compile_dir}/test/cfg/taosadapter.toml ${pkg_dir}${install_home_path}/cfg || :
|
||||||
fi
|
fi
|
||||||
|
|
|
@ -11,3 +11,5 @@ expect "*one:"
|
||||||
send "\r"
|
send "\r"
|
||||||
expect "*skip:"
|
expect "*skip:"
|
||||||
send "\r"
|
send "\r"
|
||||||
|
|
||||||
|
expect eof
|
|
@ -7,7 +7,6 @@ originPackageName=$3
|
||||||
originversion=$4
|
originversion=$4
|
||||||
testFile=$5
|
testFile=$5
|
||||||
subFile="taos.tar.gz"
|
subFile="taos.tar.gz"
|
||||||
password=$6
|
|
||||||
|
|
||||||
# Color setting
|
# Color setting
|
||||||
RED='\033[41;30m'
|
RED='\033[41;30m'
|
||||||
|
@ -68,11 +67,37 @@ fi
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
function wgetFile {
|
||||||
|
|
||||||
|
file=$1
|
||||||
|
|
||||||
|
if [ ! -f ${file} ];then
|
||||||
|
echoColor BD "wget https://www.taosdata.com/assets-download/3.0/${file}"
|
||||||
|
wget https://www.taosdata.com/assets-download/3.0/${file}
|
||||||
|
else
|
||||||
|
echoColor YD "${file} already exists "
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
function newPath {
|
||||||
|
|
||||||
|
buildPath=$1
|
||||||
|
|
||||||
|
if [ ! -d ${buildPath} ] ;then
|
||||||
|
echoColor BD "mkdir -p ${buildPath}"
|
||||||
|
mkdir -p ${buildPath}
|
||||||
|
else
|
||||||
|
echoColor YD "${buildPath} already exists"
|
||||||
|
fi
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
echoColor G "===== install basesoft ====="
|
echoColor G "===== install basesoft ====="
|
||||||
|
|
||||||
cmdInstall tree
|
cmdInstall tree
|
||||||
cmdInstall wget
|
cmdInstall wget
|
||||||
cmdInstall sshpass
|
cmdInstall expect
|
||||||
|
|
||||||
echoColor G "===== Uninstall all components of TDeingne ====="
|
echoColor G "===== Uninstall all components of TDeingne ====="
|
||||||
|
|
||||||
|
@ -97,11 +122,14 @@ echoColor G "===== new workroom path ====="
|
||||||
installPath="/usr/local/src/packageTest"
|
installPath="/usr/local/src/packageTest"
|
||||||
oriInstallPath="/usr/local/src/packageTest/3.1"
|
oriInstallPath="/usr/local/src/packageTest/3.1"
|
||||||
|
|
||||||
if [ ! -d ${installPath} ] ;then
|
newPath ${installPath}
|
||||||
echoColor BD "mkdir -p ${installPath}"
|
|
||||||
mkdir -p ${installPath}
|
newPath ${oriInstallPath}
|
||||||
else
|
|
||||||
echoColor YD "${installPath} already exists"
|
|
||||||
|
if [ -d ${oriInstallPath}/${originTdpPath} ] ;then
|
||||||
|
echoColor BD "rm -rf ${oriInstallPath}/${originTdpPath}/*"
|
||||||
|
rm -rf ${oriInstallPath}/${originTdpPath}/*
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ -d ${installPath}/${tdPath} ] ;then
|
if [ -d ${installPath}/${tdPath} ] ;then
|
||||||
|
@ -109,33 +137,13 @@ if [ -d ${installPath}/${tdPath} ] ;then
|
||||||
rm -rf ${installPath}/${tdPath}/*
|
rm -rf ${installPath}/${tdPath}/*
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ ! -d ${oriInstallPath} ] ;then
|
|
||||||
echoColor BD "mkdir -p ${oriInstallPath}"
|
|
||||||
mkdir -p ${oriInstallPath}
|
|
||||||
else
|
|
||||||
echoColor YD "${oriInstallPath} already exists"
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ -d ${oriInstallPath}/${originTdpPath} ] ;then
|
|
||||||
echoColor BD "rm -rf ${oriInstallPath}/${originTdpPath}/*"
|
|
||||||
rm -rf ${oriInstallPath}/${originTdpPath}/*
|
|
||||||
fi
|
|
||||||
|
|
||||||
|
|
||||||
echoColor G "===== download installPackage ====="
|
echoColor G "===== download installPackage ====="
|
||||||
# cd ${installPath}
|
cd ${installPath} && wgetFile ${packgeName}
|
||||||
# wget https://www.taosdata.com/assets-download/3.0/${packgeName}
|
cd ${oriInstallPath} && wgetFile ${originPackageName}
|
||||||
# cd ${oriInstallPath}
|
|
||||||
# wget https://www.taosdata.com/assets-download/3.0/${originPackageName}
|
|
||||||
|
|
||||||
cd ${installPath}
|
cd ${installPath}
|
||||||
cp -r ${scriptDir}/debRpmAutoInstall.sh .
|
cp -r ${scriptDir}/debRpmAutoInstall.sh .
|
||||||
|
|
||||||
if [ ! -f {packgeName} ];then
|
|
||||||
echoColor BD "sshpass -p ${password} scp -oStrictHostKeyChecking=no 192.168.1.131:/nas/TDengine3/v${version}/community/${packgeName} ."
|
|
||||||
sshpass -p ${password} scp -oStrictHostKeyChecking=no -oStrictHostKeyChecking=no 192.168.1.131:/nas/TDengine3/v${version}/community/${packgeName} .
|
|
||||||
fi
|
|
||||||
|
|
||||||
packageSuffix=$(echo ${packgeName} | awk -F '.' '{print $NF}')
|
packageSuffix=$(echo ${packgeName} | awk -F '.' '{print $NF}')
|
||||||
|
|
||||||
|
|
||||||
|
@ -181,8 +189,7 @@ elif [[ ${packgeName} =~ "tar" ]];then
|
||||||
cd ${oriInstallPath}
|
cd ${oriInstallPath}
|
||||||
if [ ! -f {originPackageName} ];then
|
if [ ! -f {originPackageName} ];then
|
||||||
echoColor YD "download base installPackage"
|
echoColor YD "download base installPackage"
|
||||||
echoColor BD "sshpass -p ${password} scp -oStrictHostKeyChecking=no 192.168.1.131:/nas/TDengine3/v${originversion}/community/${originPackageName} ."
|
wgetFile ${originPackageName}
|
||||||
sshpass -p ${password} scp -oStrictHostKeyChecking=no 192.168.1.131:/nas/TDengine3/v${originversion}/community/${originPackageName} .
|
|
||||||
fi
|
fi
|
||||||
echoColor YD "unzip the base installation package"
|
echoColor YD "unzip the base installation package"
|
||||||
echoColor BD "tar -xf ${originPackageName}" && tar -xf ${originPackageName}
|
echoColor BD "tar -xf ${originPackageName}" && tar -xf ${originPackageName}
|
||||||
|
@ -222,24 +229,45 @@ fi
|
||||||
|
|
||||||
cd ${installPath}
|
cd ${installPath}
|
||||||
|
|
||||||
if ([[ ${packgeName} =~ "Lite" ]] && [[ ${packgeName} =~ "tar" ]]) || [[ ${packgeName} =~ "client" ]] ;then
|
if [[ ${packgeName} =~ "Lite" ]] || ([[ ${packgeName} =~ "x64" ]] && [[ ${packgeName} =~ "client" ]]) || ([[ ${packgeName} =~ "deb" ]] && [[ ${packgeName} =~ "server" ]]) || ([[ ${packgeName} =~ "rpm" ]] && [[ ${packgeName} =~ "server" ]]) ;then
|
||||||
echoColor G "===== install taos-tools when package is lite or client ====="
|
echoColor G "===== install taos-tools when package is lite or client ====="
|
||||||
cd ${installPath}
|
cd ${installPath}
|
||||||
sshpass -p ${password} scp -oStrictHostKeyChecking=no 192.168.1.131:/nas/TDengine3/v${version}/community/taosTools-2.1.2-Linux-x64.tar.gz .
|
wgetFile taosTools-2.1.3-Linux-x64.tar.gz .
|
||||||
# wget https://www.taosdata.com/assets-download/3.0/taosTools-2.1.2-Linux-x64.tar.gz
|
tar xf taosTools-2.1.3-Linux-x64.tar.gz
|
||||||
tar xf taosTools-2.1.2-Linux-x64.tar.gz
|
cd taosTools-2.1.3 && bash install-taostools.sh
|
||||||
cd taosTools-2.1.2 && bash install-taostools.sh
|
elif ([[ ${packgeName} =~ "arm64" ]] && [[ ${packgeName} =~ "client" ]]);then
|
||||||
elif [[ ${packgeName} =~ "Lite" ]] && [[ ${packgeName} =~ "deb" ]] ;then
|
echoColor G "===== install taos-tools arm when package is arm64-client ====="
|
||||||
echoColor G "===== install taos-tools when package is lite or client ====="
|
|
||||||
cd ${installPath}
|
cd ${installPath}
|
||||||
sshpass -p ${password} scp -oStrictHostKeyChecking=no 192.168.1.131:/nas/TDengine3/v${version}/community/taosTools-2.1.2-Linux-x64.tar.gz .
|
wgetFile taosTools-2.1.3-Linux-arm64.tar.gz .
|
||||||
tar xf taosTools-2.1.2-Linux-x64.tar.gz
|
tar xf taosTools-2.1.3-Linux-arm64.tar.gz
|
||||||
cd taosTools-2.1.2 && bash install-taostools.sh
|
cd taosTools-2.1.3 && bash install-taostools.sh
|
||||||
elif [[ ${packgeName} =~ "Lite" ]] && [[ ${packgeName} =~ "rpm" ]] ;then
|
|
||||||
echoColor G "===== install taos-tools when package is lite or client ====="
|
|
||||||
cd ${installPath}
|
|
||||||
sshpass -p ${password} scp -oStrictHostKeyChecking=no -oStrictHostKeyChecking=no 192.168.1.131:/nas/TDengine3/v${version}/community/taosTools-2.1.2-Linux-x64.tar.gz .
|
|
||||||
tar xf taosTools-2.1.2-Linux-x64.tar.gz
|
|
||||||
cd taosTools-2.1.2 && bash install-taostools.sh
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
echoColor G "===== start TDengine ====="
|
||||||
|
|
||||||
|
if [[ ${packgeName} =~ "server" ]] ;then
|
||||||
|
echoColor BD " rm -rf /var/lib/taos/* && systemctl restart taosd "
|
||||||
|
rm -rf /var/lib/taos/*
|
||||||
|
systemctl restart taosd
|
||||||
|
fi
|
||||||
|
|
||||||
|
# if ([[ ${packgeName} =~ "Lite" ]] && [[ ${packgeName} =~ "tar" ]]) || [[ ${packgeName} =~ "client" ]] ;then
|
||||||
|
# echoColor G "===== install taos-tools when package is lite or client ====="
|
||||||
|
# cd ${installPath}
|
||||||
|
# wgetFile taosTools-2.1.2-Linux-x64.tar.gz .
|
||||||
|
# tar xf taosTools-2.1.2-Linux-x64.tar.gz
|
||||||
|
# cd taosTools-2.1.2 && bash install-taostools.sh
|
||||||
|
# elif [[ ${packgeName} =~ "Lite" ]] && [[ ${packgeName} =~ "deb" ]] ;then
|
||||||
|
# echoColor G "===== install taos-tools when package is lite or client ====="
|
||||||
|
# cd ${installPath}
|
||||||
|
# wgetFile taosTools-2.1.2-Linux-x64.tar.gz .
|
||||||
|
# tar xf taosTools-2.1.2-Linux-x64.tar.gz
|
||||||
|
# cd taosTools-2.1.2 && bash install-taostools.sh
|
||||||
|
# elif [[ ${packgeName} =~ "Lite" ]] && [[ ${packgeName} =~ "rpm" ]] ;then
|
||||||
|
# echoColor G "===== install taos-tools when package is lite or client ====="
|
||||||
|
# cd ${installPath}
|
||||||
|
# wgetFile taosTools-2.1.2-Linux-x64.tar.gz .
|
||||||
|
# tar xf taosTools-2.1.2-Linux-x64.tar.gz
|
||||||
|
# cd taosTools-2.1.2 && bash install-taostools.sh
|
||||||
|
# fi
|
||||||
|
|
||||||
|
|
|
@ -33,6 +33,7 @@ adapterName="taosadapter"
|
||||||
benchmarkName="taosBenchmark"
|
benchmarkName="taosBenchmark"
|
||||||
dumpName="taosdump"
|
dumpName="taosdump"
|
||||||
demoName="taosdemo"
|
demoName="taosdemo"
|
||||||
|
xname="taosx"
|
||||||
|
|
||||||
data_dir=${dataDir}
|
data_dir=${dataDir}
|
||||||
log_dir=${logDir}
|
log_dir=${logDir}
|
||||||
|
@ -199,6 +200,7 @@ function install_bin() {
|
||||||
${csudo}rm -f ${bin_link_dir}/${demoName} || :
|
${csudo}rm -f ${bin_link_dir}/${demoName} || :
|
||||||
${csudo}rm -f ${bin_link_dir}/${benchmarkName} || :
|
${csudo}rm -f ${bin_link_dir}/${benchmarkName} || :
|
||||||
${csudo}rm -f ${bin_link_dir}/${dumpName} || :
|
${csudo}rm -f ${bin_link_dir}/${dumpName} || :
|
||||||
|
${csudo}rm -f ${bin_link_dir}/${xname} || :
|
||||||
${csudo}rm -f ${bin_link_dir}/set_core || :
|
${csudo}rm -f ${bin_link_dir}/set_core || :
|
||||||
${csudo}rm -f ${bin_link_dir}/TDinsight.sh || :
|
${csudo}rm -f ${bin_link_dir}/TDinsight.sh || :
|
||||||
|
|
||||||
|
@ -212,6 +214,7 @@ function install_bin() {
|
||||||
[ -x ${install_main_dir}/bin/${benchmarkName} ] && ${csudo}ln -s ${install_main_dir}/bin/${benchmarkName} ${bin_link_dir}/${demoName} || :
|
[ -x ${install_main_dir}/bin/${benchmarkName} ] && ${csudo}ln -s ${install_main_dir}/bin/${benchmarkName} ${bin_link_dir}/${demoName} || :
|
||||||
[ -x ${install_main_dir}/bin/${benchmarkName} ] && ${csudo}ln -s ${install_main_dir}/bin/${benchmarkName} ${bin_link_dir}/${benchmarkName} || :
|
[ -x ${install_main_dir}/bin/${benchmarkName} ] && ${csudo}ln -s ${install_main_dir}/bin/${benchmarkName} ${bin_link_dir}/${benchmarkName} || :
|
||||||
[ -x ${install_main_dir}/bin/${dumpName} ] && ${csudo}ln -s ${install_main_dir}/bin/${dumpName} ${bin_link_dir}/${dumpName} || :
|
[ -x ${install_main_dir}/bin/${dumpName} ] && ${csudo}ln -s ${install_main_dir}/bin/${dumpName} ${bin_link_dir}/${dumpName} || :
|
||||||
|
[ -x ${install_main_dir}/bin/${xname} ] && ${csudo}ln -s ${install_main_dir}/bin/${dumpName} ${bin_link_dir}/${xname} || :
|
||||||
[ -x ${install_main_dir}/bin/TDinsight.sh ] && ${csudo}ln -s ${install_main_dir}/bin/TDinsight.sh ${bin_link_dir}/TDinsight.sh || :
|
[ -x ${install_main_dir}/bin/TDinsight.sh ] && ${csudo}ln -s ${install_main_dir}/bin/TDinsight.sh ${bin_link_dir}/TDinsight.sh || :
|
||||||
[ -x ${install_main_dir}/bin/remove.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/${uninstallScript} || :
|
[ -x ${install_main_dir}/bin/remove.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/${uninstallScript} || :
|
||||||
[ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
|
[ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
|
||||||
|
@ -837,14 +840,20 @@ function updateProduct() {
|
||||||
|
|
||||||
echo
|
echo
|
||||||
echo -e "${GREEN_DARK}To configure ${productName} ${NC}: edit ${cfg_install_dir}/${configFile}"
|
echo -e "${GREEN_DARK}To configure ${productName} ${NC}: edit ${cfg_install_dir}/${configFile}"
|
||||||
echo -e "${GREEN_DARK}To configure Adapter (if has) ${NC}: edit ${cfg_install_dir}/${adapterName}.toml"
|
[ -f ${configDir}/taosadapter.toml ] && [ -f ${installDir}/bin/taosadapter ] && \
|
||||||
|
echo -e "${GREEN_DARK}To configure Taos Adapter ${NC}: edit ${configDir}/taosadapter.toml"
|
||||||
if ((${service_mod} == 0)); then
|
if ((${service_mod} == 0)); then
|
||||||
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}systemctl start ${serverName}${NC}"
|
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}systemctl start ${serverName}${NC}"
|
||||||
|
[ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \
|
||||||
|
echo -e "${GREEN_DARK}To start Taos Adatper ${NC}: ${csudo}systemctl start taosadapter ${NC}"
|
||||||
elif ((${service_mod} == 1)); then
|
elif ((${service_mod} == 1)); then
|
||||||
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}service ${serverName} start${NC}"
|
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}service ${serverName} start${NC}"
|
||||||
|
[ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \
|
||||||
|
echo -e "${GREEN_DARK}To start Taos Adapter ${NC}: ${csudo}service taosadapter start${NC}"
|
||||||
else
|
else
|
||||||
echo -e "${GREEN_DARK}To start Adapter (if has)${NC}: ${adapterName} &${NC}"
|
|
||||||
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ./${serverName}${NC}"
|
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ./${serverName}${NC}"
|
||||||
|
[ -f ${installDir}/bin/taosadapter ] && \
|
||||||
|
echo -e "${GREEN_DARK}To start Taos Adapter ${NC}: taosadapter &${NC}"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ ${openresty_work} = 'true' ]; then
|
if [ ${openresty_work} = 'true' ]; then
|
||||||
|
@ -923,14 +932,20 @@ function installProduct() {
|
||||||
# Ask if to start the service
|
# Ask if to start the service
|
||||||
echo
|
echo
|
||||||
echo -e "${GREEN_DARK}To configure ${productName} ${NC}: edit ${cfg_install_dir}/${configFile}"
|
echo -e "${GREEN_DARK}To configure ${productName} ${NC}: edit ${cfg_install_dir}/${configFile}"
|
||||||
echo -e "${GREEN_DARK}To configure ${adapterName} (if has) ${NC}: edit ${cfg_install_dir}/${adapterName}.toml"
|
[ -f ${configDir}/taosadapter.toml ] && [ -f ${installDir}/bin/taosadapter ] && \
|
||||||
|
echo -e "${GREEN_DARK}To configure Taos Adapter ${NC}: edit ${configDir}/taosadapter.toml"
|
||||||
if ((${service_mod} == 0)); then
|
if ((${service_mod} == 0)); then
|
||||||
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}systemctl start ${serverName}${NC}"
|
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}systemctl start ${serverName}${NC}"
|
||||||
|
[ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \
|
||||||
|
echo -e "${GREEN_DARK}To start Taos Adatper ${NC}: ${csudo}systemctl start taosadapter ${NC}"
|
||||||
elif ((${service_mod} == 1)); then
|
elif ((${service_mod} == 1)); then
|
||||||
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}service ${serverName} start${NC}"
|
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}service ${serverName} start${NC}"
|
||||||
|
[ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \
|
||||||
|
echo -e "${GREEN_DARK}To start Taos Adapter ${NC}: ${csudo}service taosadapter start${NC}"
|
||||||
else
|
else
|
||||||
echo -e "${GREEN_DARK}To start Adapter (if has)${NC}: ${adapterName} &${NC}"
|
|
||||||
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${serverName}${NC}"
|
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${serverName}${NC}"
|
||||||
|
[ -f ${installDir}/bin/taosadapter ] && \
|
||||||
|
echo -e "${GREEN_DARK}To start Taos Adapter ${NC}: taosadapter &${NC}"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ ! -z "$firstEp" ]; then
|
if [ ! -z "$firstEp" ]; then
|
||||||
|
|
|
@ -172,6 +172,7 @@ function install_bin() {
|
||||||
${csudo}rm -f ${bin_link_dir}/udfd || :
|
${csudo}rm -f ${bin_link_dir}/udfd || :
|
||||||
${csudo}rm -f ${bin_link_dir}/taosdemo || :
|
${csudo}rm -f ${bin_link_dir}/taosdemo || :
|
||||||
${csudo}rm -f ${bin_link_dir}/taosdump || :
|
${csudo}rm -f ${bin_link_dir}/taosdump || :
|
||||||
|
${csudo}rm -f ${bin_link_dir}/taosx || :
|
||||||
|
|
||||||
if [ "$osType" != "Darwin" ]; then
|
if [ "$osType" != "Darwin" ]; then
|
||||||
${csudo}rm -f ${bin_link_dir}/perfMonitor || :
|
${csudo}rm -f ${bin_link_dir}/perfMonitor || :
|
||||||
|
@ -184,6 +185,7 @@ function install_bin() {
|
||||||
[ -f ${binary_dir}/build/bin/taosdump ] && ${csudo}cp -r ${binary_dir}/build/bin/taosdump ${install_main_dir}/bin || :
|
[ -f ${binary_dir}/build/bin/taosdump ] && ${csudo}cp -r ${binary_dir}/build/bin/taosdump ${install_main_dir}/bin || :
|
||||||
[ -f ${binary_dir}/build/bin/taosadapter ] && ${csudo}cp -r ${binary_dir}/build/bin/taosadapter ${install_main_dir}/bin || :
|
[ -f ${binary_dir}/build/bin/taosadapter ] && ${csudo}cp -r ${binary_dir}/build/bin/taosadapter ${install_main_dir}/bin || :
|
||||||
[ -f ${binary_dir}/build/bin/udfd ] && ${csudo}cp -r ${binary_dir}/build/bin/udfd ${install_main_dir}/bin || :
|
[ -f ${binary_dir}/build/bin/udfd ] && ${csudo}cp -r ${binary_dir}/build/bin/udfd ${install_main_dir}/bin || :
|
||||||
|
[ -f ${binary_dir}/build/bin/taosx ] && ${csudo}cp -r ${binary_dir}/build/bin/taosx ${install_main_dir}/bin || :
|
||||||
${csudo}cp -r ${binary_dir}/build/bin/${serverName} ${install_main_dir}/bin || :
|
${csudo}cp -r ${binary_dir}/build/bin/${serverName} ${install_main_dir}/bin || :
|
||||||
|
|
||||||
${csudo}cp -r ${script_dir}/taosd-dump-cfg.gdb ${install_main_dir}/bin || :
|
${csudo}cp -r ${script_dir}/taosd-dump-cfg.gdb ${install_main_dir}/bin || :
|
||||||
|
@ -199,6 +201,7 @@ function install_bin() {
|
||||||
[ -x ${install_main_dir}/bin/udfd ] && ${csudo}ln -s ${install_main_dir}/bin/udfd ${bin_link_dir}/udfd || :
|
[ -x ${install_main_dir}/bin/udfd ] && ${csudo}ln -s ${install_main_dir}/bin/udfd ${bin_link_dir}/udfd || :
|
||||||
[ -x ${install_main_dir}/bin/taosdump ] && ${csudo}ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || :
|
[ -x ${install_main_dir}/bin/taosdump ] && ${csudo}ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || :
|
||||||
[ -x ${install_main_dir}/bin/taosdemo ] && ${csudo}ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || :
|
[ -x ${install_main_dir}/bin/taosdemo ] && ${csudo}ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || :
|
||||||
|
[ -x ${install_main_dir}/bin/taosx ] && ${csudo}ln -s ${install_main_dir}/bin/taosx ${bin_link_dir}/taosx || :
|
||||||
[ -x ${install_main_dir}/bin/perfMonitor ] && ${csudo}ln -s ${install_main_dir}/bin/perfMonitor ${bin_link_dir}/perfMonitor || :
|
[ -x ${install_main_dir}/bin/perfMonitor ] && ${csudo}ln -s ${install_main_dir}/bin/perfMonitor ${bin_link_dir}/perfMonitor || :
|
||||||
[ -x ${install_main_dir}/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
|
[ -x ${install_main_dir}/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
|
||||||
[ -x ${install_main_dir}/bin/remove.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/${uninstallScript} || :
|
[ -x ${install_main_dir}/bin/remove.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/${uninstallScript} || :
|
||||||
|
@ -215,6 +218,7 @@ function install_bin() {
|
||||||
[ -x ${install_main_dir}/bin/udfd ] || [ -x ${install_main_2_dir}/bin/udfd ] && ${csudo}ln -s ${install_main_dir}/bin/udfd ${bin_link_dir}/udfd || ${csudo}ln -s ${install_main_2_dir}/bin/udfd || :
|
[ -x ${install_main_dir}/bin/udfd ] || [ -x ${install_main_2_dir}/bin/udfd ] && ${csudo}ln -s ${install_main_dir}/bin/udfd ${bin_link_dir}/udfd || ${csudo}ln -s ${install_main_2_dir}/bin/udfd || :
|
||||||
[ -x ${install_main_dir}/bin/taosdump ] || [ -x ${install_main_2_dir}/bin/taosdump ] && ${csudo}ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || ln -s ${install_main_2_dir}/bin/taosdump ${bin_link_dir}/taosdump || :
|
[ -x ${install_main_dir}/bin/taosdump ] || [ -x ${install_main_2_dir}/bin/taosdump ] && ${csudo}ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || ln -s ${install_main_2_dir}/bin/taosdump ${bin_link_dir}/taosdump || :
|
||||||
[ -x ${install_main_dir}/bin/taosdemo ] || [ -x ${install_main_2_dir}/bin/taosdemo ] && ${csudo}ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || ln -s ${install_main_2_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || :
|
[ -x ${install_main_dir}/bin/taosdemo ] || [ -x ${install_main_2_dir}/bin/taosdemo ] && ${csudo}ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || ln -s ${install_main_2_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || :
|
||||||
|
[ -x ${install_main_dir}/bin/taosx ] || [ -x ${install_main_2_dir}/bin/taosx ] && ${csudo}ln -s ${install_main_dir}/bin/taosx ${bin_link_dir}/taosx || ln -s ${install_main_2_dir}/bin/taosx ${bin_link_dir}/taosx || :
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -605,14 +609,20 @@ function update_TDengine() {
|
||||||
echo
|
echo
|
||||||
|
|
||||||
echo -e "${GREEN_DARK}To configure ${productName} ${NC}: edit ${configDir}/${configFile}"
|
echo -e "${GREEN_DARK}To configure ${productName} ${NC}: edit ${configDir}/${configFile}"
|
||||||
echo -e "${GREEN_DARK}To configure Taos Adapter (if has) ${NC}: edit ${configDir}/taosadapter.toml"
|
[ -f ${configDir}/taosadapter.toml ] && [ -f ${installDir}/bin/taosadapter ] && \
|
||||||
|
echo -e "${GREEN_DARK}To configure Taos Adapter ${NC}: edit ${configDir}/taosadapter.toml"
|
||||||
if ((${service_mod} == 0)); then
|
if ((${service_mod} == 0)); then
|
||||||
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}systemctl start ${serverName}${NC}"
|
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}systemctl start ${serverName}${NC}"
|
||||||
|
[ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \
|
||||||
|
echo -e "${GREEN_DARK}To start Taos Adatper ${NC}: ${csudo}systemctl start taosadapter ${NC}"
|
||||||
elif ((${service_mod} == 1)); then
|
elif ((${service_mod} == 1)); then
|
||||||
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}service ${serverName} start${NC}"
|
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}service ${serverName} start${NC}"
|
||||||
|
[ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \
|
||||||
|
echo -e "${GREEN_DARK}To start Taos Adapter ${NC}: ${csudo}service taosadapter start${NC}"
|
||||||
else
|
else
|
||||||
echo -e "${GREEN_DARK}To start Taos Adapter (if has)${NC}: taosadapter &${NC}"
|
|
||||||
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${serverName}${NC}"
|
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${serverName}${NC}"
|
||||||
|
[ -f ${installDir}/bin/taosadapter ] && \
|
||||||
|
echo -e "${GREEN_DARK}To start Taos Adapter ${NC}: taosadapter &${NC}"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
echo -e "${GREEN_DARK}To access ${productName} ${NC}: use ${GREEN_UNDERLINE}${clientName}${NC} in shell${NC}"
|
echo -e "${GREEN_DARK}To access ${productName} ${NC}: use ${GREEN_UNDERLINE}${clientName}${NC} in shell${NC}"
|
||||||
|
@ -645,14 +655,20 @@ function install_TDengine() {
|
||||||
echo -e "\033[44;32;1m${productName} is installed successfully!${NC}"
|
echo -e "\033[44;32;1m${productName} is installed successfully!${NC}"
|
||||||
echo
|
echo
|
||||||
echo -e "${GREEN_DARK}To configure ${productName} ${NC}: edit ${configDir}/${configFile}"
|
echo -e "${GREEN_DARK}To configure ${productName} ${NC}: edit ${configDir}/${configFile}"
|
||||||
echo -e "${GREEN_DARK}To configure taosadapter (if has) ${NC}: edit ${configDir}/taosadapter.toml"
|
[ -f ${configDir}/taosadapter.toml ] && [ -f ${installDir}/bin/taosadapter ] && \
|
||||||
|
echo -e "${GREEN_DARK}To configure Taos Adapter ${NC}: edit ${configDir}/taosadapter.toml"
|
||||||
if ((${service_mod} == 0)); then
|
if ((${service_mod} == 0)); then
|
||||||
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}systemctl start ${serverName}${NC}"
|
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}systemctl start ${serverName}${NC}"
|
||||||
|
[ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \
|
||||||
|
echo -e "${GREEN_DARK}To start Taos Adapter ${NC}: ${csudo}systemctl start taosadapter ${NC}"
|
||||||
elif ((${service_mod} == 1)); then
|
elif ((${service_mod} == 1)); then
|
||||||
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}service ${serverName} start${NC}"
|
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}service ${serverName} start${NC}"
|
||||||
|
[ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \
|
||||||
|
echo -e "${GREEN_DARK}To start Taos Adapter ${NC}: ${csudo}service taosadapter start${NC}"
|
||||||
else
|
else
|
||||||
echo -e "${GREEN_DARK}To start Taos Adapter (if has)${NC}: taosadapter &${NC}"
|
|
||||||
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ./${serverName}${NC}"
|
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ./${serverName}${NC}"
|
||||||
|
[ -f ${installDir}/bin/taosadapter ] && \
|
||||||
|
echo -e "${GREEN_DARK}To start Taos Adapter ${NC}: taosadapter &${NC}"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
echo -e "${GREEN_DARK}To access ${productName} ${NC}: use ${GREEN_UNDERLINE}${clientName}${NC} in shell${NC}"
|
echo -e "${GREEN_DARK}To access ${productName} ${NC}: use ${GREEN_UNDERLINE}${clientName}${NC} in shell${NC}"
|
||||||
|
|
|
@ -80,10 +80,12 @@ else
|
||||||
${build_dir}/bin/taosBenchmark \
|
${build_dir}/bin/taosBenchmark \
|
||||||
${build_dir}/bin/TDinsight.sh \
|
${build_dir}/bin/TDinsight.sh \
|
||||||
$tdinsight_caches"
|
$tdinsight_caches"
|
||||||
|
[ -f ${build_dir}/bin/taosx ] && taosx_bin="${build_dir}/bin/taosx"
|
||||||
|
|
||||||
bin_files="${build_dir}/bin/${serverName} \
|
bin_files="${build_dir}/bin/${serverName} \
|
||||||
${build_dir}/bin/${clientName} \
|
${build_dir}/bin/${clientName} \
|
||||||
${taostools_bin_files} \
|
${taostools_bin_files} \
|
||||||
|
${taosx_bin} \
|
||||||
${build_dir}/bin/taosadapter \
|
${build_dir}/bin/taosadapter \
|
||||||
${build_dir}/bin/udfd \
|
${build_dir}/bin/udfd \
|
||||||
${script_dir}/remove.sh \
|
${script_dir}/remove.sh \
|
||||||
|
|
|
@ -489,7 +489,7 @@ int taos_options_imp(TSDB_OPTION option, const char *str) {
|
||||||
*/
|
*/
|
||||||
uint64_t generateRequestId() {
|
uint64_t generateRequestId() {
|
||||||
static uint64_t hashId = 0;
|
static uint64_t hashId = 0;
|
||||||
static int32_t requestSerialId = 0;
|
static uint32_t requestSerialId = 0;
|
||||||
|
|
||||||
if (hashId == 0) {
|
if (hashId == 0) {
|
||||||
char uid[64] = {0};
|
char uid[64] = {0};
|
||||||
|
@ -508,7 +508,8 @@ uint64_t generateRequestId() {
|
||||||
while (true) {
|
while (true) {
|
||||||
int64_t ts = taosGetTimestampMs();
|
int64_t ts = taosGetTimestampMs();
|
||||||
uint64_t pid = taosGetPId();
|
uint64_t pid = taosGetPId();
|
||||||
int32_t val = atomic_add_fetch_32(&requestSerialId, 1);
|
uint32_t val = atomic_add_fetch_32(&requestSerialId, 1);
|
||||||
|
if (val >= 0xFFFF) atomic_store_32(&requestSerialId, 0);
|
||||||
|
|
||||||
id = ((hashId & 0x0FFF) << 52) | ((pid & 0x0FFF) << 40) | ((ts & 0xFFFFFF) << 16) | (val & 0xFFFF);
|
id = ((hashId & 0x0FFF) << 52) | ((pid & 0x0FFF) << 40) | ((ts & 0xFFFFFF) << 16) | (val & 0xFFFF);
|
||||||
if (id) {
|
if (id) {
|
||||||
|
|
|
@ -73,6 +73,8 @@ static int32_t hbProcessDBInfoRsp(void *value, int32_t valueLen, struct SCatalog
|
||||||
|
|
||||||
vgInfo->vgVersion = rsp->vgVersion;
|
vgInfo->vgVersion = rsp->vgVersion;
|
||||||
vgInfo->hashMethod = rsp->hashMethod;
|
vgInfo->hashMethod = rsp->hashMethod;
|
||||||
|
vgInfo->hashPrefix = rsp->hashPrefix;
|
||||||
|
vgInfo->hashSuffix = rsp->hashSuffix;
|
||||||
vgInfo->vgHash = taosHashInit(rsp->vgNum, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_ENTRY_LOCK);
|
vgInfo->vgHash = taosHashInit(rsp->vgNum, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_ENTRY_LOCK);
|
||||||
if (NULL == vgInfo->vgHash) {
|
if (NULL == vgInfo->vgHash) {
|
||||||
taosMemoryFree(vgInfo);
|
taosMemoryFree(vgInfo);
|
||||||
|
@ -412,6 +414,9 @@ int32_t hbGetQueryBasicInfo(SClientHbKey *connKey, SClientHbReq *req) {
|
||||||
int32_t code = hbBuildQueryDesc(hbBasic, pTscObj);
|
int32_t code = hbBuildQueryDesc(hbBasic, pTscObj);
|
||||||
if (code) {
|
if (code) {
|
||||||
releaseTscObj(connKey->tscRid);
|
releaseTscObj(connKey->tscRid);
|
||||||
|
if (hbBasic->queryDesc) {
|
||||||
|
taosArrayDestroyEx(hbBasic->queryDesc, tFreeClientHbQueryDesc);
|
||||||
|
}
|
||||||
taosMemoryFree(hbBasic);
|
taosMemoryFree(hbBasic);
|
||||||
return code;
|
return code;
|
||||||
}
|
}
|
||||||
|
|
|
@ -877,6 +877,7 @@ void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) {
|
||||||
pRequest->metric.resultReady = taosGetTimestampUs();
|
pRequest->metric.resultReady = taosGetTimestampUs();
|
||||||
|
|
||||||
if (pResult) {
|
if (pResult) {
|
||||||
|
destroyQueryExecRes(&pRequest->body.resInfo.execRes);
|
||||||
memcpy(&pRequest->body.resInfo.execRes, pResult, sizeof(*pResult));
|
memcpy(&pRequest->body.resInfo.execRes, pResult, sizeof(*pResult));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1408,6 +1409,7 @@ int32_t doProcessMsgFromServer(void* param) {
|
||||||
pSendInfo->fp(pSendInfo->param, &buf, pMsg->code);
|
pSendInfo->fp(pSendInfo->param, &buf, pMsg->code);
|
||||||
rpcFreeCont(pMsg->pCont);
|
rpcFreeCont(pMsg->pCont);
|
||||||
destroySendMsgInfo(pSendInfo);
|
destroySendMsgInfo(pSendInfo);
|
||||||
|
|
||||||
taosMemoryFree(arg);
|
taosMemoryFree(arg);
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
|
@ -1423,7 +1425,12 @@ void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) {
|
||||||
arg->msg = *pMsg;
|
arg->msg = *pMsg;
|
||||||
arg->pEpset = tEpSet;
|
arg->pEpset = tEpSet;
|
||||||
|
|
||||||
taosAsyncExec(doProcessMsgFromServer, arg, NULL);
|
if (0 != taosAsyncExec(doProcessMsgFromServer, arg, NULL)) {
|
||||||
|
tscError("failed to sched msg to tsc, tsc ready to quit");
|
||||||
|
rpcFreeCont(pMsg->pCont);
|
||||||
|
taosMemoryFree(arg->pEpset);
|
||||||
|
taosMemoryFree(arg);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
TAOS* taos_connect_auth(const char* ip, const char* user, const char* auth, const char* db, uint16_t port) {
|
TAOS* taos_connect_auth(const char* ip, const char* user, const char* auth, const char* db, uint16_t port) {
|
||||||
|
@ -1696,7 +1703,12 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if (!needConvert) return TSDB_CODE_SUCCESS;
|
|
||||||
|
if (!needConvert) {
|
||||||
|
return TSDB_CODE_SUCCESS;
|
||||||
|
}
|
||||||
|
|
||||||
|
tscDebug("start to convert form json format string");
|
||||||
|
|
||||||
char* p = (char*)pResultInfo->pData;
|
char* p = (char*)pResultInfo->pData;
|
||||||
int32_t dataLen = estimateJsonLen(pResultInfo, numOfCols, numOfRows);
|
int32_t dataLen = estimateJsonLen(pResultInfo, numOfCols, numOfRows);
|
||||||
|
|
|
@ -871,11 +871,13 @@ static void fetchCallback(void *pResult, void *param, int32_t code) {
|
||||||
|
|
||||||
if (code != TSDB_CODE_SUCCESS) {
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
pRequest->code = code;
|
pRequest->code = code;
|
||||||
|
taosMemoryFreeClear(pResultInfo->pData);
|
||||||
pRequest->body.fetchFp(pRequest->body.param, pRequest, 0);
|
pRequest->body.fetchFp(pRequest->body.param, pRequest, 0);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (pRequest->code != TSDB_CODE_SUCCESS) {
|
if (pRequest->code != TSDB_CODE_SUCCESS) {
|
||||||
|
taosMemoryFreeClear(pResultInfo->pData);
|
||||||
pRequest->body.fetchFp(pRequest->body.param, pRequest, 0);
|
pRequest->body.fetchFp(pRequest->body.param, pRequest, 0);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
|
@ -765,23 +765,25 @@ static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) {
|
||||||
}
|
}
|
||||||
taosArrayPush(pRequest->tableList, &pName);
|
taosArrayPush(pRequest->tableList, &pName);
|
||||||
|
|
||||||
|
pCreateReq->flags |= TD_CREATE_IF_NOT_EXISTS;
|
||||||
// change tag cid to new cid
|
// change tag cid to new cid
|
||||||
if(pCreateReq->type == TSDB_CHILD_TABLE){
|
if (pCreateReq->type == TSDB_CHILD_TABLE) {
|
||||||
STableMeta* pTableMeta = NULL;
|
STableMeta* pTableMeta = NULL;
|
||||||
SName sName = {0};
|
SName sName = {0};
|
||||||
toName(pTscObj->acctId, pRequest->pDb, pCreateReq->ctb.name, &sName);
|
toName(pTscObj->acctId, pRequest->pDb, pCreateReq->ctb.name, &sName);
|
||||||
code = catalogGetTableMeta(pCatalog, &conn, &sName, &pTableMeta);
|
code = catalogGetTableMeta(pCatalog, &conn, &sName, &pTableMeta);
|
||||||
if(code != TSDB_CODE_SUCCESS){
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
uError("taosCreateTable:catalogGetTableMeta failed. table name: %s", pCreateReq->ctb.name);
|
uError("taosCreateTable:catalogGetTableMeta failed. table name: %s", pCreateReq->ctb.name);
|
||||||
goto end;
|
goto end;
|
||||||
}
|
}
|
||||||
|
|
||||||
for(int32_t i = 0; i < taosArrayGetSize(pCreateReq->ctb.tagName); i++){
|
for (int32_t i = 0; i < taosArrayGetSize(pCreateReq->ctb.tagName); i++) {
|
||||||
char* tName = taosArrayGet(pCreateReq->ctb.tagName, i);
|
char* tName = taosArrayGet(pCreateReq->ctb.tagName, i);
|
||||||
for(int32_t j = pTableMeta->tableInfo.numOfColumns; j < pTableMeta->tableInfo.numOfColumns + pTableMeta->tableInfo.numOfTags; j++){
|
for (int32_t j = pTableMeta->tableInfo.numOfColumns;
|
||||||
SSchema *tag = &pTableMeta->schema[j];
|
j < pTableMeta->tableInfo.numOfColumns + pTableMeta->tableInfo.numOfTags; j++) {
|
||||||
if(strcmp(tag->name, tName) == 0 && tag->type != TSDB_DATA_TYPE_JSON){
|
SSchema* tag = &pTableMeta->schema[j];
|
||||||
tTagSetCid((STag *)pCreateReq->ctb.pTag, i, tag->colId);
|
if (strcmp(tag->name, tName) == 0 && tag->type != TSDB_DATA_TYPE_JSON) {
|
||||||
|
tTagSetCid((STag*)pCreateReq->ctb.pTag, i, tag->colId);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1322,12 +1324,12 @@ end:
|
||||||
return code;
|
return code;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int32_t tmqWriteRaw(TAOS* taos, void* data, int32_t dataLen) {
|
static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen) {
|
||||||
int32_t code = TSDB_CODE_SUCCESS;
|
int32_t code = TSDB_CODE_SUCCESS;
|
||||||
SHashObj* pVgHash = NULL;
|
SHashObj* pVgHash = NULL;
|
||||||
SQuery* pQuery = NULL;
|
SQuery* pQuery = NULL;
|
||||||
SMqRspObj rspObj = {0};
|
SMqRspObj rspObj = {0};
|
||||||
SDecoder decoder = {0};
|
SDecoder decoder = {0};
|
||||||
STableMeta* pTableMeta = NULL;
|
STableMeta* pTableMeta = NULL;
|
||||||
|
|
||||||
terrno = TSDB_CODE_SUCCESS;
|
terrno = TSDB_CODE_SUCCESS;
|
||||||
|
@ -1405,7 +1407,7 @@ static int32_t tmqWriteRaw(TAOS* taos, void* data, int32_t dataLen) {
|
||||||
}
|
}
|
||||||
|
|
||||||
code = catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta);
|
code = catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta);
|
||||||
if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST){
|
if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST) {
|
||||||
uError("WriteRaw:catalogGetTableMeta table not exist. table name: %s", tbName);
|
uError("WriteRaw:catalogGetTableMeta table not exist. table name: %s", tbName);
|
||||||
code = TSDB_CODE_SUCCESS;
|
code = TSDB_CODE_SUCCESS;
|
||||||
continue;
|
continue;
|
||||||
|
@ -1466,7 +1468,7 @@ static int32_t tmqWriteRaw(TAOS* taos, void* data, int32_t dataLen) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// pSW->pSchema should be same as pTableMeta->schema
|
// pSW->pSchema should be same as pTableMeta->schema
|
||||||
// ASSERT(pSW->nCols == pTableMeta->tableInfo.numOfColumns);
|
// ASSERT(pSW->nCols == pTableMeta->tableInfo.numOfColumns);
|
||||||
uint64_t suid = (TSDB_NORMAL_TABLE == pTableMeta->tableType ? 0 : pTableMeta->suid);
|
uint64_t suid = (TSDB_NORMAL_TABLE == pTableMeta->tableType ? 0 : pTableMeta->suid);
|
||||||
uint64_t uid = pTableMeta->uid;
|
uint64_t uid = pTableMeta->uid;
|
||||||
int16_t sver = pTableMeta->sversion;
|
int16_t sver = pTableMeta->sversion;
|
||||||
|
@ -1494,10 +1496,10 @@ static int32_t tmqWriteRaw(TAOS* taos, void* data, int32_t dataLen) {
|
||||||
int32_t offset = 0;
|
int32_t offset = 0;
|
||||||
for (int32_t k = 0; k < pTableMeta->tableInfo.numOfColumns; k++) {
|
for (int32_t k = 0; k < pTableMeta->tableInfo.numOfColumns; k++) {
|
||||||
const SSchema* pColumn = &pTableMeta->schema[k];
|
const SSchema* pColumn = &pTableMeta->schema[k];
|
||||||
int32_t* index = taosHashGet(schemaHash, pColumn->name, strlen(pColumn->name));
|
int32_t* index = taosHashGet(schemaHash, pColumn->name, strlen(pColumn->name));
|
||||||
if(!index){
|
if (!index) {
|
||||||
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k);
|
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k);
|
||||||
}else{
|
} else {
|
||||||
char* colData = rspObj.resInfo.row[*index];
|
char* colData = rspObj.resInfo.row[*index];
|
||||||
if (!colData) {
|
if (!colData) {
|
||||||
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k);
|
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k);
|
||||||
|
@ -1668,7 +1670,7 @@ int32_t tmq_write_raw(TAOS* taos, tmq_raw_data raw) {
|
||||||
} else if (raw.raw_type == TDMT_VND_DELETE) {
|
} else if (raw.raw_type == TDMT_VND_DELETE) {
|
||||||
return taosDeleteData(taos, raw.raw, raw.raw_len);
|
return taosDeleteData(taos, raw.raw, raw.raw_len);
|
||||||
} else if (raw.raw_type == RES_TYPE__TMQ) {
|
} else if (raw.raw_type == RES_TYPE__TMQ) {
|
||||||
return tmqWriteRaw(taos, raw.raw, raw.raw_len);
|
return tmqWriteRawDataImpl(taos, raw.raw, raw.raw_len);
|
||||||
}
|
}
|
||||||
return TSDB_CODE_INVALID_PARA;
|
return TSDB_CODE_INVALID_PARA;
|
||||||
}
|
}
|
|
@ -537,7 +537,7 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
|
||||||
|
|
||||||
code = smlSendMetaMsg(info, &pName, pColumns, pTags, pTableMeta, action);
|
code = smlSendMetaMsg(info, &pName, pColumns, pTags, pTableMeta, action);
|
||||||
if (code != TSDB_CODE_SUCCESS) {
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
uError("SML:0x%" PRIx64 " smlSendMetaMsg failed. can not create %s", info->id, superTable);
|
uError("SML:0x%" PRIx64 " smlSendMetaMsg failed. can not create %s", info->id, pName.tname);
|
||||||
goto end;
|
goto end;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -547,6 +547,8 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
|
||||||
goto end;
|
goto end;
|
||||||
}
|
}
|
||||||
needCheckMeta = true;
|
needCheckMeta = true;
|
||||||
|
taosHashCleanup(hashTmp);
|
||||||
|
hashTmp = NULL;
|
||||||
} else {
|
} else {
|
||||||
uError("SML:0x%" PRIx64 " load table meta error: %s", info->id, tstrerror(code));
|
uError("SML:0x%" PRIx64 " load table meta error: %s", info->id, tstrerror(code));
|
||||||
goto end;
|
goto end;
|
||||||
|
@ -555,7 +557,7 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
|
||||||
|
|
||||||
code = catalogGetSTableMeta(info->pCatalog, &conn, &pName, &pTableMeta);
|
code = catalogGetSTableMeta(info->pCatalog, &conn, &pName, &pTableMeta);
|
||||||
if (code != TSDB_CODE_SUCCESS) {
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
uError("SML:0x%" PRIx64 " catalogGetSTableMeta failed. super table name %s", info->id, (char *)superTable);
|
uError("SML:0x%" PRIx64 " catalogGetSTableMeta failed. super table name %s", info->id, pName.tname);
|
||||||
goto end;
|
goto end;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -563,12 +565,12 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
|
||||||
code = smlCheckMeta(&(pTableMeta->schema[pTableMeta->tableInfo.numOfColumns]), pTableMeta->tableInfo.numOfTags,
|
code = smlCheckMeta(&(pTableMeta->schema[pTableMeta->tableInfo.numOfColumns]), pTableMeta->tableInfo.numOfTags,
|
||||||
sTableData->tags, true);
|
sTableData->tags, true);
|
||||||
if (code != TSDB_CODE_SUCCESS) {
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
uError("SML:0x%" PRIx64 " check tag failed. super table name %s", info->id, (char *)superTable);
|
uError("SML:0x%" PRIx64 " check tag failed. super table name %s", info->id, pName.tname);
|
||||||
goto end;
|
goto end;
|
||||||
}
|
}
|
||||||
code = smlCheckMeta(&(pTableMeta->schema[0]), pTableMeta->tableInfo.numOfColumns, sTableData->cols, false);
|
code = smlCheckMeta(&(pTableMeta->schema[0]), pTableMeta->tableInfo.numOfColumns, sTableData->cols, false);
|
||||||
if (code != TSDB_CODE_SUCCESS) {
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
uError("SML:0x%" PRIx64 " check cols failed. super table name %s", info->id, (char *)superTable);
|
uError("SML:0x%" PRIx64 " check cols failed. super table name %s", info->id, pName.tname);
|
||||||
goto end;
|
goto end;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -576,7 +578,6 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
|
||||||
sTableData->tableMeta = pTableMeta;
|
sTableData->tableMeta = pTableMeta;
|
||||||
|
|
||||||
tableMetaSml = (SSmlSTableMeta **)taosHashIterate(info->superTables, tableMetaSml);
|
tableMetaSml = (SSmlSTableMeta **)taosHashIterate(info->superTables, tableMetaSml);
|
||||||
taosHashCleanup(hashTmp);
|
|
||||||
}
|
}
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
|
@ -1559,7 +1560,7 @@ cleanup:
|
||||||
|
|
||||||
/************* TSDB_SML_JSON_PROTOCOL function start **************/
|
/************* TSDB_SML_JSON_PROTOCOL function start **************/
|
||||||
static int32_t smlJsonCreateSring(const char **output, char *input, int32_t inputLen) {
|
static int32_t smlJsonCreateSring(const char **output, char *input, int32_t inputLen) {
|
||||||
*output = (const char *)taosMemoryMalloc(inputLen);
|
*output = (const char *)taosMemoryCalloc(1, inputLen);
|
||||||
if (*output == NULL) {
|
if (*output == NULL) {
|
||||||
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||||
}
|
}
|
||||||
|
@ -2450,9 +2451,11 @@ static void smlInsertCallback(void *param, void *res, int32_t code) {
|
||||||
uDebug("SML:0x%" PRIx64 " result. code:%d, msg:%s", info->id, pRequest->code, pRequest->msgBuf);
|
uDebug("SML:0x%" PRIx64 " result. code:%d, msg:%s", info->id, pRequest->code, pRequest->msgBuf);
|
||||||
// lock
|
// lock
|
||||||
taosThreadSpinLock(&info->params->lock);
|
taosThreadSpinLock(&info->params->lock);
|
||||||
info->params->request->body.resInfo.numOfRows += rows;
|
|
||||||
if (code != TSDB_CODE_SUCCESS) {
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
info->params->request->code = code;
|
info->params->request->code = code;
|
||||||
|
info->params->request->body.resInfo.numOfRows += rows;
|
||||||
|
}else{
|
||||||
|
info->params->request->body.resInfo.numOfRows += info->affectedRows;
|
||||||
}
|
}
|
||||||
taosThreadSpinUnlock(&info->params->lock);
|
taosThreadSpinUnlock(&info->params->lock);
|
||||||
// unlock
|
// unlock
|
||||||
|
|
|
@ -90,7 +90,7 @@ static const SSysDbTableSchema userDBSchema[] = {
|
||||||
{.name = "minrows", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
|
{.name = "minrows", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
|
||||||
{.name = "maxrows", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
|
{.name = "maxrows", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
|
||||||
{.name = "comp", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT, .sysInfo = true},
|
{.name = "comp", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT, .sysInfo = true},
|
||||||
{.name = "precision", .bytes = 2 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
|
{.name = "precision", .bytes = 2 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
|
||||||
{.name = "status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
|
{.name = "status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
|
||||||
{.name = "retentions", .bytes = 60 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
|
{.name = "retentions", .bytes = 60 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
|
||||||
{.name = "single_stable", .bytes = 1, .type = TSDB_DATA_TYPE_BOOL, .sysInfo = true},
|
{.name = "single_stable", .bytes = 1, .type = TSDB_DATA_TYPE_BOOL, .sysInfo = true},
|
||||||
|
@ -102,6 +102,10 @@ static const SSysDbTableSchema userDBSchema[] = {
|
||||||
{.name = "wal_retention_size", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = true},
|
{.name = "wal_retention_size", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = true},
|
||||||
{.name = "wal_roll_period", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
|
{.name = "wal_roll_period", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
|
||||||
{.name = "wal_segment_size", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = true},
|
{.name = "wal_segment_size", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = true},
|
||||||
|
{.name = "stt_trigger", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT, .sysInfo = true},
|
||||||
|
{.name = "table_prefix", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT, .sysInfo = true},
|
||||||
|
{.name = "table_suffix", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT, .sysInfo = true},
|
||||||
|
{.name = "tsdb_pagesize", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
|
||||||
};
|
};
|
||||||
|
|
||||||
static const SSysDbTableSchema userFuncSchema[] = {
|
static const SSysDbTableSchema userFuncSchema[] = {
|
||||||
|
@ -206,6 +210,7 @@ static const SSysDbTableSchema vgroupsSchema[] = {
|
||||||
{.name = "v3_dnode", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
|
{.name = "v3_dnode", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
|
||||||
{.name = "v3_status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
|
{.name = "v3_status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
|
||||||
{.name = "status", .bytes = 12 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
|
{.name = "status", .bytes = 12 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
|
||||||
|
{.name = "cacheload", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
|
||||||
{.name = "nfiles", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
|
{.name = "nfiles", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
|
||||||
{.name = "file_size", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
|
{.name = "file_size", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
|
||||||
{.name = "tsma", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT, .sysInfo = true},
|
{.name = "tsma", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT, .sysInfo = true},
|
||||||
|
@ -256,6 +261,15 @@ static const SSysDbTableSchema subscriptionSchema[] = {
|
||||||
{.name = "consumer_id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false},
|
{.name = "consumer_id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false},
|
||||||
};
|
};
|
||||||
|
|
||||||
|
static const SSysDbTableSchema vnodesSchema[] = {
|
||||||
|
{.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
|
||||||
|
{.name = "replica", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT, .sysInfo = true},
|
||||||
|
{.name = "status", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
|
||||||
|
{.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
|
||||||
|
{.name = "dnode_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
|
||||||
|
{.name = "dnode_ep", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
|
||||||
|
};
|
||||||
|
|
||||||
static const SSysTableMeta infosMeta[] = {
|
static const SSysTableMeta infosMeta[] = {
|
||||||
{TSDB_INS_TABLE_DNODES, dnodesSchema, tListLen(dnodesSchema), true},
|
{TSDB_INS_TABLE_DNODES, dnodesSchema, tListLen(dnodesSchema), true},
|
||||||
{TSDB_INS_TABLE_MNODES, mnodesSchema, tListLen(mnodesSchema), true},
|
{TSDB_INS_TABLE_MNODES, mnodesSchema, tListLen(mnodesSchema), true},
|
||||||
|
@ -279,6 +293,7 @@ static const SSysTableMeta infosMeta[] = {
|
||||||
{TSDB_INS_TABLE_TOPICS, topicSchema, tListLen(topicSchema), false},
|
{TSDB_INS_TABLE_TOPICS, topicSchema, tListLen(topicSchema), false},
|
||||||
{TSDB_INS_TABLE_SUBSCRIPTIONS, subscriptionSchema, tListLen(subscriptionSchema), false},
|
{TSDB_INS_TABLE_SUBSCRIPTIONS, subscriptionSchema, tListLen(subscriptionSchema), false},
|
||||||
{TSDB_INS_TABLE_STREAMS, streamSchema, tListLen(streamSchema), false},
|
{TSDB_INS_TABLE_STREAMS, streamSchema, tListLen(streamSchema), false},
|
||||||
|
{TSDB_INS_TABLE_VNODES, vnodesSchema, tListLen(vnodesSchema), true},
|
||||||
};
|
};
|
||||||
|
|
||||||
static const SSysDbTableSchema connectionsSchema[] = {
|
static const SSysDbTableSchema connectionsSchema[] = {
|
||||||
|
|
|
@ -140,7 +140,8 @@ int32_t colDataReserve(SColumnInfoData* pColumnInfoData, size_t newSize) {
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void doCopyNItems(struct SColumnInfoData* pColumnInfoData, int32_t currentRow, const char* pData, int32_t itemLen, int32_t numOfRows) {
|
static void doCopyNItems(struct SColumnInfoData* pColumnInfoData, int32_t currentRow, const char* pData,
|
||||||
|
int32_t itemLen, int32_t numOfRows) {
|
||||||
ASSERT(pColumnInfoData->info.bytes >= itemLen);
|
ASSERT(pColumnInfoData->info.bytes >= itemLen);
|
||||||
size_t start = 1;
|
size_t start = 1;
|
||||||
|
|
||||||
|
@ -148,21 +149,23 @@ static void doCopyNItems(struct SColumnInfoData* pColumnInfoData, int32_t curren
|
||||||
memcpy(pColumnInfoData->pData, pData, itemLen);
|
memcpy(pColumnInfoData->pData, pData, itemLen);
|
||||||
|
|
||||||
int32_t t = 0;
|
int32_t t = 0;
|
||||||
int32_t count = log(numOfRows)/log(2);
|
int32_t count = log(numOfRows) / log(2);
|
||||||
while(t < count) {
|
while (t < count) {
|
||||||
int32_t xlen = 1 << t;
|
int32_t xlen = 1 << t;
|
||||||
memcpy(pColumnInfoData->pData + start * itemLen + pColumnInfoData->varmeta.length, pColumnInfoData->pData, xlen * itemLen);
|
memcpy(pColumnInfoData->pData + start * itemLen + pColumnInfoData->varmeta.length, pColumnInfoData->pData,
|
||||||
|
xlen * itemLen);
|
||||||
t += 1;
|
t += 1;
|
||||||
start += xlen;
|
start += xlen;
|
||||||
}
|
}
|
||||||
|
|
||||||
// the tail part
|
// the tail part
|
||||||
if (numOfRows > start) {
|
if (numOfRows > start) {
|
||||||
memcpy(pColumnInfoData->pData + start * itemLen + currentRow * itemLen, pColumnInfoData->pData, (numOfRows - start) * itemLen);
|
memcpy(pColumnInfoData->pData + start * itemLen + currentRow * itemLen, pColumnInfoData->pData,
|
||||||
|
(numOfRows - start) * itemLen);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) {
|
if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) {
|
||||||
for(int32_t i = 0; i < numOfRows; ++i) {
|
for (int32_t i = 0; i < numOfRows; ++i) {
|
||||||
pColumnInfoData->varmeta.offset[i + currentRow] = pColumnInfoData->varmeta.length + i * itemLen;
|
pColumnInfoData->varmeta.offset[i + currentRow] = pColumnInfoData->varmeta.length + i * itemLen;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -170,7 +173,8 @@ static void doCopyNItems(struct SColumnInfoData* pColumnInfoData, int32_t curren
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t colDataAppendNItems(SColumnInfoData* pColumnInfoData, uint32_t currentRow, const char* pData, uint32_t numOfRows) {
|
int32_t colDataAppendNItems(SColumnInfoData* pColumnInfoData, uint32_t currentRow, const char* pData,
|
||||||
|
uint32_t numOfRows) {
|
||||||
ASSERT(pData != NULL && pColumnInfoData != NULL);
|
ASSERT(pData != NULL && pColumnInfoData != NULL);
|
||||||
|
|
||||||
int32_t len = pColumnInfoData->info.bytes;
|
int32_t len = pColumnInfoData->info.bytes;
|
||||||
|
@ -278,7 +282,7 @@ int32_t colDataMergeCol(SColumnInfoData* pColumnInfoData, int32_t numOfRow1, int
|
||||||
} else {
|
} else {
|
||||||
if (finalNumOfRows > *capacity || (numOfRow1 == 0 && pColumnInfoData->info.bytes != 0)) {
|
if (finalNumOfRows > *capacity || (numOfRow1 == 0 && pColumnInfoData->info.bytes != 0)) {
|
||||||
// all data may be null, when the pColumnInfoData->info.type == 0, bytes == 0;
|
// all data may be null, when the pColumnInfoData->info.type == 0, bytes == 0;
|
||||||
// ASSERT(finalNumOfRows * pColumnInfoData->info.bytes);
|
// ASSERT(finalNumOfRows * pColumnInfoData->info.bytes);
|
||||||
char* tmp = taosMemoryRealloc(pColumnInfoData->pData, finalNumOfRows * pColumnInfoData->info.bytes);
|
char* tmp = taosMemoryRealloc(pColumnInfoData->pData, finalNumOfRows * pColumnInfoData->info.bytes);
|
||||||
if (tmp == NULL) {
|
if (tmp == NULL) {
|
||||||
return TSDB_CODE_VND_OUT_OF_MEMORY;
|
return TSDB_CODE_VND_OUT_OF_MEMORY;
|
||||||
|
@ -557,7 +561,7 @@ int32_t blockDataToBuf(char* buf, const SSDataBlock* pBlock) {
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t blockDataFromBuf(SSDataBlock* pBlock, const char* buf) {
|
int32_t blockDataFromBuf(SSDataBlock* pBlock, const char* buf) {
|
||||||
int32_t numOfRows = *(int32_t*) buf;
|
int32_t numOfRows = *(int32_t*)buf;
|
||||||
blockDataEnsureCapacity(pBlock, numOfRows);
|
blockDataEnsureCapacity(pBlock, numOfRows);
|
||||||
|
|
||||||
pBlock->info.rows = numOfRows;
|
pBlock->info.rows = numOfRows;
|
||||||
|
@ -676,7 +680,8 @@ size_t blockDataGetRowSize(SSDataBlock* pBlock) {
|
||||||
* @return
|
* @return
|
||||||
*/
|
*/
|
||||||
size_t blockDataGetSerialMetaSize(uint32_t numOfCols) {
|
size_t blockDataGetSerialMetaSize(uint32_t numOfCols) {
|
||||||
// | version | total length | total rows | total columns | flag seg| block group id | column schema | each column length |
|
// | version | total length | total rows | total columns | flag seg| block group id | column schema | each column
|
||||||
|
// length |
|
||||||
return sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t) + sizeof(uint64_t) +
|
return sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t) + sizeof(uint64_t) +
|
||||||
numOfCols * (sizeof(int8_t) + sizeof(int32_t)) + numOfCols * sizeof(int32_t);
|
numOfCols * (sizeof(int8_t) + sizeof(int32_t)) + numOfCols * sizeof(int32_t);
|
||||||
}
|
}
|
||||||
|
@ -1302,6 +1307,40 @@ int32_t copyDataBlock(SSDataBlock* dst, const SSDataBlock* src) {
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
SSDataBlock* createSpecialDataBlock(EStreamType type) {
|
||||||
|
SSDataBlock* pBlock = taosMemoryCalloc(1, sizeof(SSDataBlock));
|
||||||
|
pBlock->info.hasVarCol = false;
|
||||||
|
pBlock->info.groupId = 0;
|
||||||
|
pBlock->info.rows = 0;
|
||||||
|
pBlock->info.type = type;
|
||||||
|
pBlock->info.rowSize =
|
||||||
|
sizeof(TSKEY) + sizeof(TSKEY) + sizeof(uint64_t) + sizeof(uint64_t) + sizeof(TSKEY) + sizeof(TSKEY);
|
||||||
|
pBlock->info.watermark = INT64_MIN;
|
||||||
|
|
||||||
|
pBlock->pDataBlock = taosArrayInit(6, sizeof(SColumnInfoData));
|
||||||
|
SColumnInfoData infoData = {0};
|
||||||
|
infoData.info.type = TSDB_DATA_TYPE_TIMESTAMP;
|
||||||
|
infoData.info.bytes = sizeof(TSKEY);
|
||||||
|
// window start ts
|
||||||
|
taosArrayPush(pBlock->pDataBlock, &infoData);
|
||||||
|
// window end ts
|
||||||
|
taosArrayPush(pBlock->pDataBlock, &infoData);
|
||||||
|
|
||||||
|
infoData.info.type = TSDB_DATA_TYPE_UBIGINT;
|
||||||
|
infoData.info.bytes = sizeof(uint64_t);
|
||||||
|
// uid
|
||||||
|
taosArrayPush(pBlock->pDataBlock, &infoData);
|
||||||
|
// group id
|
||||||
|
taosArrayPush(pBlock->pDataBlock, &infoData);
|
||||||
|
|
||||||
|
// calculate start ts
|
||||||
|
taosArrayPush(pBlock->pDataBlock, &infoData);
|
||||||
|
// calculate end ts
|
||||||
|
taosArrayPush(pBlock->pDataBlock, &infoData);
|
||||||
|
|
||||||
|
return pBlock;
|
||||||
|
}
|
||||||
|
|
||||||
SSDataBlock* createOneDataBlock(const SSDataBlock* pDataBlock, bool copyData) {
|
SSDataBlock* createOneDataBlock(const SSDataBlock* pDataBlock, bool copyData) {
|
||||||
if (pDataBlock == NULL) {
|
if (pDataBlock == NULL) {
|
||||||
return NULL;
|
return NULL;
|
||||||
|
@ -1426,7 +1465,7 @@ size_t blockDataGetCapacityInRow(const SSDataBlock* pBlock, size_t pageSize) {
|
||||||
}
|
}
|
||||||
|
|
||||||
void colDataDestroy(SColumnInfoData* pColData) {
|
void colDataDestroy(SColumnInfoData* pColData) {
|
||||||
if(!pColData) return;
|
if (!pColData) return;
|
||||||
if (IS_VAR_DATA_TYPE(pColData->info.type)) {
|
if (IS_VAR_DATA_TYPE(pColData->info.type)) {
|
||||||
taosMemoryFreeClear(pColData->varmeta.offset);
|
taosMemoryFreeClear(pColData->varmeta.offset);
|
||||||
} else {
|
} else {
|
||||||
|
@ -1693,7 +1732,7 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) {
|
||||||
}
|
}
|
||||||
struct tm ptm = {0};
|
struct tm ptm = {0};
|
||||||
taosLocalTime(&tt, &ptm);
|
taosLocalTime(&tt, &ptm);
|
||||||
size_t pos = strftime(buf, 35, "%Y-%m-%d %H:%M:%S", &ptm);
|
size_t pos = strftime(buf, 35, "%Y-%m-%d %H:%M:%S", &ptm);
|
||||||
|
|
||||||
if (precision == TSDB_TIME_PRECISION_NANO) {
|
if (precision == TSDB_TIME_PRECISION_NANO) {
|
||||||
sprintf(buf + pos, ".%09d", ms);
|
sprintf(buf + pos, ".%09d", ms);
|
||||||
|
@ -1847,20 +1886,20 @@ char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf)
|
||||||
break;
|
break;
|
||||||
case TSDB_DATA_TYPE_VARCHAR: {
|
case TSDB_DATA_TYPE_VARCHAR: {
|
||||||
memset(pBuf, 0, sizeof(pBuf));
|
memset(pBuf, 0, sizeof(pBuf));
|
||||||
char* pData = colDataGetVarData(pColInfoData, j);
|
char* pData = colDataGetVarData(pColInfoData, j);
|
||||||
int32_t dataSize = TMIN(sizeof(pBuf), varDataLen(pData));
|
int32_t dataSize = TMIN(sizeof(pBuf), varDataLen(pData));
|
||||||
memcpy(pBuf, varDataVal(pData), dataSize);
|
memcpy(pBuf, varDataVal(pData), dataSize);
|
||||||
len += snprintf(dumpBuf + len, size - len, " %15s |", pBuf);
|
len += snprintf(dumpBuf + len, size - len, " %15s |", pBuf);
|
||||||
if (len >= size - 1) return dumpBuf;
|
if (len >= size - 1) return dumpBuf;
|
||||||
} break;
|
} break;
|
||||||
case TSDB_DATA_TYPE_NCHAR: {
|
case TSDB_DATA_TYPE_NCHAR: {
|
||||||
char* pData = colDataGetVarData(pColInfoData, j);
|
char* pData = colDataGetVarData(pColInfoData, j);
|
||||||
int32_t dataSize = TMIN(sizeof(pBuf), varDataLen(pData));
|
int32_t dataSize = TMIN(sizeof(pBuf), varDataLen(pData));
|
||||||
memset(pBuf, 0, sizeof(pBuf));
|
memset(pBuf, 0, sizeof(pBuf));
|
||||||
taosUcs4ToMbs((TdUcs4 *)varDataVal(pData), dataSize, pBuf);
|
taosUcs4ToMbs((TdUcs4*)varDataVal(pData), dataSize, pBuf);
|
||||||
len += snprintf(dumpBuf + len, size - len, " %15s |", pBuf);
|
len += snprintf(dumpBuf + len, size - len, " %15s |", pBuf);
|
||||||
if (len >= size - 1) return dumpBuf;
|
if (len >= size - 1) return dumpBuf;
|
||||||
} break;
|
} break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
len += snprintf(dumpBuf + len, size - len, "\n");
|
len += snprintf(dumpBuf + len, size - len, "\n");
|
||||||
|
@ -1877,7 +1916,7 @@ char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf)
|
||||||
* @param pDataBlocks
|
* @param pDataBlocks
|
||||||
* @param vgId
|
* @param vgId
|
||||||
* @param suid
|
* @param suid
|
||||||
*
|
*
|
||||||
*/
|
*/
|
||||||
int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SSDataBlock* pDataBlock, STSchema* pTSchema, int32_t vgId,
|
int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SSDataBlock* pDataBlock, STSchema* pTSchema, int32_t vgId,
|
||||||
tb_uid_t suid) {
|
tb_uid_t suid) {
|
||||||
|
@ -1904,8 +1943,8 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SSDataBlock* pDataB
|
||||||
tdSRowInit(&rb, pTSchema->version);
|
tdSRowInit(&rb, pTSchema->version);
|
||||||
|
|
||||||
for (int32_t i = 0; i < sz; ++i) {
|
for (int32_t i = 0; i < sz; ++i) {
|
||||||
int32_t colNum = taosArrayGetSize(pDataBlock->pDataBlock);
|
int32_t colNum = taosArrayGetSize(pDataBlock->pDataBlock);
|
||||||
int32_t rows = pDataBlock->info.rows;
|
int32_t rows = pDataBlock->info.rows;
|
||||||
// int32_t rowSize = pDataBlock->info.rowSize;
|
// int32_t rowSize = pDataBlock->info.rowSize;
|
||||||
// int64_t groupId = pDataBlock->info.groupId;
|
// int64_t groupId = pDataBlock->info.groupId;
|
||||||
|
|
||||||
|
@ -1926,7 +1965,7 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SSDataBlock* pDataB
|
||||||
|
|
||||||
msgLen += sizeof(SSubmitBlk);
|
msgLen += sizeof(SSubmitBlk);
|
||||||
int32_t dataLen = 0;
|
int32_t dataLen = 0;
|
||||||
for (int32_t j = 0; j < rows; ++j) { // iterate by row
|
for (int32_t j = 0; j < rows; ++j) { // iterate by row
|
||||||
tdSRowResetBuf(&rb, POINTER_SHIFT(pDataBuf, msgLen + dataLen)); // set row buf
|
tdSRowResetBuf(&rb, POINTER_SHIFT(pDataBuf, msgLen + dataLen)); // set row buf
|
||||||
bool isStartKey = false;
|
bool isStartKey = false;
|
||||||
int32_t offset = 0;
|
int32_t offset = 0;
|
||||||
|
@ -2081,6 +2120,7 @@ void blockEncode(const SSDataBlock* pBlock, char* data, int32_t* dataLen, int32_
|
||||||
int32_t* rows = (int32_t*)data;
|
int32_t* rows = (int32_t*)data;
|
||||||
*rows = pBlock->info.rows;
|
*rows = pBlock->info.rows;
|
||||||
data += sizeof(int32_t);
|
data += sizeof(int32_t);
|
||||||
|
ASSERT(*rows > 0);
|
||||||
|
|
||||||
int32_t* cols = (int32_t*)data;
|
int32_t* cols = (int32_t*)data;
|
||||||
*cols = numOfCols;
|
*cols = numOfCols;
|
||||||
|
@ -2089,7 +2129,7 @@ void blockEncode(const SSDataBlock* pBlock, char* data, int32_t* dataLen, int32_
|
||||||
// flag segment.
|
// flag segment.
|
||||||
// the inital bit is for column info
|
// the inital bit is for column info
|
||||||
int32_t* flagSegment = (int32_t*)data;
|
int32_t* flagSegment = (int32_t*)data;
|
||||||
*flagSegment = (1<<31);
|
*flagSegment = (1 << 31);
|
||||||
|
|
||||||
data += sizeof(int32_t);
|
data += sizeof(int32_t);
|
||||||
|
|
||||||
|
@ -2144,12 +2184,14 @@ void blockEncode(const SSDataBlock* pBlock, char* data, int32_t* dataLen, int32_
|
||||||
|
|
||||||
*actualLen = *dataLen;
|
*actualLen = *dataLen;
|
||||||
*groupId = pBlock->info.groupId;
|
*groupId = pBlock->info.groupId;
|
||||||
|
ASSERT(*dataLen > 0);
|
||||||
|
uDebug("build data block, actualLen:%d, rows:%d, cols:%d", *dataLen, *rows, *cols);
|
||||||
}
|
}
|
||||||
|
|
||||||
const char* blockDecode(SSDataBlock* pBlock, const char* pData) {
|
const char* blockDecode(SSDataBlock* pBlock, const char* pData) {
|
||||||
const char* pStart = pData;
|
const char* pStart = pData;
|
||||||
|
|
||||||
int32_t version = *(int32_t*) pStart;
|
int32_t version = *(int32_t*)pStart;
|
||||||
pStart += sizeof(int32_t);
|
pStart += sizeof(int32_t);
|
||||||
ASSERT(version == 1);
|
ASSERT(version == 1);
|
||||||
|
|
||||||
|
@ -2158,7 +2200,7 @@ const char* blockDecode(SSDataBlock* pBlock, const char* pData) {
|
||||||
pStart += sizeof(int32_t);
|
pStart += sizeof(int32_t);
|
||||||
|
|
||||||
// total rows sizeof(int32_t)
|
// total rows sizeof(int32_t)
|
||||||
int32_t numOfRows = *(int32_t*)pStart;
|
int32_t numOfRows = *(int32_t*)pStart;
|
||||||
pStart += sizeof(int32_t);
|
pStart += sizeof(int32_t);
|
||||||
|
|
||||||
// total columns sizeof(int32_t)
|
// total columns sizeof(int32_t)
|
||||||
|
|
|
@ -15,6 +15,7 @@
|
||||||
|
|
||||||
#define _DEFAULT_SOURCE
|
#define _DEFAULT_SOURCE
|
||||||
#include "tdataformat.h"
|
#include "tdataformat.h"
|
||||||
|
#include "tRealloc.h"
|
||||||
#include "tcoding.h"
|
#include "tcoding.h"
|
||||||
#include "tdatablock.h"
|
#include "tdatablock.h"
|
||||||
#include "tlog.h"
|
#include "tlog.h"
|
||||||
|
@ -680,7 +681,7 @@ int32_t tGetTSRow(uint8_t *p, STSRow2 **ppRow) {
|
||||||
return n;
|
return n;
|
||||||
}
|
}
|
||||||
|
|
||||||
// STSchema
|
// STSchema ========================================
|
||||||
int32_t tTSchemaCreate(int32_t sver, SSchema *pSchema, int32_t ncols, STSchema **ppTSchema) {
|
int32_t tTSchemaCreate(int32_t sver, SSchema *pSchema, int32_t ncols, STSchema **ppTSchema) {
|
||||||
*ppTSchema = (STSchema *)taosMemoryMalloc(sizeof(STSchema) + sizeof(STColumn) * ncols);
|
*ppTSchema = (STSchema *)taosMemoryMalloc(sizeof(STSchema) + sizeof(STColumn) * ncols);
|
||||||
if (*ppTSchema == NULL) {
|
if (*ppTSchema == NULL) {
|
||||||
|
@ -720,9 +721,7 @@ void tTSchemaDestroy(STSchema *pTSchema) {
|
||||||
if (pTSchema) taosMemoryFree(pTSchema);
|
if (pTSchema) taosMemoryFree(pTSchema);
|
||||||
}
|
}
|
||||||
|
|
||||||
// STSRowBuilder
|
// STag ========================================
|
||||||
|
|
||||||
// STag
|
|
||||||
static int tTagValCmprFn(const void *p1, const void *p2) {
|
static int tTagValCmprFn(const void *p1, const void *p2) {
|
||||||
if (((STagVal *)p1)->cid < ((STagVal *)p2)->cid) {
|
if (((STagVal *)p1)->cid < ((STagVal *)p2)->cid) {
|
||||||
return -1;
|
return -1;
|
||||||
|
@ -1172,4 +1171,495 @@ STSchema *tdGetSchemaFromBuilder(STSchemaBuilder *pBuilder) {
|
||||||
return pSchema;
|
return pSchema;
|
||||||
}
|
}
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
// SColData ========================================
|
||||||
|
void tColDataDestroy(void *ph) {
|
||||||
|
SColData *pColData = (SColData *)ph;
|
||||||
|
|
||||||
|
tFree(pColData->pBitMap);
|
||||||
|
tFree((uint8_t *)pColData->aOffset);
|
||||||
|
tFree(pColData->pData);
|
||||||
|
}
|
||||||
|
|
||||||
|
void tColDataInit(SColData *pColData, int16_t cid, int8_t type, int8_t smaOn) {
|
||||||
|
pColData->cid = cid;
|
||||||
|
pColData->type = type;
|
||||||
|
pColData->smaOn = smaOn;
|
||||||
|
tColDataClear(pColData);
|
||||||
|
}
|
||||||
|
|
||||||
|
void tColDataClear(SColData *pColData) {
|
||||||
|
pColData->nVal = 0;
|
||||||
|
pColData->flag = 0;
|
||||||
|
pColData->nData = 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static FORCE_INLINE int32_t tColDataPutValue(SColData *pColData, SColVal *pColVal) {
|
||||||
|
int32_t code = 0;
|
||||||
|
|
||||||
|
if (IS_VAR_DATA_TYPE(pColData->type)) {
|
||||||
|
code = tRealloc((uint8_t **)(&pColData->aOffset), sizeof(int32_t) * (pColData->nVal + 1));
|
||||||
|
if (code) goto _exit;
|
||||||
|
pColData->aOffset[pColData->nVal] = pColData->nData;
|
||||||
|
|
||||||
|
if (pColVal->value.nData) {
|
||||||
|
code = tRealloc(&pColData->pData, pColData->nData + pColVal->value.nData);
|
||||||
|
if (code) goto _exit;
|
||||||
|
memcpy(pColData->pData + pColData->nData, pColVal->value.pData, pColVal->value.nData);
|
||||||
|
pColData->nData += pColVal->value.nData;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
ASSERT(pColData->nData == tDataTypes[pColData->type].bytes * pColData->nVal);
|
||||||
|
code = tRealloc(&pColData->pData, pColData->nData + tDataTypes[pColData->type].bytes);
|
||||||
|
if (code) goto _exit;
|
||||||
|
pColData->nData += tPutValue(pColData->pData + pColData->nData, &pColVal->value, pColVal->type);
|
||||||
|
}
|
||||||
|
|
||||||
|
_exit:
|
||||||
|
return code;
|
||||||
|
}
|
||||||
|
static FORCE_INLINE int32_t tColDataAppendValue0(SColData *pColData, SColVal *pColVal) { // 0
|
||||||
|
int32_t code = 0;
|
||||||
|
|
||||||
|
if (pColVal->isNone) {
|
||||||
|
pColData->flag = HAS_NONE;
|
||||||
|
} else if (pColVal->isNull) {
|
||||||
|
pColData->flag = HAS_NULL;
|
||||||
|
} else {
|
||||||
|
pColData->flag = HAS_VALUE;
|
||||||
|
code = tColDataPutValue(pColData, pColVal);
|
||||||
|
if (code) goto _exit;
|
||||||
|
}
|
||||||
|
pColData->nVal++;
|
||||||
|
|
||||||
|
_exit:
|
||||||
|
return code;
|
||||||
|
}
|
||||||
|
static FORCE_INLINE int32_t tColDataAppendValue1(SColData *pColData, SColVal *pColVal) { // HAS_NONE
|
||||||
|
int32_t code = 0;
|
||||||
|
|
||||||
|
if (!pColVal->isNone) {
|
||||||
|
int32_t nBit = BIT1_SIZE(pColData->nVal + 1);
|
||||||
|
|
||||||
|
code = tRealloc(&pColData->pBitMap, nBit);
|
||||||
|
if (code) goto _exit;
|
||||||
|
|
||||||
|
memset(pColData->pBitMap, 0, nBit);
|
||||||
|
SET_BIT1(pColData->pBitMap, pColData->nVal, 1);
|
||||||
|
|
||||||
|
if (pColVal->isNull) {
|
||||||
|
pColData->flag |= HAS_NULL;
|
||||||
|
} else {
|
||||||
|
pColData->flag |= HAS_VALUE;
|
||||||
|
|
||||||
|
if (pColData->nVal) {
|
||||||
|
if (IS_VAR_DATA_TYPE(pColData->type)) {
|
||||||
|
int32_t nOffset = sizeof(int32_t) * pColData->nVal;
|
||||||
|
code = tRealloc((uint8_t **)(&pColData->aOffset), nOffset);
|
||||||
|
if (code) goto _exit;
|
||||||
|
memset(pColData->aOffset, 0, nOffset);
|
||||||
|
} else {
|
||||||
|
pColData->nData = tDataTypes[pColData->type].bytes * pColData->nVal;
|
||||||
|
code = tRealloc(&pColData->pData, pColData->nData);
|
||||||
|
if (code) goto _exit;
|
||||||
|
memset(pColData->pData, 0, pColData->nData);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
code = tColDataPutValue(pColData, pColVal);
|
||||||
|
if (code) goto _exit;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
pColData->nVal++;
|
||||||
|
|
||||||
|
_exit:
|
||||||
|
return code;
|
||||||
|
}
|
||||||
|
static FORCE_INLINE int32_t tColDataAppendValue2(SColData *pColData, SColVal *pColVal) { // HAS_NULL
|
||||||
|
int32_t code = 0;
|
||||||
|
|
||||||
|
if (!pColVal->isNull) {
|
||||||
|
int32_t nBit = BIT1_SIZE(pColData->nVal + 1);
|
||||||
|
code = tRealloc(&pColData->pBitMap, nBit);
|
||||||
|
if (code) goto _exit;
|
||||||
|
|
||||||
|
if (pColVal->isNone) {
|
||||||
|
pColData->flag |= HAS_NONE;
|
||||||
|
|
||||||
|
memset(pColData->pBitMap, 255, nBit);
|
||||||
|
SET_BIT1(pColData->pBitMap, pColData->nVal, 0);
|
||||||
|
} else {
|
||||||
|
pColData->flag |= HAS_VALUE;
|
||||||
|
|
||||||
|
memset(pColData->pBitMap, 0, nBit);
|
||||||
|
SET_BIT1(pColData->pBitMap, pColData->nVal, 1);
|
||||||
|
|
||||||
|
if (pColData->nVal) {
|
||||||
|
if (IS_VAR_DATA_TYPE(pColData->type)) {
|
||||||
|
int32_t nOffset = sizeof(int32_t) * pColData->nVal;
|
||||||
|
code = tRealloc((uint8_t **)(&pColData->aOffset), nOffset);
|
||||||
|
if (code) goto _exit;
|
||||||
|
memset(pColData->aOffset, 0, nOffset);
|
||||||
|
} else {
|
||||||
|
pColData->nData = tDataTypes[pColData->type].bytes * pColData->nVal;
|
||||||
|
code = tRealloc(&pColData->pData, pColData->nData);
|
||||||
|
if (code) goto _exit;
|
||||||
|
memset(pColData->pData, 0, pColData->nData);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
code = tColDataPutValue(pColData, pColVal);
|
||||||
|
if (code) goto _exit;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
pColData->nVal++;
|
||||||
|
|
||||||
|
_exit:
|
||||||
|
return code;
|
||||||
|
}
|
||||||
|
static FORCE_INLINE int32_t tColDataAppendValue3(SColData *pColData, SColVal *pColVal) { // HAS_NULL|HAS_NONE
|
||||||
|
int32_t code = 0;
|
||||||
|
|
||||||
|
if (pColVal->isNone) {
|
||||||
|
code = tRealloc(&pColData->pBitMap, BIT1_SIZE(pColData->nVal + 1));
|
||||||
|
if (code) goto _exit;
|
||||||
|
|
||||||
|
SET_BIT1(pColData->pBitMap, pColData->nVal, 0);
|
||||||
|
} else if (pColVal->isNull) {
|
||||||
|
code = tRealloc(&pColData->pBitMap, BIT1_SIZE(pColData->nVal + 1));
|
||||||
|
if (code) goto _exit;
|
||||||
|
|
||||||
|
SET_BIT1(pColData->pBitMap, pColData->nVal, 1);
|
||||||
|
} else {
|
||||||
|
pColData->flag |= HAS_VALUE;
|
||||||
|
|
||||||
|
uint8_t *pBitMap = NULL;
|
||||||
|
code = tRealloc(&pBitMap, BIT2_SIZE(pColData->nVal + 1));
|
||||||
|
if (code) goto _exit;
|
||||||
|
|
||||||
|
for (int32_t iVal = 0; iVal < pColData->nVal; iVal++) {
|
||||||
|
SET_BIT2(pBitMap, iVal, GET_BIT1(pColData->pBitMap, iVal));
|
||||||
|
}
|
||||||
|
SET_BIT2(pBitMap, pColData->nVal, 2);
|
||||||
|
|
||||||
|
tFree(pColData->pBitMap);
|
||||||
|
pColData->pBitMap = pBitMap;
|
||||||
|
|
||||||
|
if (pColData->nVal) {
|
||||||
|
if (IS_VAR_DATA_TYPE(pColData->type)) {
|
||||||
|
int32_t nOffset = sizeof(int32_t) * pColData->nVal;
|
||||||
|
code = tRealloc((uint8_t **)(&pColData->aOffset), nOffset);
|
||||||
|
if (code) goto _exit;
|
||||||
|
memset(pColData->aOffset, 0, nOffset);
|
||||||
|
} else {
|
||||||
|
pColData->nData = tDataTypes[pColData->type].bytes * pColData->nVal;
|
||||||
|
code = tRealloc(&pColData->pData, pColData->nData);
|
||||||
|
if (code) goto _exit;
|
||||||
|
memset(pColData->pData, 0, pColData->nData);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
code = tColDataPutValue(pColData, pColVal);
|
||||||
|
if (code) goto _exit;
|
||||||
|
}
|
||||||
|
pColData->nVal++;
|
||||||
|
|
||||||
|
_exit:
|
||||||
|
return code;
|
||||||
|
}
|
||||||
|
static FORCE_INLINE int32_t tColDataAppendValue4(SColData *pColData, SColVal *pColVal) { // HAS_VALUE
|
||||||
|
int32_t code = 0;
|
||||||
|
|
||||||
|
if (pColVal->isNone || pColVal->isNull) {
|
||||||
|
if (pColVal->isNone) {
|
||||||
|
pColData->flag |= HAS_NONE;
|
||||||
|
} else {
|
||||||
|
pColData->flag |= HAS_NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t nBit = BIT1_SIZE(pColData->nVal + 1);
|
||||||
|
code = tRealloc(&pColData->pBitMap, nBit);
|
||||||
|
if (code) goto _exit;
|
||||||
|
|
||||||
|
memset(pColData->pBitMap, 255, nBit);
|
||||||
|
SET_BIT1(pColData->pBitMap, pColData->nVal, 0);
|
||||||
|
|
||||||
|
code = tColDataPutValue(pColData, pColVal);
|
||||||
|
if (code) goto _exit;
|
||||||
|
} else {
|
||||||
|
code = tColDataPutValue(pColData, pColVal);
|
||||||
|
if (code) goto _exit;
|
||||||
|
}
|
||||||
|
pColData->nVal++;
|
||||||
|
|
||||||
|
_exit:
|
||||||
|
return code;
|
||||||
|
}
|
||||||
|
static FORCE_INLINE int32_t tColDataAppendValue5(SColData *pColData, SColVal *pColVal) { // HAS_VALUE|HAS_NONE
|
||||||
|
int32_t code = 0;
|
||||||
|
|
||||||
|
if (pColVal->isNull) {
|
||||||
|
pColData->flag |= HAS_NULL;
|
||||||
|
|
||||||
|
uint8_t *pBitMap = NULL;
|
||||||
|
code = tRealloc(&pBitMap, BIT2_SIZE(pColData->nVal + 1));
|
||||||
|
if (code) goto _exit;
|
||||||
|
|
||||||
|
for (int32_t iVal = 0; iVal < pColData->nVal; iVal++) {
|
||||||
|
SET_BIT2(pBitMap, iVal, GET_BIT1(pColData->pBitMap, iVal) ? 2 : 0);
|
||||||
|
}
|
||||||
|
SET_BIT2(pBitMap, pColData->nVal, 1);
|
||||||
|
|
||||||
|
tFree(pColData->pBitMap);
|
||||||
|
pColData->pBitMap = pBitMap;
|
||||||
|
} else {
|
||||||
|
code = tRealloc(&pColData->pBitMap, BIT1_SIZE(pColData->nVal + 1));
|
||||||
|
if (code) goto _exit;
|
||||||
|
|
||||||
|
if (pColVal->isNone) {
|
||||||
|
SET_BIT1(pColData->pBitMap, pColData->nVal, 0);
|
||||||
|
} else {
|
||||||
|
SET_BIT1(pColData->pBitMap, pColData->nVal, 1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
code = tColDataPutValue(pColData, pColVal);
|
||||||
|
if (code) goto _exit;
|
||||||
|
|
||||||
|
pColData->nVal++;
|
||||||
|
|
||||||
|
_exit:
|
||||||
|
return code;
|
||||||
|
}
|
||||||
|
static FORCE_INLINE int32_t tColDataAppendValue6(SColData *pColData, SColVal *pColVal) { // HAS_VALUE|HAS_NULL
|
||||||
|
int32_t code = 0;
|
||||||
|
|
||||||
|
if (pColVal->isNone) {
|
||||||
|
pColData->flag |= HAS_NONE;
|
||||||
|
|
||||||
|
uint8_t *pBitMap = NULL;
|
||||||
|
code = tRealloc(&pBitMap, BIT2_SIZE(pColData->nVal + 1));
|
||||||
|
if (code) goto _exit;
|
||||||
|
|
||||||
|
for (int32_t iVal = 0; iVal < pColData->nVal; iVal++) {
|
||||||
|
SET_BIT2(pBitMap, iVal, GET_BIT1(pColData->pBitMap, iVal) ? 2 : 1);
|
||||||
|
}
|
||||||
|
SET_BIT2(pBitMap, pColData->nVal, 0);
|
||||||
|
|
||||||
|
tFree(pColData->pBitMap);
|
||||||
|
pColData->pBitMap = pBitMap;
|
||||||
|
} else {
|
||||||
|
code = tRealloc(&pColData->pBitMap, BIT1_SIZE(pColData->nVal + 1));
|
||||||
|
if (code) goto _exit;
|
||||||
|
|
||||||
|
if (pColVal->isNull) {
|
||||||
|
SET_BIT1(pColData->pBitMap, pColData->nVal, 0);
|
||||||
|
} else {
|
||||||
|
SET_BIT1(pColData->pBitMap, pColData->nVal, 1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
code = tColDataPutValue(pColData, pColVal);
|
||||||
|
if (code) goto _exit;
|
||||||
|
|
||||||
|
pColData->nVal++;
|
||||||
|
|
||||||
|
_exit:
|
||||||
|
return code;
|
||||||
|
}
|
||||||
|
static FORCE_INLINE int32_t tColDataAppendValue7(SColData *pColData,
|
||||||
|
SColVal *pColVal) { // HAS_VALUE|HAS_NULL|HAS_NONE
|
||||||
|
int32_t code = 0;
|
||||||
|
|
||||||
|
code = tRealloc(&pColData->pBitMap, BIT2_SIZE(pColData->nVal + 1));
|
||||||
|
if (code) goto _exit;
|
||||||
|
|
||||||
|
if (pColVal->isNone) {
|
||||||
|
SET_BIT2(pColData->pBitMap, pColData->nVal, 0);
|
||||||
|
} else if (pColVal->isNull) {
|
||||||
|
SET_BIT2(pColData->pBitMap, pColData->nVal, 1);
|
||||||
|
} else {
|
||||||
|
SET_BIT2(pColData->pBitMap, pColData->nVal, 2);
|
||||||
|
}
|
||||||
|
code = tColDataPutValue(pColData, pColVal);
|
||||||
|
if (code) goto _exit;
|
||||||
|
|
||||||
|
pColData->nVal++;
|
||||||
|
|
||||||
|
_exit:
|
||||||
|
return code;
|
||||||
|
}
|
||||||
|
static int32_t (*tColDataAppendValueImpl[])(SColData *pColData, SColVal *pColVal) = {
|
||||||
|
tColDataAppendValue0, // 0
|
||||||
|
tColDataAppendValue1, // HAS_NONE
|
||||||
|
tColDataAppendValue2, // HAS_NULL
|
||||||
|
tColDataAppendValue3, // HAS_NULL|HAS_NONE
|
||||||
|
tColDataAppendValue4, // HAS_VALUE
|
||||||
|
tColDataAppendValue5, // HAS_VALUE|HAS_NONE
|
||||||
|
tColDataAppendValue6, // HAS_VALUE|HAS_NULL
|
||||||
|
tColDataAppendValue7 // HAS_VALUE|HAS_NULL|HAS_NONE
|
||||||
|
};
|
||||||
|
int32_t tColDataAppendValue(SColData *pColData, SColVal *pColVal) {
|
||||||
|
ASSERT(pColData->cid == pColVal->cid && pColData->type == pColVal->type);
|
||||||
|
return tColDataAppendValueImpl[pColData->flag](pColData, pColVal);
|
||||||
|
}
|
||||||
|
|
||||||
|
static FORCE_INLINE void tColDataGetValue1(SColData *pColData, int32_t iVal, SColVal *pColVal) { // HAS_NONE
|
||||||
|
*pColVal = COL_VAL_NONE(pColData->cid, pColData->type);
|
||||||
|
}
|
||||||
|
static FORCE_INLINE void tColDataGetValue2(SColData *pColData, int32_t iVal, SColVal *pColVal) { // HAS_NULL
|
||||||
|
*pColVal = COL_VAL_NULL(pColData->cid, pColData->type);
|
||||||
|
}
|
||||||
|
static FORCE_INLINE void tColDataGetValue3(SColData *pColData, int32_t iVal, SColVal *pColVal) { // HAS_NULL|HAS_NONE
|
||||||
|
switch (GET_BIT1(pColData->pBitMap, iVal)) {
|
||||||
|
case 0:
|
||||||
|
*pColVal = COL_VAL_NONE(pColData->cid, pColData->type);
|
||||||
|
break;
|
||||||
|
case 1:
|
||||||
|
*pColVal = COL_VAL_NULL(pColData->cid, pColData->type);
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
ASSERT(0);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
static FORCE_INLINE void tColDataGetValue4(SColData *pColData, int32_t iVal, SColVal *pColVal) { // HAS_VALUE
|
||||||
|
SValue value;
|
||||||
|
if (IS_VAR_DATA_TYPE(pColData->type)) {
|
||||||
|
if (iVal + 1 < pColData->nVal) {
|
||||||
|
value.nData = pColData->aOffset[iVal + 1] - pColData->aOffset[iVal];
|
||||||
|
} else {
|
||||||
|
value.nData = pColData->nData - pColData->aOffset[iVal];
|
||||||
|
}
|
||||||
|
value.pData = pColData->pData + pColData->aOffset[iVal];
|
||||||
|
} else {
|
||||||
|
tGetValue(pColData->pData + tDataTypes[pColData->type].bytes * iVal, &value, pColData->type);
|
||||||
|
}
|
||||||
|
*pColVal = COL_VAL_VALUE(pColData->cid, pColData->type, value);
|
||||||
|
}
|
||||||
|
static FORCE_INLINE void tColDataGetValue5(SColData *pColData, int32_t iVal,
|
||||||
|
SColVal *pColVal) { // HAS_VALUE|HAS_NONE
|
||||||
|
switch (GET_BIT1(pColData->pBitMap, iVal)) {
|
||||||
|
case 0:
|
||||||
|
*pColVal = COL_VAL_NONE(pColData->cid, pColData->type);
|
||||||
|
break;
|
||||||
|
case 1:
|
||||||
|
tColDataGetValue4(pColData, iVal, pColVal);
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
ASSERT(0);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
static FORCE_INLINE void tColDataGetValue6(SColData *pColData, int32_t iVal,
|
||||||
|
SColVal *pColVal) { // HAS_VALUE|HAS_NULL
|
||||||
|
switch (GET_BIT1(pColData->pBitMap, iVal)) {
|
||||||
|
case 0:
|
||||||
|
*pColVal = COL_VAL_NULL(pColData->cid, pColData->type);
|
||||||
|
break;
|
||||||
|
case 1:
|
||||||
|
tColDataGetValue4(pColData, iVal, pColVal);
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
ASSERT(0);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
static FORCE_INLINE void tColDataGetValue7(SColData *pColData, int32_t iVal,
|
||||||
|
SColVal *pColVal) { // HAS_VALUE|HAS_NULL|HAS_NONE
|
||||||
|
switch (GET_BIT2(pColData->pBitMap, iVal)) {
|
||||||
|
case 0:
|
||||||
|
*pColVal = COL_VAL_NONE(pColData->cid, pColData->type);
|
||||||
|
break;
|
||||||
|
case 1:
|
||||||
|
*pColVal = COL_VAL_NULL(pColData->cid, pColData->type);
|
||||||
|
break;
|
||||||
|
case 2:
|
||||||
|
tColDataGetValue4(pColData, iVal, pColVal);
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
ASSERT(0);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
static void (*tColDataGetValueImpl[])(SColData *pColData, int32_t iVal, SColVal *pColVal) = {
|
||||||
|
NULL, // 0
|
||||||
|
tColDataGetValue1, // HAS_NONE
|
||||||
|
tColDataGetValue2, // HAS_NULL
|
||||||
|
tColDataGetValue3, // HAS_NULL | HAS_NONE
|
||||||
|
tColDataGetValue4, // HAS_VALUE
|
||||||
|
tColDataGetValue5, // HAS_VALUE | HAS_NONE
|
||||||
|
tColDataGetValue6, // HAS_VALUE | HAS_NULL
|
||||||
|
tColDataGetValue7 // HAS_VALUE | HAS_NULL | HAS_NONE
|
||||||
|
};
|
||||||
|
void tColDataGetValue(SColData *pColData, int32_t iVal, SColVal *pColVal) {
|
||||||
|
ASSERT(iVal >= 0 && iVal < pColData->nVal && pColData->flag);
|
||||||
|
tColDataGetValueImpl[pColData->flag](pColData, iVal, pColVal);
|
||||||
|
}
|
||||||
|
|
||||||
|
uint8_t tColDataGetBitValue(SColData *pColData, int32_t iVal) {
|
||||||
|
uint8_t v;
|
||||||
|
switch (pColData->flag) {
|
||||||
|
case HAS_NONE:
|
||||||
|
v = 0;
|
||||||
|
break;
|
||||||
|
case HAS_NULL:
|
||||||
|
v = 1;
|
||||||
|
break;
|
||||||
|
case (HAS_NULL | HAS_NONE):
|
||||||
|
v = GET_BIT1(pColData->pBitMap, iVal);
|
||||||
|
break;
|
||||||
|
case HAS_VALUE:
|
||||||
|
v = 2;
|
||||||
|
break;
|
||||||
|
case (HAS_VALUE | HAS_NONE):
|
||||||
|
v = GET_BIT1(pColData->pBitMap, iVal);
|
||||||
|
if (v) v = 2;
|
||||||
|
break;
|
||||||
|
case (HAS_VALUE | HAS_NULL):
|
||||||
|
v = GET_BIT1(pColData->pBitMap, iVal) + 1;
|
||||||
|
break;
|
||||||
|
case (HAS_VALUE | HAS_NULL | HAS_NONE):
|
||||||
|
v = GET_BIT2(pColData->pBitMap, iVal);
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
ASSERT(0);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
return v;
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t tColDataCopy(SColData *pColDataSrc, SColData *pColDataDest) {
|
||||||
|
int32_t code = 0;
|
||||||
|
int32_t size;
|
||||||
|
|
||||||
|
ASSERT(pColDataSrc->nVal > 0);
|
||||||
|
ASSERT(pColDataDest->cid = pColDataSrc->cid);
|
||||||
|
ASSERT(pColDataDest->type = pColDataSrc->type);
|
||||||
|
|
||||||
|
pColDataDest->smaOn = pColDataSrc->smaOn;
|
||||||
|
pColDataDest->nVal = pColDataSrc->nVal;
|
||||||
|
pColDataDest->flag = pColDataSrc->flag;
|
||||||
|
|
||||||
|
// bitmap
|
||||||
|
if (pColDataSrc->flag != HAS_NONE && pColDataSrc->flag != HAS_NULL && pColDataSrc->flag != HAS_VALUE) {
|
||||||
|
size = BIT2_SIZE(pColDataSrc->nVal);
|
||||||
|
code = tRealloc(&pColDataDest->pBitMap, size);
|
||||||
|
if (code) goto _exit;
|
||||||
|
memcpy(pColDataDest->pBitMap, pColDataSrc->pBitMap, size);
|
||||||
|
}
|
||||||
|
|
||||||
|
// offset
|
||||||
|
if (IS_VAR_DATA_TYPE(pColDataDest->type)) {
|
||||||
|
size = sizeof(int32_t) * pColDataSrc->nVal;
|
||||||
|
|
||||||
|
code = tRealloc((uint8_t **)&pColDataDest->aOffset, size);
|
||||||
|
if (code) goto _exit;
|
||||||
|
|
||||||
|
memcpy(pColDataDest->aOffset, pColDataSrc->aOffset, size);
|
||||||
|
}
|
||||||
|
|
||||||
|
// value
|
||||||
|
pColDataDest->nData = pColDataSrc->nData;
|
||||||
|
code = tRealloc(&pColDataDest->pData, pColDataSrc->nData);
|
||||||
|
if (code) goto _exit;
|
||||||
|
memcpy(pColDataDest->pData, pColDataSrc->pData, pColDataDest->nData);
|
||||||
|
|
||||||
|
_exit:
|
||||||
|
return code;
|
||||||
|
}
|
|
@ -63,7 +63,7 @@ int32_t tsNumOfVnodeWriteThreads = 2;
|
||||||
int32_t tsNumOfVnodeSyncThreads = 2;
|
int32_t tsNumOfVnodeSyncThreads = 2;
|
||||||
int32_t tsNumOfVnodeRsmaThreads = 2;
|
int32_t tsNumOfVnodeRsmaThreads = 2;
|
||||||
int32_t tsNumOfQnodeQueryThreads = 4;
|
int32_t tsNumOfQnodeQueryThreads = 4;
|
||||||
int32_t tsNumOfQnodeFetchThreads = 4;
|
int32_t tsNumOfQnodeFetchThreads = 1;
|
||||||
int32_t tsNumOfSnodeSharedThreads = 2;
|
int32_t tsNumOfSnodeSharedThreads = 2;
|
||||||
int32_t tsNumOfSnodeUniqueThreads = 2;
|
int32_t tsNumOfSnodeUniqueThreads = 2;
|
||||||
|
|
||||||
|
@ -129,10 +129,6 @@ int32_t tsMinIntervalTime = 1;
|
||||||
int32_t tsQueryBufferSize = -1;
|
int32_t tsQueryBufferSize = -1;
|
||||||
int64_t tsQueryBufferSizeBytes = -1;
|
int64_t tsQueryBufferSizeBytes = -1;
|
||||||
|
|
||||||
// tsdb config
|
|
||||||
// For backward compatibility
|
|
||||||
bool tsdbForceKeepFile = false;
|
|
||||||
|
|
||||||
int32_t tsDiskCfgNum = 0;
|
int32_t tsDiskCfgNum = 0;
|
||||||
SDiskCfg tsDiskCfg[TFS_MAX_DISKS] = {0};
|
SDiskCfg tsDiskCfg[TFS_MAX_DISKS] = {0};
|
||||||
|
|
||||||
|
@ -389,9 +385,9 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
|
||||||
tsNumOfQnodeQueryThreads = TMAX(tsNumOfQnodeQueryThreads, 4);
|
tsNumOfQnodeQueryThreads = TMAX(tsNumOfQnodeQueryThreads, 4);
|
||||||
if (cfgAddInt32(pCfg, "numOfQnodeQueryThreads", tsNumOfQnodeQueryThreads, 1, 1024, 0) != 0) return -1;
|
if (cfgAddInt32(pCfg, "numOfQnodeQueryThreads", tsNumOfQnodeQueryThreads, 1, 1024, 0) != 0) return -1;
|
||||||
|
|
||||||
tsNumOfQnodeFetchThreads = tsNumOfCores / 2;
|
// tsNumOfQnodeFetchThreads = tsNumOfCores / 2;
|
||||||
tsNumOfQnodeFetchThreads = TMAX(tsNumOfQnodeFetchThreads, 4);
|
// tsNumOfQnodeFetchThreads = TMAX(tsNumOfQnodeFetchThreads, 4);
|
||||||
if (cfgAddInt32(pCfg, "numOfQnodeFetchThreads", tsNumOfQnodeFetchThreads, 1, 1024, 0) != 0) return -1;
|
// if (cfgAddInt32(pCfg, "numOfQnodeFetchThreads", tsNumOfQnodeFetchThreads, 1, 1024, 0) != 0) return -1;
|
||||||
|
|
||||||
tsNumOfSnodeSharedThreads = tsNumOfCores / 4;
|
tsNumOfSnodeSharedThreads = tsNumOfCores / 4;
|
||||||
tsNumOfSnodeSharedThreads = TRANGE(tsNumOfSnodeSharedThreads, 2, 4);
|
tsNumOfSnodeSharedThreads = TRANGE(tsNumOfSnodeSharedThreads, 2, 4);
|
||||||
|
@ -531,6 +527,7 @@ static int32_t taosUpdateServerCfg(SConfig *pCfg) {
|
||||||
pItem->stype = stype;
|
pItem->stype = stype;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
pItem = cfgGetItem(tsCfg, "numOfQnodeFetchThreads");
|
pItem = cfgGetItem(tsCfg, "numOfQnodeFetchThreads");
|
||||||
if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
|
if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
|
||||||
tsNumOfQnodeFetchThreads = numOfCores / 2;
|
tsNumOfQnodeFetchThreads = numOfCores / 2;
|
||||||
|
@ -538,6 +535,7 @@ static int32_t taosUpdateServerCfg(SConfig *pCfg) {
|
||||||
pItem->i32 = tsNumOfQnodeFetchThreads;
|
pItem->i32 = tsNumOfQnodeFetchThreads;
|
||||||
pItem->stype = stype;
|
pItem->stype = stype;
|
||||||
}
|
}
|
||||||
|
*/
|
||||||
|
|
||||||
pItem = cfgGetItem(tsCfg, "numOfSnodeSharedThreads");
|
pItem = cfgGetItem(tsCfg, "numOfSnodeSharedThreads");
|
||||||
if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
|
if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
|
||||||
|
@ -695,7 +693,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
|
||||||
tsNumOfVnodeSyncThreads = cfgGetItem(pCfg, "numOfVnodeSyncThreads")->i32;
|
tsNumOfVnodeSyncThreads = cfgGetItem(pCfg, "numOfVnodeSyncThreads")->i32;
|
||||||
tsNumOfVnodeRsmaThreads = cfgGetItem(pCfg, "numOfVnodeRsmaThreads")->i32;
|
tsNumOfVnodeRsmaThreads = cfgGetItem(pCfg, "numOfVnodeRsmaThreads")->i32;
|
||||||
tsNumOfQnodeQueryThreads = cfgGetItem(pCfg, "numOfQnodeQueryThreads")->i32;
|
tsNumOfQnodeQueryThreads = cfgGetItem(pCfg, "numOfQnodeQueryThreads")->i32;
|
||||||
tsNumOfQnodeFetchThreads = cfgGetItem(pCfg, "numOfQnodeFetchThreads")->i32;
|
// tsNumOfQnodeFetchThreads = cfgGetItem(pCfg, "numOfQnodeFetchThreads")->i32;
|
||||||
tsNumOfSnodeSharedThreads = cfgGetItem(pCfg, "numOfSnodeSharedThreads")->i32;
|
tsNumOfSnodeSharedThreads = cfgGetItem(pCfg, "numOfSnodeSharedThreads")->i32;
|
||||||
tsNumOfSnodeUniqueThreads = cfgGetItem(pCfg, "numOfSnodeUniqueThreads")->i32;
|
tsNumOfSnodeUniqueThreads = cfgGetItem(pCfg, "numOfSnodeUniqueThreads")->i32;
|
||||||
tsRpcQueueMemoryAllowed = cfgGetItem(pCfg, "rpcQueueMemoryAllowed")->i64;
|
tsRpcQueueMemoryAllowed = cfgGetItem(pCfg, "rpcQueueMemoryAllowed")->i64;
|
||||||
|
@ -943,8 +941,10 @@ int32_t taosSetCfg(SConfig *pCfg, char *name) {
|
||||||
tsNumOfVnodeRsmaThreads = cfgGetItem(pCfg, "numOfVnodeRsmaThreads")->i32;
|
tsNumOfVnodeRsmaThreads = cfgGetItem(pCfg, "numOfVnodeRsmaThreads")->i32;
|
||||||
} else if (strcasecmp("numOfQnodeQueryThreads", name) == 0) {
|
} else if (strcasecmp("numOfQnodeQueryThreads", name) == 0) {
|
||||||
tsNumOfQnodeQueryThreads = cfgGetItem(pCfg, "numOfQnodeQueryThreads")->i32;
|
tsNumOfQnodeQueryThreads = cfgGetItem(pCfg, "numOfQnodeQueryThreads")->i32;
|
||||||
|
/*
|
||||||
} else if (strcasecmp("numOfQnodeFetchThreads", name) == 0) {
|
} else if (strcasecmp("numOfQnodeFetchThreads", name) == 0) {
|
||||||
tsNumOfQnodeFetchThreads = cfgGetItem(pCfg, "numOfQnodeFetchThreads")->i32;
|
tsNumOfQnodeFetchThreads = cfgGetItem(pCfg, "numOfQnodeFetchThreads")->i32;
|
||||||
|
*/
|
||||||
} else if (strcasecmp("numOfSnodeSharedThreads", name) == 0) {
|
} else if (strcasecmp("numOfSnodeSharedThreads", name) == 0) {
|
||||||
tsNumOfSnodeSharedThreads = cfgGetItem(pCfg, "numOfSnodeSharedThreads")->i32;
|
tsNumOfSnodeSharedThreads = cfgGetItem(pCfg, "numOfSnodeSharedThreads")->i32;
|
||||||
} else if (strcasecmp("numOfSnodeUniqueThreads", name) == 0) {
|
} else if (strcasecmp("numOfSnodeUniqueThreads", name) == 0) {
|
||||||
|
@ -1133,7 +1133,7 @@ int32_t taosCreateLog(const char *logname, int32_t logFileNum, const char *cfgDi
|
||||||
|
|
||||||
taosSetAllDebugFlag(cfgGetItem(pCfg, "debugFlag")->i32, false);
|
taosSetAllDebugFlag(cfgGetItem(pCfg, "debugFlag")->i32, false);
|
||||||
|
|
||||||
if (taosMulMkDir(tsLogDir) != 0) {
|
if (taosMulModeMkDir(tsLogDir, 0777) != 0) {
|
||||||
uError("failed to create dir:%s since %s", tsLogDir, terrstr());
|
uError("failed to create dir:%s since %s", tsLogDir, terrstr());
|
||||||
cfgCleanup(pCfg);
|
cfgCleanup(pCfg);
|
||||||
return -1;
|
return -1;
|
||||||
|
|
|
@ -994,6 +994,7 @@ int32_t tSerializeSStatusReq(void *buf, int32_t bufLen, SStatusReq *pReq) {
|
||||||
SVnodeLoad *pload = taosArrayGet(pReq->pVloads, i);
|
SVnodeLoad *pload = taosArrayGet(pReq->pVloads, i);
|
||||||
if (tEncodeI32(&encoder, pload->vgId) < 0) return -1;
|
if (tEncodeI32(&encoder, pload->vgId) < 0) return -1;
|
||||||
if (tEncodeI32(&encoder, pload->syncState) < 0) return -1;
|
if (tEncodeI32(&encoder, pload->syncState) < 0) return -1;
|
||||||
|
if (tEncodeI64(&encoder, pload->cacheUsage) < 0) return -1;
|
||||||
if (tEncodeI64(&encoder, pload->numOfTables) < 0) return -1;
|
if (tEncodeI64(&encoder, pload->numOfTables) < 0) return -1;
|
||||||
if (tEncodeI64(&encoder, pload->numOfTimeSeries) < 0) return -1;
|
if (tEncodeI64(&encoder, pload->numOfTimeSeries) < 0) return -1;
|
||||||
if (tEncodeI64(&encoder, pload->totalStorage) < 0) return -1;
|
if (tEncodeI64(&encoder, pload->totalStorage) < 0) return -1;
|
||||||
|
@ -1063,6 +1064,7 @@ int32_t tDeserializeSStatusReq(void *buf, int32_t bufLen, SStatusReq *pReq) {
|
||||||
SVnodeLoad vload = {0};
|
SVnodeLoad vload = {0};
|
||||||
if (tDecodeI32(&decoder, &vload.vgId) < 0) return -1;
|
if (tDecodeI32(&decoder, &vload.vgId) < 0) return -1;
|
||||||
if (tDecodeI32(&decoder, &vload.syncState) < 0) return -1;
|
if (tDecodeI32(&decoder, &vload.syncState) < 0) return -1;
|
||||||
|
if (tDecodeI64(&decoder, &vload.cacheUsage) < 0) return -1;
|
||||||
if (tDecodeI64(&decoder, &vload.numOfTables) < 0) return -1;
|
if (tDecodeI64(&decoder, &vload.numOfTables) < 0) return -1;
|
||||||
if (tDecodeI64(&decoder, &vload.numOfTimeSeries) < 0) return -1;
|
if (tDecodeI64(&decoder, &vload.numOfTimeSeries) < 0) return -1;
|
||||||
if (tDecodeI64(&decoder, &vload.totalStorage) < 0) return -1;
|
if (tDecodeI64(&decoder, &vload.totalStorage) < 0) return -1;
|
||||||
|
@ -2024,6 +2026,9 @@ int32_t tSerializeSCreateDbReq(void *buf, int32_t bufLen, SCreateDbReq *pReq) {
|
||||||
if (tEncodeI64(&encoder, pReq->walRetentionSize) < 0) return -1;
|
if (tEncodeI64(&encoder, pReq->walRetentionSize) < 0) return -1;
|
||||||
if (tEncodeI32(&encoder, pReq->walRollPeriod) < 0) return -1;
|
if (tEncodeI32(&encoder, pReq->walRollPeriod) < 0) return -1;
|
||||||
if (tEncodeI64(&encoder, pReq->walSegmentSize) < 0) return -1;
|
if (tEncodeI64(&encoder, pReq->walSegmentSize) < 0) return -1;
|
||||||
|
if (tEncodeI32(&encoder, pReq->sstTrigger) < 0) return -1;
|
||||||
|
if (tEncodeI16(&encoder, pReq->hashPrefix) < 0) return -1;
|
||||||
|
if (tEncodeI16(&encoder, pReq->hashSuffix) < 0) return -1;
|
||||||
if (tEncodeI8(&encoder, pReq->ignoreExist) < 0) return -1;
|
if (tEncodeI8(&encoder, pReq->ignoreExist) < 0) return -1;
|
||||||
if (tEncodeI32(&encoder, pReq->numOfRetensions) < 0) return -1;
|
if (tEncodeI32(&encoder, pReq->numOfRetensions) < 0) return -1;
|
||||||
for (int32_t i = 0; i < pReq->numOfRetensions; ++i) {
|
for (int32_t i = 0; i < pReq->numOfRetensions; ++i) {
|
||||||
|
@ -2033,6 +2038,7 @@ int32_t tSerializeSCreateDbReq(void *buf, int32_t bufLen, SCreateDbReq *pReq) {
|
||||||
if (tEncodeI8(&encoder, pRetension->freqUnit) < 0) return -1;
|
if (tEncodeI8(&encoder, pRetension->freqUnit) < 0) return -1;
|
||||||
if (tEncodeI8(&encoder, pRetension->keepUnit) < 0) return -1;
|
if (tEncodeI8(&encoder, pRetension->keepUnit) < 0) return -1;
|
||||||
}
|
}
|
||||||
|
if (tEncodeI32(&encoder, pReq->tsdbPageSize) < 0) return -1;
|
||||||
tEndEncode(&encoder);
|
tEndEncode(&encoder);
|
||||||
|
|
||||||
int32_t tlen = encoder.pos;
|
int32_t tlen = encoder.pos;
|
||||||
|
@ -2070,6 +2076,9 @@ int32_t tDeserializeSCreateDbReq(void *buf, int32_t bufLen, SCreateDbReq *pReq)
|
||||||
if (tDecodeI64(&decoder, &pReq->walRetentionSize) < 0) return -1;
|
if (tDecodeI64(&decoder, &pReq->walRetentionSize) < 0) return -1;
|
||||||
if (tDecodeI32(&decoder, &pReq->walRollPeriod) < 0) return -1;
|
if (tDecodeI32(&decoder, &pReq->walRollPeriod) < 0) return -1;
|
||||||
if (tDecodeI64(&decoder, &pReq->walSegmentSize) < 0) return -1;
|
if (tDecodeI64(&decoder, &pReq->walSegmentSize) < 0) return -1;
|
||||||
|
if (tDecodeI32(&decoder, &pReq->sstTrigger) < 0) return -1;
|
||||||
|
if (tDecodeI16(&decoder, &pReq->hashPrefix) < 0) return -1;
|
||||||
|
if (tDecodeI16(&decoder, &pReq->hashSuffix) < 0) return -1;
|
||||||
if (tDecodeI8(&decoder, &pReq->ignoreExist) < 0) return -1;
|
if (tDecodeI8(&decoder, &pReq->ignoreExist) < 0) return -1;
|
||||||
if (tDecodeI32(&decoder, &pReq->numOfRetensions) < 0) return -1;
|
if (tDecodeI32(&decoder, &pReq->numOfRetensions) < 0) return -1;
|
||||||
pReq->pRetensions = taosArrayInit(pReq->numOfRetensions, sizeof(SRetention));
|
pReq->pRetensions = taosArrayInit(pReq->numOfRetensions, sizeof(SRetention));
|
||||||
|
@ -2090,6 +2099,8 @@ int32_t tDeserializeSCreateDbReq(void *buf, int32_t bufLen, SCreateDbReq *pReq)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (tDecodeI32(&decoder, &pReq->tsdbPageSize) < 0) return -1;
|
||||||
|
|
||||||
tEndDecode(&decoder);
|
tEndDecode(&decoder);
|
||||||
|
|
||||||
tDecoderClear(&decoder);
|
tDecoderClear(&decoder);
|
||||||
|
@ -2120,6 +2131,7 @@ int32_t tSerializeSAlterDbReq(void *buf, int32_t bufLen, SAlterDbReq *pReq) {
|
||||||
if (tEncodeI8(&encoder, pReq->strict) < 0) return -1;
|
if (tEncodeI8(&encoder, pReq->strict) < 0) return -1;
|
||||||
if (tEncodeI8(&encoder, pReq->cacheLast) < 0) return -1;
|
if (tEncodeI8(&encoder, pReq->cacheLast) < 0) return -1;
|
||||||
if (tEncodeI8(&encoder, pReq->replications) < 0) return -1;
|
if (tEncodeI8(&encoder, pReq->replications) < 0) return -1;
|
||||||
|
if (tEncodeI32(&encoder, pReq->sstTrigger) < 0) return -1;
|
||||||
tEndEncode(&encoder);
|
tEndEncode(&encoder);
|
||||||
|
|
||||||
int32_t tlen = encoder.pos;
|
int32_t tlen = encoder.pos;
|
||||||
|
@ -2146,6 +2158,7 @@ int32_t tDeserializeSAlterDbReq(void *buf, int32_t bufLen, SAlterDbReq *pReq) {
|
||||||
if (tDecodeI8(&decoder, &pReq->strict) < 0) return -1;
|
if (tDecodeI8(&decoder, &pReq->strict) < 0) return -1;
|
||||||
if (tDecodeI8(&decoder, &pReq->cacheLast) < 0) return -1;
|
if (tDecodeI8(&decoder, &pReq->cacheLast) < 0) return -1;
|
||||||
if (tDecodeI8(&decoder, &pReq->replications) < 0) return -1;
|
if (tDecodeI8(&decoder, &pReq->replications) < 0) return -1;
|
||||||
|
if (tDecodeI32(&decoder, &pReq->sstTrigger) < 0) return -1;
|
||||||
tEndDecode(&decoder);
|
tEndDecode(&decoder);
|
||||||
|
|
||||||
tDecoderClear(&decoder);
|
tDecoderClear(&decoder);
|
||||||
|
@ -2453,6 +2466,8 @@ int32_t tSerializeSUseDbRspImp(SEncoder *pEncoder, const SUseDbRsp *pRsp) {
|
||||||
if (tEncodeI64(pEncoder, pRsp->uid) < 0) return -1;
|
if (tEncodeI64(pEncoder, pRsp->uid) < 0) return -1;
|
||||||
if (tEncodeI32(pEncoder, pRsp->vgVersion) < 0) return -1;
|
if (tEncodeI32(pEncoder, pRsp->vgVersion) < 0) return -1;
|
||||||
if (tEncodeI32(pEncoder, pRsp->vgNum) < 0) return -1;
|
if (tEncodeI32(pEncoder, pRsp->vgNum) < 0) return -1;
|
||||||
|
if (tEncodeI16(pEncoder, pRsp->hashPrefix) < 0) return -1;
|
||||||
|
if (tEncodeI16(pEncoder, pRsp->hashSuffix) < 0) return -1;
|
||||||
if (tEncodeI8(pEncoder, pRsp->hashMethod) < 0) return -1;
|
if (tEncodeI8(pEncoder, pRsp->hashMethod) < 0) return -1;
|
||||||
|
|
||||||
for (int32_t i = 0; i < pRsp->vgNum; ++i) {
|
for (int32_t i = 0; i < pRsp->vgNum; ++i) {
|
||||||
|
@ -2504,6 +2519,8 @@ int32_t tDeserializeSUseDbRspImp(SDecoder *pDecoder, SUseDbRsp *pRsp) {
|
||||||
if (tDecodeI64(pDecoder, &pRsp->uid) < 0) return -1;
|
if (tDecodeI64(pDecoder, &pRsp->uid) < 0) return -1;
|
||||||
if (tDecodeI32(pDecoder, &pRsp->vgVersion) < 0) return -1;
|
if (tDecodeI32(pDecoder, &pRsp->vgVersion) < 0) return -1;
|
||||||
if (tDecodeI32(pDecoder, &pRsp->vgNum) < 0) return -1;
|
if (tDecodeI32(pDecoder, &pRsp->vgNum) < 0) return -1;
|
||||||
|
if (tDecodeI16(pDecoder, &pRsp->hashPrefix) < 0) return -1;
|
||||||
|
if (tDecodeI16(pDecoder, &pRsp->hashSuffix) < 0) return -1;
|
||||||
if (tDecodeI8(pDecoder, &pRsp->hashMethod) < 0) return -1;
|
if (tDecodeI8(pDecoder, &pRsp->hashMethod) < 0) return -1;
|
||||||
|
|
||||||
if (pRsp->vgNum <= 0) {
|
if (pRsp->vgNum <= 0) {
|
||||||
|
@ -3330,7 +3347,13 @@ int32_t tDeserializeSSTbHbRsp(void *buf, int32_t bufLen, SSTbHbRsp *pRsp) {
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
void tFreeSTableMetaRsp(void *pRsp) { taosMemoryFreeClear(((STableMetaRsp *)pRsp)->pSchemas); }
|
void tFreeSTableMetaRsp(void *pRsp) {
|
||||||
|
if (NULL == pRsp) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
taosMemoryFreeClear(((STableMetaRsp *)pRsp)->pSchemas);
|
||||||
|
}
|
||||||
|
|
||||||
void tFreeSTableIndexRsp(void *info) {
|
void tFreeSTableIndexRsp(void *info) {
|
||||||
if (NULL == info) {
|
if (NULL == info) {
|
||||||
|
@ -3762,6 +3785,10 @@ int32_t tSerializeSCreateVnodeReq(void *buf, int32_t bufLen, SCreateVnodeReq *pR
|
||||||
if (tEncodeI64(&encoder, pReq->walRetentionSize) < 0) return -1;
|
if (tEncodeI64(&encoder, pReq->walRetentionSize) < 0) return -1;
|
||||||
if (tEncodeI32(&encoder, pReq->walRollPeriod) < 0) return -1;
|
if (tEncodeI32(&encoder, pReq->walRollPeriod) < 0) return -1;
|
||||||
if (tEncodeI64(&encoder, pReq->walSegmentSize) < 0) return -1;
|
if (tEncodeI64(&encoder, pReq->walSegmentSize) < 0) return -1;
|
||||||
|
if (tEncodeI16(&encoder, pReq->sstTrigger) < 0) return -1;
|
||||||
|
if (tEncodeI16(&encoder, pReq->hashPrefix) < 0) return -1;
|
||||||
|
if (tEncodeI16(&encoder, pReq->hashSuffix) < 0) return -1;
|
||||||
|
if (tEncodeI32(&encoder, pReq->tsdbPageSize) < 0) return -1;
|
||||||
|
|
||||||
tEndEncode(&encoder);
|
tEndEncode(&encoder);
|
||||||
|
|
||||||
|
@ -3834,6 +3861,10 @@ int32_t tDeserializeSCreateVnodeReq(void *buf, int32_t bufLen, SCreateVnodeReq *
|
||||||
if (tDecodeI64(&decoder, &pReq->walRetentionSize) < 0) return -1;
|
if (tDecodeI64(&decoder, &pReq->walRetentionSize) < 0) return -1;
|
||||||
if (tDecodeI32(&decoder, &pReq->walRollPeriod) < 0) return -1;
|
if (tDecodeI32(&decoder, &pReq->walRollPeriod) < 0) return -1;
|
||||||
if (tDecodeI64(&decoder, &pReq->walSegmentSize) < 0) return -1;
|
if (tDecodeI64(&decoder, &pReq->walSegmentSize) < 0) return -1;
|
||||||
|
if (tDecodeI16(&decoder, &pReq->sstTrigger) < 0) return -1;
|
||||||
|
if (tDecodeI16(&decoder, &pReq->hashPrefix) < 0) return -1;
|
||||||
|
if (tDecodeI16(&decoder, &pReq->hashSuffix) < 0) return -1;
|
||||||
|
if (tDecodeI32(&decoder, &pReq->tsdbPageSize) < 0) return -1;
|
||||||
|
|
||||||
tEndDecode(&decoder);
|
tEndDecode(&decoder);
|
||||||
tDecoderClear(&decoder);
|
tDecoderClear(&decoder);
|
||||||
|
@ -4711,9 +4742,8 @@ int32_t tSerializeSVDeleteReq(void *buf, int32_t bufLen, SVDeleteReq *pReq) {
|
||||||
if (tEncodeU64(&encoder, pReq->queryId) < 0) return -1;
|
if (tEncodeU64(&encoder, pReq->queryId) < 0) return -1;
|
||||||
if (tEncodeU64(&encoder, pReq->taskId) < 0) return -1;
|
if (tEncodeU64(&encoder, pReq->taskId) < 0) return -1;
|
||||||
if (tEncodeU32(&encoder, pReq->sqlLen) < 0) return -1;
|
if (tEncodeU32(&encoder, pReq->sqlLen) < 0) return -1;
|
||||||
if (tEncodeU32(&encoder, pReq->phyLen) < 0) return -1;
|
|
||||||
if (tEncodeCStr(&encoder, pReq->sql) < 0) return -1;
|
if (tEncodeCStr(&encoder, pReq->sql) < 0) return -1;
|
||||||
if (tEncodeCStr(&encoder, pReq->msg) < 0) return -1;
|
if (tEncodeBinary(&encoder, pReq->msg, pReq->phyLen) < 0) return -1;
|
||||||
tEndEncode(&encoder);
|
tEndEncode(&encoder);
|
||||||
|
|
||||||
int32_t tlen = encoder.pos;
|
int32_t tlen = encoder.pos;
|
||||||
|
@ -4743,13 +4773,12 @@ int32_t tDeserializeSVDeleteReq(void *buf, int32_t bufLen, SVDeleteReq *pReq) {
|
||||||
if (tDecodeU64(&decoder, &pReq->queryId) < 0) return -1;
|
if (tDecodeU64(&decoder, &pReq->queryId) < 0) return -1;
|
||||||
if (tDecodeU64(&decoder, &pReq->taskId) < 0) return -1;
|
if (tDecodeU64(&decoder, &pReq->taskId) < 0) return -1;
|
||||||
if (tDecodeU32(&decoder, &pReq->sqlLen) < 0) return -1;
|
if (tDecodeU32(&decoder, &pReq->sqlLen) < 0) return -1;
|
||||||
if (tDecodeU32(&decoder, &pReq->phyLen) < 0) return -1;
|
|
||||||
pReq->sql = taosMemoryCalloc(1, pReq->sqlLen + 1);
|
pReq->sql = taosMemoryCalloc(1, pReq->sqlLen + 1);
|
||||||
if (NULL == pReq->sql) return -1;
|
if (NULL == pReq->sql) return -1;
|
||||||
pReq->msg = taosMemoryCalloc(1, pReq->phyLen + 1);
|
|
||||||
if (NULL == pReq->msg) return -1;
|
|
||||||
if (tDecodeCStrTo(&decoder, pReq->sql) < 0) return -1;
|
if (tDecodeCStrTo(&decoder, pReq->sql) < 0) return -1;
|
||||||
if (tDecodeCStrTo(&decoder, pReq->msg) < 0) return -1;
|
uint64_t msgLen = 0;
|
||||||
|
if (tDecodeBinaryAlloc(&decoder, (void **)&pReq->msg, &msgLen) < 0) return -1;
|
||||||
|
pReq->phyLen = msgLen;
|
||||||
|
|
||||||
tEndDecode(&decoder);
|
tEndDecode(&decoder);
|
||||||
|
|
||||||
|
@ -5429,6 +5458,8 @@ void tFreeSSubmitRsp(SSubmitRsp *pRsp) {
|
||||||
for (int32_t i = 0; i < pRsp->nBlocks; ++i) {
|
for (int32_t i = 0; i < pRsp->nBlocks; ++i) {
|
||||||
SSubmitBlkRsp *sRsp = pRsp->pBlocks + i;
|
SSubmitBlkRsp *sRsp = pRsp->pBlocks + i;
|
||||||
taosMemoryFree(sRsp->tblFName);
|
taosMemoryFree(sRsp->tblFName);
|
||||||
|
tFreeSTableMetaRsp(sRsp->pMeta);
|
||||||
|
taosMemoryFree(sRsp->pMeta);
|
||||||
}
|
}
|
||||||
|
|
||||||
taosMemoryFree(pRsp->pBlocks);
|
taosMemoryFree(pRsp->pBlocks);
|
||||||
|
|
|
@ -538,12 +538,12 @@ bool tdSTSRowIterGetTpVal(STSRowIter *pIter, col_type_t colType, int32_t offset,
|
||||||
} else {
|
} else {
|
||||||
pVal->val = POINTER_SHIFT(TD_ROW_DATA(pRow), offset);
|
pVal->val = POINTER_SHIFT(TD_ROW_DATA(pRow), offset);
|
||||||
}
|
}
|
||||||
return TSDB_CODE_SUCCESS;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (tdGetBitmapValType(pIter->pBitmap, pIter->colIdx - 1, &pVal->valType, 0) != TSDB_CODE_SUCCESS) {
|
if (tdGetBitmapValType(pIter->pBitmap, pIter->colIdx - 1, &pVal->valType, 0) != TSDB_CODE_SUCCESS) {
|
||||||
pVal->valType = TD_VTYPE_NONE;
|
pVal->valType = TD_VTYPE_NONE;
|
||||||
return terrno;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (pVal->valType == TD_VTYPE_NORM) {
|
if (pVal->valType == TD_VTYPE_NORM) {
|
||||||
|
|
|
@ -167,9 +167,13 @@ static void vmGenerateVnodeCfg(SCreateVnodeReq *pCreate, SVnodeCfg *pCfg) {
|
||||||
pCfg->walCfg.segSize = pCreate->walSegmentSize;
|
pCfg->walCfg.segSize = pCreate->walSegmentSize;
|
||||||
pCfg->walCfg.level = pCreate->walLevel;
|
pCfg->walCfg.level = pCreate->walLevel;
|
||||||
|
|
||||||
|
pCfg->sttTrigger = pCreate->sstTrigger;
|
||||||
pCfg->hashBegin = pCreate->hashBegin;
|
pCfg->hashBegin = pCreate->hashBegin;
|
||||||
pCfg->hashEnd = pCreate->hashEnd;
|
pCfg->hashEnd = pCreate->hashEnd;
|
||||||
pCfg->hashMethod = pCreate->hashMethod;
|
pCfg->hashMethod = pCreate->hashMethod;
|
||||||
|
pCfg->hashPrefix = pCreate->hashPrefix;
|
||||||
|
pCfg->hashSuffix = pCreate->hashSuffix;
|
||||||
|
pCfg->tsdbPageSize = pCreate->tsdbPageSize * 1024;
|
||||||
|
|
||||||
pCfg->standby = pCfg->standby;
|
pCfg->standby = pCfg->standby;
|
||||||
pCfg->syncCfg.myIndex = pCreate->selfIndex;
|
pCfg->syncCfg.myIndex = pCreate->selfIndex;
|
||||||
|
@ -219,8 +223,13 @@ int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
dDebug("vgId:%d, start to create vnode, tsma:%d standby:%d cacheLast:%d cacheLastSize:%d", createReq.vgId,
|
dInfo(
|
||||||
createReq.isTsma, createReq.standby, createReq.cacheLast, createReq.cacheLastSize);
|
"vgId:%d, start to create vnode, tsma:%d standby:%d cacheLast:%d cacheLastSize:%d sstTrigger:%d "
|
||||||
|
"tsdbPageSize:%d",
|
||||||
|
createReq.vgId, createReq.isTsma, createReq.standby, createReq.cacheLast, createReq.cacheLastSize,
|
||||||
|
createReq.sstTrigger, createReq.tsdbPageSize);
|
||||||
|
dInfo("vgId:%d, hashMethod:%d begin:%u end:%u prefix:%d surfix:%d", createReq.vgId, createReq.hashMethod,
|
||||||
|
createReq.hashBegin, createReq.hashEnd, createReq.hashPrefix, createReq.hashSuffix);
|
||||||
vmGenerateVnodeCfg(&createReq, &vnodeCfg);
|
vmGenerateVnodeCfg(&createReq, &vnodeCfg);
|
||||||
|
|
||||||
if (vmTsmaAdjustDays(&vnodeCfg, &createReq) < 0) {
|
if (vmTsmaAdjustDays(&vnodeCfg, &createReq) < 0) {
|
||||||
|
|
|
@ -305,11 +305,15 @@ typedef struct {
|
||||||
int8_t hashMethod; // default is 1
|
int8_t hashMethod; // default is 1
|
||||||
int8_t cacheLast;
|
int8_t cacheLast;
|
||||||
int8_t schemaless;
|
int8_t schemaless;
|
||||||
|
int16_t hashPrefix;
|
||||||
|
int16_t hashSuffix;
|
||||||
|
int16_t sstTrigger;
|
||||||
|
int32_t tsdbPageSize;
|
||||||
int32_t numOfRetensions;
|
int32_t numOfRetensions;
|
||||||
SArray* pRetensions;
|
SArray* pRetensions;
|
||||||
int32_t walRetentionPeriod;
|
int32_t walRetentionPeriod;
|
||||||
int64_t walRetentionSize;
|
|
||||||
int32_t walRollPeriod;
|
int32_t walRollPeriod;
|
||||||
|
int64_t walRetentionSize;
|
||||||
int64_t walSegmentSize;
|
int64_t walSegmentSize;
|
||||||
} SDbCfg;
|
} SDbCfg;
|
||||||
|
|
||||||
|
@ -340,6 +344,7 @@ typedef struct {
|
||||||
uint32_t hashEnd;
|
uint32_t hashEnd;
|
||||||
char dbName[TSDB_DB_FNAME_LEN];
|
char dbName[TSDB_DB_FNAME_LEN];
|
||||||
int64_t dbUid;
|
int64_t dbUid;
|
||||||
|
int64_t cacheUsage;
|
||||||
int64_t numOfTables;
|
int64_t numOfTables;
|
||||||
int64_t numOfTimeSeries;
|
int64_t numOfTimeSeries;
|
||||||
int64_t totalStorage;
|
int64_t totalStorage;
|
||||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue