Merge branch '3.0' into feature/TD-14481-3.0
This commit is contained in:
commit 79e2190522

@@ -243,7 +243,7 @@ void console(SRaftServer *pRaftServer) {
   } else if (strcmp(cmd, "dropnode") == 0) {
-    char host[HOST_LEN];
+    char host[HOST_LEN] = {0};
     uint32_t port;
     parseAddr(param1, host, HOST_LEN, &port);
     uint64_t rid = raftId(host, port);

@@ -258,7 +258,7 @@ void console(SRaftServer *pRaftServer) {
   } else if (strcmp(cmd, "put") == 0) {
-    char buf[256];
+    char buf[256] = {0};
     snprintf(buf, sizeof(buf), "%s--%s", param1, param2);
     putValue(&pRaftServer->raft, buf);

@@ -1,6 +1,6 @@
 ---
-sidebar_label: Connection
-title: Connect to TDengine
+sidebar_label: Connect
+title: Connect
 description: "This document explains how to establish connections to TDengine, and briefly introduces how to install and use TDengine connectors."
 ---

@@ -1,5 +1,5 @@
 ---
-sidebar_label: SQL
+sidebar_label: Insert Using SQL
 title: Insert Using SQL
 ---

@@ -1,5 +1,5 @@
 ---
-title: Insert
+title: Insert Data
 ---

 TDengine supports multiple protocols of inserting data, including SQL, InfluxDB Line protocol, OpenTSDB Telnet protocol, and OpenTSDB JSON protocol. Data can be inserted row by row, or in batches. Data from one or more collection points can be inserted simultaneously. Data can be inserted with multiple threads, and out of order data and historical data can be inserted as well. InfluxDB Line protocol, OpenTSDB Telnet protocol and OpenTSDB JSON protocol are the 3 kinds of schemaless insert protocols supported by TDengine. It's not necessary to create STables and tables in advance if using schemaless protocols, and the schemas can be adjusted automatically based on the data being inserted.
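
As a minimal illustration of the SQL insertion path mentioned in the paragraph above (the table `d1001` and its columns are hypothetical, not taken from this diff):

```sql
-- assuming a table created beforehand, e.g.: CREATE TABLE d1001 (ts TIMESTAMP, current FLOAT);
INSERT INTO d1001 VALUES (NOW, 10.3);
```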

@@ -1 +1 @@
-label: Select Data
+label: Query Data

@@ -1,6 +1,6 @@
 ---
-Sidebar_label: Select
-title: Select
+Sidebar_label: Query data
+title: Query data
 description: "This chapter introduces major query functionalities and how to perform sync and async query using connectors."
 ---

@@ -1,5 +1,5 @@
 ---
-sidebar_label: Subscription
+sidebar_label: Data Subscription
 description: "Lightweight service for data subscription and publishing. Time series data inserted into TDengine continuously can be pushed automatically to subscribing clients."
 title: Data Subscription
 ---

@@ -1,6 +1,6 @@
 ---
 sidebar_label: UDF
-title: User Defined Functions
+title: User Defined Functions(UDF)
 description: "Scalar functions and aggregate functions developed by users can be utilized by the query framework to expand query capability"
 ---

@@ -22,8 +22,8 @@ SELECT COUNT([*|field_name]) FROM tb_name [WHERE clause];
 **More explanation**:

-- Wildcard (\*) can be used to represent all columns, it's used to get the number of all rows
-- The number of non-NULL values will be returned if this function is used on a specific column
+- Wildcard (\*) is used to represent all columns. The `COUNT` function is used to get the total number of all rows.
+- The number of non-NULL values will be returned if this function is used on a specific column.

 **Examples**:
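
A quick sketch of the two cases described above (the `meters` STable and `voltage` column come from this documentation set's own examples):

```sql
SELECT COUNT(*) FROM meters;        -- total number of rows
SELECT COUNT(voltage) FROM meters;  -- number of non-NULL values in one column
```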

@@ -87,7 +87,7 @@ SELECT TWA(field_name) FROM tb_name WHERE clause;
 **More explanations**:

-- From version 2.1.3.0, function TWA can be used on stable with `GROUP BY`, i.e. timelines generated by `GROUP BY tbname` on a STable.
+- Since version 2.1.3.0, function TWA can be used on stable with `GROUP BY`, i.e. timelines generated by `GROUP BY tbname` on a STable.

 ### IRATE

@@ -105,7 +105,7 @@ SELECT IRATE(field_name) FROM tb_name WHERE clause;
 **More explanations**:

-- From version 2.1.3.0, function IRATE can be used on stble with `GROUP BY`, i.e. timelines generated by `GROUP BY tbname` on a STable.
+- Since version 2.1.3.0, function IRATE can be used on stble with `GROUP BY`, i.e. timelines generated by `GROUP BY tbname` on a STable.

 ### SUM
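
To illustrate the `GROUP BY tbname` usage called out in the TWA and IRATE notes above (the time range values are illustrative only):

```sql
SELECT TWA(current) FROM meters
  WHERE ts >= '2017-07-14 17:00:00' AND ts <= '2017-07-14 20:00:00'
  GROUP BY tbname;
```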

@@ -149,7 +149,7 @@ SELECT STDDEV(field_name) FROM tb_name [WHERE clause];
 **Applicable column types**: Data types except for timestamp, binary, nchar and bool

-**Applicable table types**: table, STable (starting from version 2.0.15.1)
+**Applicable table types**: table, STable (since version 2.0.15.1)

 **Examples**:

@@ -193,13 +193,13 @@ SELECT MODE(field_name) FROM tb_name [WHERE clause];
 **Description**:The value which has the highest frequency of occurrence. NULL is returned if there are multiple values which have highest frequency of occurrence. It can't be used on timestamp column or tags.

-**Return value type**:Same as the data type of the column being operated
+**Return value type**:Same as the data type of the column being operated upon

 **Applicable column types**:Data types except for timestamp

 **More explanations**:Considering the number of returned result set is unpredictable, it's suggested to limit the number of unique values to 100,000, otherwise error will be returned.

-**Applicable version**:From version 2.6.0.0
+**Applicable version**:Since version 2.6.0.0

 **Examples**:
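
A minimal sketch of `MODE` usage (table and column names are hypothetical):

```sql
SELECT MODE(voltage) FROM d1001;
```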

@@ -234,7 +234,7 @@ SELECT HYPERLOGLOG(field_name) FROM { tb_name | stb_name } [WHERE clause];
 **More explanations**: The benefit of using hyperloglog algorithm is that the memory usage is under control when the data volume is huge. However, when the data volume is very small, the result may be not accurate, it's recommented to use `select count(data) from (select unique(col) as data from table)` in this case.

-**Applicable versions**:From version 2.6.0.0
+**Applicable versions**:Since version 2.6.0.0

 **Examples**:
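
The small-data workaround recommended in the explanation above, formatted as a standalone statement (`col` and `table` are the placeholders used in the quoted text):

```sql
SELECT COUNT(data) FROM (SELECT UNIQUE(col) AS data FROM table);
```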

@@ -271,7 +271,7 @@ SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause];
 **Description**: The minimum value of a specific column in a table or STable

-**Return value type**: Same as the data type of the column being operated
+**Return value type**: Same as the data type of the column being operated upon

 **Applicable column types**: Data types except for timestamp, binary, nchar and bool

@@ -301,7 +301,7 @@ SELECT MAX(field_name) FROM { tb_name | stb_name } [WHERE clause];
 **Description**: The maximum value of a specific column of a table or STable

-**Return value type**: Same as the data type of the column being operated
+**Return value type**: Same as the data type of the column being operated upon

 **Applicable column types**: Data types except for timestamp, binary, nchar and bool

@@ -331,7 +331,7 @@ SELECT FIRST(field_name) FROM { tb_name | stb_name } [WHERE clause];
 **Description**: The first non-null value of a specific column in a table or STable

-**Return value type**: Same as the column being operated
+**Return value type**: Same as the column being operated upon

 **Applicable column types**: Any data type

@@ -341,7 +341,7 @@ SELECT FIRST(field_name) FROM { tb_name | stb_name } [WHERE clause];
 - FIRST(\*) can be used to get the first non-null value of all columns
 - NULL will be returned if all the values of the specified column are all NULL
-- No result will NOT be returned if all the columns in the result set are all NULL
+- A result will NOT be returned if all the columns in the result set are all NULL

 **Examples**:
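
Illustrating the `FIRST(*)` form mentioned in the first bullet above (using the `meters` STable from this documentation set's examples):

```sql
SELECT FIRST(*) FROM meters;  -- first non-null value of every column
```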

@@ -367,7 +367,7 @@ SELECT LAST(field_name) FROM { tb_name | stb_name } [WHERE clause];
 **Description**: The last non-NULL value of a specific column in a table or STable

-**Return value type**: Same as the column being operated
+**Return value type**: Same as the column being operated upon

 **Applicable column types**: Any data type

@@ -403,7 +403,7 @@ SELECT TOP(field_name, K) FROM { tb_name | stb_name } [WHERE clause];
 **Description**: The greatest _k_ values of a specific column in a table or STable. If a value has multiple occurrences in the column but counting all of them in will exceed the upper limit _k_, then a part of them will be returned randomly.

-**Return value type**: Same as the column being operated
+**Return value type**: Same as the column being operated upon

 **Applicable column types**: Data types except for timestamp, binary, nchar and bool

@@ -442,7 +442,7 @@ SELECT BOTTOM(field_name, K) FROM { tb_name | stb_name } [WHERE clause];
 **Description**: The least _k_ values of a specific column in a table or STable. If a value has multiple occurrences in the column but counting all of them in will exceed the upper limit _k_, then a part of them will be returned randomly.

-**Return value type**: Same as the column being operated
+**Return value type**: Same as the column being operated upon

 **Applicable column types**: Data types except for timestamp, binary, nchar and bool
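
A short sketch of the two selection functions described above (k = 3 is arbitrary):

```sql
SELECT TOP(current, 3) FROM meters;     -- three greatest values
SELECT BOTTOM(current, 3) FROM meters;  -- three least values
```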

@@ -549,7 +549,7 @@ SELECT LAST_ROW(field_name) FROM { tb_name | stb_name };
 **Description**: The last row of a table or STable

-**Return value type**: Same as the column being operated
+**Return value type**: Same as the column being operated upon

 **Applicable column types**: Any data type

@@ -576,7 +576,7 @@ SELECT LAST_ROW(field_name) FROM { tb_name | stb_name };
 Query OK, 1 row(s) in set (0.001042s)
 ```

-### INTERP [From version 2.3.1]
+### INTERP [Since version 2.3.1]

 ```
 SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [ RANGE(timestamp1,timestamp2) ] [EVERY(interval)] [FILL ({ VALUE | PREV | NULL | LINEAR | NEXT})];

@@ -584,7 +584,7 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [
 **Description**: The value that matches the specified timestamp range is returned, if existing; or an interpolation value is returned.

-**Return value type**: Same as the column being operated
+**Return value type**: Same as the column being operated upon

 **Applicable column types**: Numeric data types

@@ -593,7 +593,7 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [
 **More explanations**

 - `INTERP` is used to get the value that matches the specified time slice from a column. If no such value exists an interpolation value will be returned based on `FILL` parameter.
-- The input data of `INTERP` is the value of the specified column, `where` can be used to filter the original data. If no `where` condition is specified then all original data is the input.
+- The input data of `INTERP` is the value of the specified column and a `where` clause can be used to filter the original data. If no `where` condition is specified then all original data is the input.
 - The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1<=timestamp2. timestamp1 is the starting point of the output time range and must be specified. timestamp2 is the ending point of the output time range and must be specified. If `RANGE` is not specified, then the timestamp of the first row that matches the filter condition is treated as timestamp1, the timestamp of the last row that matches the filter condition is treated as timestamp2.
 - The number of rows in the result set of `INTERP` is determined by the parameter `EVERY`. Starting from timestamp1, one interpolation is performed for every time interval specified `EVERY` parameter. If `EVERY` parameter is not used, the time windows will be considered as no ending timestamp, i.e. there is only one time window from timestamp1.
 - Interpolation is performed based on `FILL` parameter. No interpolation is performed if `FILL` is not used, that means either the original data that matches is returned or nothing is returned.

@@ -632,7 +632,7 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [
 taos> SELECT INTERP(current) FROM t1 where ts >= '2017-07-14 17:00:00' and ts <= '2017-07-14 20:00:00' RANGE('2017-7-14 18:00:00','2017-7-14 19:00:00') EVERY(5s) FILL(LINEAR);
 ```

-### INTERP [Prior to version 2.3.1]
+### INTERP [Since version 2.0.15.0]

 ```
 SELECT INTERP(field_name) FROM { tb_name | stb_name } WHERE ts='timestamp' [FILL ({ VALUE | PREV | NULL | LINEAR | NEXT})];

@@ -640,7 +640,7 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } WHERE ts='timestamp' [FILL
 **Description**: The value of a specific column that matches the specified time slice

-**Return value type**: Same as the column being operated
+**Return value type**: Same as the column being operated upon

 **Applicable column types**: Numeric data type

@@ -648,7 +648,6 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } WHERE ts='timestamp' [FILL
 **More explanations**:

-- It can be used from version 2.0.15.0
 - Time slice must be specified. If there is no data matching the specified time slice, interpolation is performed based on `FILL` parameter. Conditions such as tags or `tbname` can be used `Where` clause can be used to filter data.
 - The timestamp specified must be within the time range of the data rows of the table or STable. If it is beyond the valid time range, nothing is returned even with `FILL` parameter.
 - `INTERP` can be used to query only single time point once. `INTERP` can be used with `EVERY` to get the interpolation value every time interval.
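
A sketch of the single-time-point form described above, reusing table `t1` from the earlier INTERP example (the timestamp value is illustrative):

```sql
SELECT INTERP(current) FROM t1 WHERE ts = '2017-07-14 18:40:00' FILL(LINEAR);
```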

@@ -696,11 +695,11 @@ SELECT TAIL(field_name, k, offset_val) FROM {tb_name | stb_name} [WHERE clause];
 **Parameter value range**: k: [1,100] offset_val: [0,100]

-**Return value type**: Same as the column being operated
+**Return value type**: Same as the column being operated upon

 **Applicable column types**: Any data type except form timestamp, i.e. the primary key

-**Applicable versions**: From version 2.6.0.0
+**Applicable versions**: Since version 2.6.0.0

 **Examples**:
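
Following the `TAIL(field_name, k, offset_val)` signature shown above (values chosen arbitrarily within the documented ranges; the table name is hypothetical):

```sql
SELECT TAIL(current, 5, 2) FROM d1001;  -- 5 values, skipping the 2 most recent rows
```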

@@ -732,11 +731,11 @@ SELECT UNIQUE(field_name) FROM {tb_name | stb_name} [WHERE clause];
 **Description**: The values that occur the first time in the specified column. The effect is similar to `distinct` keyword, but it can also be used to match tags or timestamp.

-**Return value type**: Same as the column or tag being operated
+**Return value type**: Same as the column or tag being operated upon

 **Applicable column types**: Any data types except for timestamp

-**Applicable versions**: From version 2.6.0.0
+**Applicable versions**: Since version 2.6.0.0

 **More explanations**:
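
A minimal `UNIQUE` sketch (hypothetical table and column):

```sql
SELECT UNIQUE(voltage) FROM d1001;  -- first occurrence of each distinct value
```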

@@ -780,7 +779,7 @@ SELECT {DIFF(field_name, ignore_negative) | DIFF(field_name)} FROM tb_name [WHER
 **Description**: The different of each row with its previous row for a specific column. `ignore_negative` can be specified as 0 or 1, the default value is 1 if it's not specified. `1` means negative values are ignored.

-**Return value type**: Same as the column being operated
+**Return value type**: Same as the column being operated upon

 **Applicable column types**: Data types except for timestamp, binary, nchar and bool

@@ -789,8 +788,8 @@ SELECT {DIFF(field_name, ignore_negative) | DIFF(field_name)} FROM tb_name [WHER
 **More explanations**:

 - The number of result rows is the number of rows subtracted by one, no output for the first row
-- From version 2.1.30, `DIFF` can be used on STable with `GROUP by tbname`
-- From version 2.6.0, `ignore_negative` parameter is supported
+- Since version 2.1.30, `DIFF` can be used on STable with `GROUP by tbname`
+- Since version 2.6.0, `ignore_negative` parameter is supported

 **Examples**:
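
Putting the two notes above together, a sketch (table names are hypothetical):

```sql
SELECT DIFF(current, 0) FROM d1001;                -- ignore_negative = 0 keeps negative differences
SELECT DIFF(current) FROM meters GROUP BY tbname;  -- per-table timelines on a STable
```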

@@ -874,7 +873,7 @@ Query OK, 1 row(s) in set (0.000836s)
 SELECT CEIL(field_name) FROM { tb_name | stb_name } [WHERE clause];
 ```

-**Description**: The round up value of a specific column
+**Description**: The rounded up value of a specific column

 **Return value type**: Same as the column being used

@@ -896,9 +895,9 @@ SELECT CEIL(field_name) FROM { tb_name | stb_name } [WHERE clause];
 SELECT FLOOR(field_name) FROM { tb_name | stb_name } [WHERE clause];
 ```

-**Description**: The round down value of a specific column
+**Description**: The rounded down value of a specific column

-**More explanations**: The restrictions are same as `CEIL` function.
+**More explanations**: The restrictions are same as those of the `CEIL` function.

 ### ROUND

@@ -906,7 +905,7 @@ SELECT FLOOR(field_name) FROM { tb_name | stb_name } [WHERE clause];
 SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];
 ```

-**Description**: The round value of a specific column.
+**Description**: The rounded value of a specific column.

 **More explanations**: The restrictions are same as `CEIL` function.

@@ -933,7 +932,7 @@ SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];
 - Can only be used with aggregate functions
 - `Group by tbname` must be used together on a STable to force the result on a single timeline

-**Applicable versions**: From 2.3.0.x
+**Applicable versions**: Since 2.3.0.x

 ### MAVG

@@ -958,7 +957,7 @@ SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];
 - Can't be used with aggregate functions.
 - Must be used with `GROUP BY tbname` when it's used on a STable to force the result on each single timeline.

-**Applicable versions**: From 2.3.0.x
+**Applicable versions**: Since 2.3.0.x

 ### SAMPLE

@@ -981,7 +980,7 @@ SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];
 - Arithmetic operation can't be operated on the result of `SAMPLE` function
 - Must be used with `Group by tbname` when it's used on a STable to force the result on each single timeline

-**Applicable versions**: From 2.3.0.x
+**Applicable versions**: Since 2.3.0.x

 ### ASIN
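
Illustrating the `GROUP BY tbname` requirement noted above for MAVG on a STable (the signature is assumed here to take the column plus a window size k, which is not shown in this diff; k = 10 is arbitrary):

```sql
SELECT MAVG(current, 10) FROM meters GROUP BY tbname;
```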

@@ -1460,8 +1459,8 @@ SELECT field_name [+|-|*|/|%][Value|field_name] FROM { tb_name | stb_name } [WH
 **More explanations**:

-- Arithmetic operations can be performed on two or more columns, `()` can be used to control the precedence
-- NULL doesn't participate the operation, if one of the operands is NULL then result is NULL
+- Arithmetic operations can be performed on two or more columns, Parentheses `()` can be used to control the order of precedence.
+- NULL doesn't participate in the operation i.e. if one of the operands is NULL then result is NULL.

 **Examples**:
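
A small sketch of column arithmetic with parentheses controlling precedence (hypothetical table):

```sql
SELECT (current + 1.0) * voltage FROM d1001;
```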

@@ -1586,7 +1585,7 @@ Query OK, 6 row(s) in set (0.002613s)
 ## Time Functions

-From version 2.6.0.0, below time related functions can be used in TDengine.
+Since version 2.6.0.0, below time related functions can be used in TDengine.

 ### NOW

@@ -3,36 +3,36 @@ sidebar_label: Interval
 title: Aggregate by Time Window
 ---

-Aggregate by time window is supported in TDengine. For example, each temperature sensor reports the temperature every second, the average temperature every 10 minutes can be retrieved by query with time window.
-Window related clauses are used to divide the data set to be queried into subsets and then aggregate. There are three kinds of windows, time window, status window, and session window. There are two kinds of time windows, sliding window and flip time window.
+Aggregation by time window is supported in TDengine. For example, in the case where temperature sensors report the temperature every seconds, the average temperature for every 10 minutes can be retrieved by performing a query with a time window.
+Window related clauses are used to divide the data set to be queried into subsets and then aggregation is performed across the subsets. There are three kinds of windows: time window, status window, and session window. There are two kinds of time windows: sliding window and flip time/tumbling window.

 ## Time Window

-`INTERVAL` clause is used to generate time windows of the same time interval, `SLIDING` is used to specify the time step for which the time window moves forward. The query is performed on one time window each time, and the time window moves forward with time. When defining continuous query both the size of time window and the step of forward sliding time need to be specified. As shown in the figure blow, [t0s, t0e] ,[t1s , t1e], [t2s, t2e] are respectively the time ranges of three time windows on which continuous queries are executed. The time step for which time window moves forward is marked by `sliding time`. Query, filter and aggregate operations are executed on each time window respectively. When the time step specified by `SLIDING` is same as the time interval specified by `INTERVAL`, the sliding time window is actually a flip time window.
+The `INTERVAL` clause is used to generate time windows of the same time interval. The `SLIDING` parameter is used to specify the time step for which the time window moves forward. The query is performed on one time window each time, and the time window moves forward with time. When defining a continuous query, both the size of the time window and the step of forward sliding time need to be specified. As shown in the figure blow, [t0s, t0e] ,[t1s , t1e], [t2s, t2e] are respectively the time ranges of three time windows on which continuous queries are executed. The time step for which time window moves forward is marked by `sliding time`. Query, filter and aggregate operations are executed on each time window respectively. When the time step specified by `SLIDING` is same as the time interval specified by `INTERVAL`, the sliding time window is actually a flip time/tumbling window.

 

-`INTERVAL` and `SLIDING` should be used with aggregate functions and select functions. Below SQL statement is illegal because no aggregate or selection function is used with `INTERVAL`.
+`INTERVAL` and `SLIDING` should be used with aggregate functions and select functions. The SQL statement below is illegal because no aggregate or selection function is used with `INTERVAL`.

 ```
 SELECT * FROM temp_tb_1 INTERVAL(1m);
 ```

-The time step specified by `SLIDING` can't exceed the time interval specified by `INTERVAL`. Below SQL statement is illegal because the time length specified by `SLIDING` exceeds that specified by `INTERVAL`.
+The time step specified by `SLIDING` cannot exceed the time interval specified by `INTERVAL`. The SQL statement below is illegal because the time length specified by `SLIDING` exceeds that specified by `INTERVAL`.

 ```
 SELECT COUNT(*) FROM temp_tb_1 INTERVAL(1m) SLIDING(2m);
 ```

-When the time length specified by `SLIDING` is the same as that specified by `INTERVAL`, the sliding window is actually a flip window. The minimum time range specified by `INTERVAL` is 10 milliseconds (10a) prior to version 2.1.5.0. From version 2.1.5.0, the minimum time range by `INTERVAL` can be 1 microsecond (1u). However, if the DB precision is millisecond, the minimum time range is 1 millisecond (1a). Please note that the `timezone` parameter should be configured to be the same value in the `taos.cfg` configuration file on client side and server side.
+When the time length specified by `SLIDING` is the same as that specified by `INTERVAL`, the sliding window is actually a flip/tumbling window. The minimum time range specified by `INTERVAL` is 10 milliseconds (10a) prior to version 2.1.5.0. Since version 2.1.5.0, the minimum time range by `INTERVAL` can be 1 microsecond (1u). However, if the DB precision is millisecond, the minimum time range is 1 millisecond (1a). Please note that the `timezone` parameter should be configured to be the same value in the `taos.cfg` configuration file on client side and server side.

 ## Status Window

-In case of using integer, bool, or string to represent the device status at a moment, the continuous rows with same status belong to same status window. Once the status changes, the status window closes. As shown in the following figure, there are two status windows according to status, [2019-04-28 14:22:07,2019-04-28 14:22:10] and [2019-04-28 14:22:11,2019-04-28 14:22:12]. Status window is not applicable to STable for now.
+In case of using integer, bool, or string to represent the status of a device at any given moment, continuous rows with the same status belong to a status window. Once the status changes, the status window closes. As shown in the following figure, there are two status windows according to status, [2019-04-28 14:22:07,2019-04-28 14:22:10] and [2019-04-28 14:22:11,2019-04-28 14:22:12]. Status window is not applicable to STable for now.

 

-`STATE_WINDOW` is used to specify the column based on which to define status window, for example:
+`STATE_WINDOW` is used to specify the column on which the status window will be based. For example:

 ```
 SELECT COUNT(*), FIRST(ts), status FROM temp_tb_1 STATE_WINDOW(status);
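
A legal counterpart to the illegal statements quoted in this hunk, keeping the `SLIDING` step within the `INTERVAL` length:

```sql
SELECT COUNT(*) FROM temp_tb_1 INTERVAL(1m) SLIDING(30s);
```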

@@ -44,7 +44,7 @@ SELECT COUNT(*), FIRST(ts), status FROM temp_tb_1 STATE_WINDOW(status);
 SELECT COUNT(*), FIRST(ts) FROM temp_tb_1 SESSION(ts, tol_val);
 ```

-The primary key, i.e. timestamp, is used to determine which session window the row belongs to. If the time interval between two adjacent rows is within the time range specified by `tol_val`, they belong to the same session window; otherwise they belong to two different time windows. As shown in the figure below, if the limit of time interval for the session window is specified as 12 seconds, then the 6 rows in the figure constitutes 2 time windows, [2019-04-28 14:22:10,2019-04-28 14:22:30] and [2019-04-28 14:23:10,2019-04-28 14:23:30], because the time difference between 2019-04-28 14:22:30 and 2019-04-28 14:23:10 is 40 seconds, which exceeds the time interval limit of 12 seconds.
+The primary key, i.e. timestamp, is used to determine which session window a row belongs to. If the time interval between two adjacent rows is within the time range specified by `tol_val`, they belong to the same session window; otherwise they belong to two different session windows. As shown in the figure below, if the limit of time interval for the session window is specified as 12 seconds, then the 6 rows in the figure constitutes 2 time windows, [2019-04-28 14:22:10,2019-04-28 14:22:30] and [2019-04-28 14:23:10,2019-04-28 14:23:30], because the time difference between 2019-04-28 14:22:30 and 2019-04-28 14:23:10 is 40 seconds, which exceeds the time interval limit of 12 seconds.

 
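
Instantiating the `SESSION(ts, tol_val)` syntax above with the 12-second tolerance used in the explanation:

```sql
SELECT COUNT(*), FIRST(ts) FROM temp_tb_1 SESSION(ts, 12s);
```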

@@ -73,7 +73,7 @@ SELECT function_list FROM stb_name
 ### Restrictions

-- Aggregate functions and select functions can be used in `function_list`, with each function having only one output, for example COUNT, AVG, SUM, STDDEV, LEASTSQUARES, PERCENTILE, MIN, MAX, FIRST, LAST. Functions having multiple output can't be used, for example DIFF or arithmetic operations.
+- Aggregate functions and select functions can be used in `function_list`, with each function having only one output. For example COUNT, AVG, SUM, STDDEV, LEASTSQUARES, PERCENTILE, MIN, MAX, FIRST, LAST. Functions having multiple outputs, such as DIFF or arithmetic operations can't be used.
 - `LAST_ROW` can't be used together with window aggregate.
 - Scalar functions, like CEIL/FLOOR, can't be used with window aggregate.
 - `WHERE` clause can be used to specify the starting and ending time and other filter conditions

@@ -87,8 +87,8 @@ SELECT function_list FROM stb_name
 :::info

-1. Huge volume of interpolation output may be returned using `FILL`, so it's recommended to specify the time range when using `FILL`. The maximum interpolation values that can be returned in single query is 10,000,000.
-2. The result set is in ascending order of timestamp in aggregate by time window aggregate.
+1. A huge volume of interpolation output may be returned using `FILL`, so it's recommended to specify the time range when using `FILL`. The maximum number of interpolation values that can be returned in a single query is 10,000,000.
+2. The result set is in ascending order of timestamp when you aggregate by time window.
 3. If aggregate by window is used on STable, the aggregate function is performed on all the rows matching the filter conditions. If `GROUP BY` is not used in the query, the result set will be returned in ascending order of timestamp; otherwise the result set is not exactly in the order of ascending timestamp in each group.

 :::

@@ -97,13 +97,13 @@ Aggregate by time window is also used in continuous query, please refer to [Cont
 ## Examples

-The table of intelligent meters can be created by the SQL statement below:
+A table of intelligent meters can be created by the SQL statement below:

 ```sql
 CREATE TABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT);
 ```

-The average current, maximum current and median of current in every 10 minutes for the past 24 hours can be calculated using the below SQL statement, with missing values filled with the previous non-NULL values.
+The average current, maximum current and median of current in every 10 minutes for the past 24 hours can be calculated using the SQL statement below, with missing values filled with the previous non-NULL values.

 ```
 SELECT AVG(current), MAX(current), APERCENTILE(current, 50) FROM meters

@@ -4,8 +4,8 @@ title: Limits & Restrictions
 ## Naming Rules

-1. Only English characters, digits and underscore are allowed
-2. Can't start with a digit
+1. Only characters from the English alphabet, digits and underscore are allowed
+2. Names cannot start with a digit
 3. Case insensitive without escape character "\`"
 4. Identifier with escape character "\`"
    To support more flexible table or column names, a new escape character "\`" is introduced. For more details please refer to [escape](/taos-sql/escape).

@@ -16,38 +16,38 @@ The legal character set is `[a-zA-Z0-9!?$%^&*()_–+={[}]:;@~#|<,>.?/]`.
 ## General Limits

-- Maximum length of database name is 32 bytes
-- Maximum length of table name is 192 bytes, excluding the database name prefix and the separator
-- Maximum length of each data row is 48K bytes from version 2.1.7.0 , before which the limit is 16K bytes. Please note that the upper limit includes the extra 2 bytes consumed by each column of BINARY/NCHAR type.
-- Maximum of column name is 64.
+- Maximum length of database name is 32 bytes.
+- Maximum length of table name is 192 bytes, excluding the database name prefix and the separator.
+- Maximum length of each data row is 48K bytes since version 2.1.7.0 , before which the limit was 16K bytes. Please note that the upper limit includes the extra 2 bytes consumed by each column of BINARY/NCHAR type.
+- Maximum length of column name is 64.
 - Maximum number of columns is 4096. There must be at least 2 columns, and the first column must be timestamp.
 - Maximum length of tag name is 64.
 - Maximum number of tags is 128. There must be at least 1 tag. The total length of tag values should not exceed 16K bytes.
-- Maximum length of singe SQL statement is 1048576, i.e. 1 MB bytes. It can be configured in the parameter `maxSQLLength` in the client side, the applicable range is [65480, 1048576].
-- At most 4096 columns (or 1024 prior to 2.1.7.0) can be returned by `SELECT`, functions in the query statement may constitute columns. Error will be returned if the limit is exceeded.
-- Maximum numbers of databases, STables, tables are only depending on the system resources.
+- Maximum length of singe SQL statement is 1048576, i.e. 1 MB. It can be configured in the parameter `maxSQLLength` in the client side, the applicable range is [65480, 1048576].
+- At most 4096 columns (or 1024 prior to 2.1.7.0) can be returned by `SELECT`. Functions in the query statement constitute columns. An error is returned if the limit is exceeded.
+- Maximum numbers of databases, STables, tables are dependent only on the system resources.
 - Maximum of database name is 32 bytes, and it can't include "." or special characters.
-- Maximum replica number of database is 3
-- Maximum length of user name is 23 bytes
-- Maximum length of password is 15 bytes
-- Maximum number of rows depends on the storage space only.
-- Maximum number of tables depends on the number of nodes only.
-- Maximum number of databases depends on the number of nodes only.
-- Maximum number of vnodes for single database is 64.
+- Maximum number of replicas for a database is 3.
+- Maximum length of user name is 23 bytes.
+- Maximum length of password is 15 bytes.
+- Maximum number of rows depends only on the storage space.
+- Maximum number of tables depends only on the number of nodes.
+- Maximum number of databases depends only on the number of nodes.
+- Maximum number of vnodes for a single database is 64.

 ## Restrictions of `GROUP BY`

-`GROUP BY` can be performed on tags and `TBNAME`. It can be performed on data columns too, with one restriction that only one column and the number of unique values on that column is lower than 100,000. Please note that `GROUP BY` can't be performed on float or double types.
+`GROUP BY` can be performed on tags and `TBNAME`. It can be performed on data columns too, with the only restriction being it can only be performed on one data column and the number of unique values in that column is lower than 100,000. Please note that `GROUP BY` cannot be performed on float or double types.

 ## Restrictions of `IS NOT NULL`

-`IS NOT NULL` can be used on any data type of columns. The non-empty string evaluation expression, i.e. `<\>""` can only be used on non-numeric data types.
+`IS NOT NULL` can be used on any data type of columns. The non-empty string evaluation expression, i.e. `< > ""` can only be used on non-numeric data types.

 ## Restrictions of `ORDER BY`

 - Only one `order by` is allowed for normal table and subtable.
 - At most two `order by` are allowed for STable, and the second one must be `ts`.
-- `order by tag` must be used with `group by tag` on same tag, this rule is also applicable to `tbname`.
+- `order by tag` must be used with `group by tag` on same tag. This rule is also applicable to `tbname`.
 - `order by column` must be used with `group by column` or `top/bottom` on same column. This rule is applicable to table and STable.
 - `order by ts` is applicable to table and STable.
 - If `order by ts` is used with `group by`, the result set is sorted using `ts` in each group.
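
A quick illustration of the `GROUP BY` rule above, grouping on a tag (the `meters` STable and `location` tag come from this documentation set's examples):

```sql
SELECT COUNT(*) FROM meters GROUP BY location;
```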

@@ -56,7 +56,7 @@ The legal character set is `[a-zA-Z0-9!?$%^&*()_–+={[}]:;@~#|<,>.?/]`.
 ### Name Restrictions of Table/Column

-The name of a table or column can only be composed of ASCII characters, digits and underscore, while it can't start with a digit. The maximum length is 192 bytes. Names are case insensitive. The name mentioned in this rule doesn't include the database name prefix and the separator.
+The name of a table or column can only be composed of ASCII characters, digits and underscore and it cannot start with a digit. The maximum length is 192 bytes. Names are case insensitive. The name mentioned in this rule doesn't include the database name prefix and the separator.

 ### Name Restrictions After Escaping

@@ -4,7 +4,7 @@ title: JSON Type
 ## Syntax

-1. Tag of JSON type
+1. Tag of type JSON

   ```sql
   create STable s1 (ts timestamp, v1 int) tags (info json);

@@ -12,7 +12,7 @@ title: JSON Type
   create table s1_1 using s1 tags ('{"k1": "v1"}');
   ```

-2. -> Operator of JSON
+2. "->" Operator of JSON

   ```sql
   select * from s1 where info->'k1' = 'v1';

@@ -20,7 +20,7 @@ title: JSON Type
   select info->'k1' from s1;
   ```

-3. contains Operator of JSON
+3. "contains" Operator of JSON

   ```sql
   select * from s1 where info contains 'k2';

@@ -30,7 +30,7 @@ title: JSON Type
 ## Applicable Operations

-1. When JSON data type is used in `where`, `match/nmatch/between and/like/and/or/is null/is no null` can be used but `in` can't be used.
+1. When a JSON data type is used in `where`, `match/nmatch/between and/like/and/or/is null/is no null` can be used but `in` can't be used.

   ```sql
   select * from s1 where info->'k1' match 'v*';

@@ -42,9 +42,9 @@ title: JSON Type
   select * from s1 where info->'k1' is not null;
   ```

-2. Tag of JSON type can be used in `group by`, `order by`, `join`, `union all` and sub query, for example `group by json->'key'`
+2. A tag of JSON type can be used in `group by`, `order by`, `join`, `union all` and sub query; for example `group by json->'key'`

-3. `Distinct` can be used with tag of JSON type
+3. `Distinct` can be used with a tag of type JSON

   ```sql
   select distinct info->'k1' from s1;

@@ -52,9 +52,9 @@ title: JSON Type
 4. Tag Operations

-The value of JSON tag can be altered. Please note that the full JSON will be overriden when doing this.
+The value of a JSON tag can be altered. Please note that the full JSON will be overriden when doing this.

-The name of JSON tag can be altered. A tag of JSON type can't be added or removed. The column length of a JSON tag can't be changed.
+The name of a JSON tag can be altered. A tag of JSON type can't be added or removed. The column length of a JSON tag can't be changed.

 ## Other Restrictions
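
A sketch of altering a JSON tag value as described above, reusing the `s1_1` subtable from the syntax section. Note that the whole JSON value is replaced, and the exact `SET TAG` form is an assumption here, not shown in this diff:

```sql
ALTER TABLE s1_1 SET TAG info = '{"k1": "v2"}';  -- the full JSON is overwritten
```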

@@ -64,17 +64,17 @@ title: JSON Type
 - JSON format:

-  - The input string for JSON can be empty, i.e. "", "\t", or NULL, but can't be non-NULL string, bool or array.
-  - object can be {}, and the whole JSON is empty if so. Key can be "", and it's ignored if so.
-  - value can be int, double, string, boll or NULL, can't be array. Nesting is not allowed, that means value can't be another JSON.
+  - The input string for JSON can be empty, i.e. "", "\t", or NULL, but it can't be non-NULL string, bool or array.
+  - object can be {}, and the entire JSON is empty if so. Key can be "", and it's ignored if so.
+  - value can be int, double, string, bool or NULL, and it can't be an array. Nesting is not allowed which means that the value of a key can't be JSON.
   - If one key occurs twice in JSON, only the first one is valid.
   - Escape characters are not allowed in JSON.

-- NULL is returned if querying a key that doesn't exist in JSON.
+- NULL is returned when querying a key that doesn't exist in JSON.

 - If a tag of JSON is the result of inner query, it can't be parsed and queried in the outer query.

-For example, the below SQL statements are not supported.
+For example, the SQL statements below are not supported.

 ```sql;
 select jtag->'key' from (select jtag from STable);

@@ -6,7 +6,7 @@ description: Install, Uninstall, Start, Stop and Upgrade
 import Tabs from "@theme/Tabs";
 import TabItem from "@theme/TabItem";

-TDengine community version provides dev and rpm packages for users to choose based on the system environment. deb supports Debian, Ubuntu and systems derived from them. rpm supports CentOS, RHEL, SUSE and systems derived from them. Furthermore, tar.gz package is provided for enterprise customers.
+TDengine community version provides deb and rpm packages for users to choose from, based on their system environment. The deb package supports Debian, Ubuntu and derivative systems. The rpm package supports CentOS, RHEL, SUSE and derivative systems. Furthermore, a tar.gz package is provided for TDengine Enterprise customers.

 ## Install

@@ -124,7 +124,7 @@ taoskeeper is installed, enable it by `systemctl enable taoskeeper`
 ```

 :::info
-Some configuration will be prompted for users to provide when install.sh is executing, the interactive mode can be disabled by executing `./install.sh -e no`. `./install -h` can show all parameters and detailed explanation.
+Users will be prompted to enter some configuration information when install.sh is executing. The interactive mode can be disabled by executing `./install.sh -e no`. `./install.sh -h` can show all parameters with detailed explanation.

 :::

@@ -132,7 +132,7 @@ Some configuration will be prompted for users to provide when install.sh is exec
 </Tabs>

 :::note
-When installing on the first node in the cluster, when "Enter FQDN:" is prompted, nothing needs to be provided. When installing on following nodes, when "Enter FQDN:" is prompted, the end point of the first dnode in the cluster can be input if it is already up; or just ignore it and configure later after installation is done.
+When installing on the first node in the cluster, at the "Enter FQDN:" prompt, nothing needs to be provided. When installing on subsequent nodes, at the "Enter FQDN:" prompt, you must enter the end point of the first dnode in the cluster if it is already up. You can also just ignore it and configure it later after installation is finished.

 :::
|
@ -181,14 +181,14 @@ taosKeeper is removed successfully!
|
|||
|
||||
:::note
|
||||
|
||||
- It's strongly suggested not to use multiple kinds of installation packages on a single host TDengine
|
||||
- After deb package is installed, if the installation directory is removed manually so that uninstall or reinstall can't succeed, it can be resolved by cleaning up TDengine package information as in the command below and then reinstalling.
|
||||
- We strongly recommend not to use multiple kinds of installation packages on a single host TDengine.
|
||||
- After deb package is installed, if the installation directory is removed manually, uninstall or reinstall will not work. This issue can be resolved by using the command below which cleans up TDengine package information. You can then reinstall if needed.
|
||||
|
||||
```bash
|
||||
$ sudo rm -f /var/lib/dpkg/info/tdengine*
|
||||
```
|
||||
|
||||
- After rpm package is installed, if the installation directory is removed manually so that uninstall or reinstall can't succeed, it can be resolved by cleaning up TDengine package information as in the command below and then reinstalling.
|
||||
- After rpm package is installed, if the installation directory is removed manually, uninstall or reinstall will not work. This issue can be resolved by using the command below which cleans up TDengine package information. You can then reinstall if needed.
|
||||
|
||||
```bash
|
||||
$ sudo rpm -e --noscripts tdengine
|
||||
|
@ -219,7 +219,7 @@ lrwxrwxrwx 1 root root 13 Feb 22 09:34 log -> /var/log/taos/
|
|||
During the installation process:
|
||||
|
||||
- Configuration directory, data directory, and log directory are created automatically if they don't exist
|
||||
- The default configuration file is located at /etc/taos/taos.cfg, which is a copy of /usr/local/taos/cfg/taos.cfg if not existing
|
||||
- The default configuration file is located at /etc/taos/taos.cfg, which is a copy of /usr/local/taos/cfg/taos.cfg
|
||||
- The default data directory is /var/lib/taos, which is a soft link to /usr/local/taos/data
|
||||
- The default log directory is /var/log/taos, which is a soft link to /usr/local/taos/log
|
||||
- The executables at /usr/local/taos/bin are linked to /usr/bin
|
||||
|

@@ -228,7 +228,7 @@ During the installation process:
 :::note

-- When TDengine is uninstalled, the configuration /etc/taos/taos.cfg, data directory /var/lib/taos, log directory /var/log/taos are kept. They can be deleted manually with caution because data can't be recovered
+- When TDengine is uninstalled, the configuration /etc/taos/taos.cfg, data directory /var/lib/taos, log directory /var/log/taos are kept. They can be deleted manually with caution, because data can't be recovered. Please follow data integrity, security, backup or relevant SOPs before deleting any data.
 - When reinstalling TDengine, if the default configuration file /etc/taos/taos.cfg exists, it will be kept and the configuration file in the installation package will be renamed to taos.cfg.orig and stored at /usr/local/taos/cfg to be used as configuration sample. Otherwise the configuration file in the installation package will be installed to /etc/taos/taos.cfg and used.

 ## Start and Stop
|
@ -263,18 +263,19 @@ Active: inactive (dead)
|
|||
|
||||
There are two aspects in upgrade operation: upgrade installation package and upgrade a running server.
|
||||
|
||||
Upgrading package should follow the steps mentioned previously to first uninstall the old version then install the new version.
|
||||
To upgrade a package, follow the steps mentioned previously to first uninstall the old version then install the new version.
|
||||
|
||||
Upgrading a running server is much more complex. First please check the version number of the old version and the new version. The version number of TDengine consists of 4 sections, only if the first 3 section match can the old version be upgraded to the new version. The steps of upgrading a running server are as below:
|
||||
Upgrading a running server is much more complex. First please check the version number of the old version and the new version. The version number of TDengine consists of 4 sections, only if the first 3 sections match can the old version be upgraded to the new version. The steps of upgrading a running server are as below:
|
||||
|
||||
- Stop inserting data
|
||||
- Make sure all data are persisted into disk
|
||||
- Make sure all data is persisted to disk
|
||||
- Make some simple queries (Such as total rows in stables, tables and so on. Note down the values. Follow best practices and relevant SOPs.)
|
||||
- Stop the cluster of TDengine
|
||||
- Uninstall old version and install new version
|
||||
- Start the cluster of TDengine
|
||||
- Make some simple queries to make sure no data loss
|
||||
- Make some simple data insertion to make sure the cluster works well
|
||||
- Restore business data
|
||||
- Execute simple queries, such as the ones executed prior to installing the new package, to make sure there is no data loss
|
||||
- Run some simple data insertion statements to make sure the cluster works well
|
||||
- Restore business services
|
||||
|
||||
:::warning
|
||||
|
||||
|
|
|

@@ -2,17 +2,17 @@
 title: Resource Planning
 ---

-The computing and storage resources need to be planned if using TDengine to build an IoT platform. How to plan the CPU, memory and disk required will be described in this chapter.
+It is important to plan computing and storage resources if using TDengine to build an IoT, time-series or Big Data platform. How to plan the CPU, memory and disk resources required, will be described in this chapter.

 ## Memory Requirement of Server Side

-The number of vgroups created for each database is the same as the number of CPU cores by default and can be configured by parameter `maxVgroupsPerDb`, each vnode in a vgroup stores one replica. Each vnode consumes a fixed size of memory, i.e. `blocks` \* `cache`. Besides, some memory is required for tag values associated with each table. A fixed amount of memory is required for each cluster. So, the memory required for each DB can be calculated using the formula below:
+By default, the number of vgroups created for each database is the same as the number of CPU cores. This can be configured by the parameter `maxVgroupsPerDb`. Each vnode in a vgroup stores one replica. Each vnode consumes a fixed amount of memory, i.e. `blocks` \* `cache`. In addition, some memory is required for tag values associated with each table. A fixed amount of memory is required for each cluster. So, the memory required for each DB can be calculated using the formula below:

 ```
 Database Memory Size = maxVgroupsPerDb * replica * (blocks * cache + 10MB) + numOfTables * (tagSizePerTable + 0.5KB)
 ```

-For example, assuming the default value of `maxVgroupPerDB` is 64, the default value of `cache` 16M, the default value of `blocks` is 6, there are 100,000 tables in a DB, the replica number is 1, total length of tag values is 256 bytes, the total memory required for this DB is: 64 \* 1 \* (16 \* 6 + 10) + 100000 \* (0.25 + 0.5) / 1000 = 6792M.
+For example, assuming the default value of `maxVgroupPerDB` is 64, the default value of `cache` is 16M, the default value of `blocks` is 6, there are 100,000 tables in a DB, the replica number is 1, total length of tag values is 256 bytes, the total memory required for this DB is: 64 \* 1 \* (16 \* 6 + 10) + 100000 \* (0.25 + 0.5) / 1000 = 6792M.

 In the real operation of TDengine, we are more concerned about the memory used by each TDengine server process `taosd`.

@@ -22,10 +22,10 @@ In the real operation of TDengine, we are more concerned about the memory used b
 In the above formula:

-1. "vnode_memory" of a `taosd` process is the memory used by all vnodes hosted by this `taosd` process. It can be roughly calculated by firstly adding up the total memory of all DBs whose memory usage can be derived according to the formula mentioned previously then dividing by number of dnodes and multiplying the number of replicas.
+1. "vnode_memory" of a `taosd` process is the memory used by all vnodes hosted by this `taosd` process. It can be roughly calculated by firstly adding up the total memory of all DBs whose memory usage can be derived according to the formula for Database Memory Size, mentioned above, then dividing by number of dnodes and multiplying the number of replicas.

 ```
-vnode_memory = sum(Database memory) / number_of_dnodes * replica
+vnode_memory = (sum(Database Memory Size) / number_of_dnodes) * replica
 ```

 2. "mnode_memory" of a `taosd` process is the memory consumed by a mnode. If there is one (and only one) mnode hosted in a `taosd` process, the memory consumed by "mnode" is "0.2KB \* the total number of tables in the cluster".
|
@ -56,8 +56,8 @@ So, at least 3GB needs to be reserved for such a client.
|
|||
|
||||
The CPU resources required depend on two aspects:
|
||||
|
||||
- **Data Insertion** Each dnode of TDengine can process at least 10,000 insertion requests in one second, while each insertion request can have multiple rows. The computing resource consumed between inserting 1 row one time and inserting 10 rows one time is very small. So, the more the rows to insert one time, the higher the efficiency. Inserting in bach also exposes requirements for the client side which needs to cache rows and insert in batch once the cached rows reaches a threshold.
|
||||
- **Data Query** High efficiency query is provided in TDengine, but it's hard to estimate the CPU resource required because the queries used in different use cases and the frequency of queries vary significantly. It can only be verified with the query statements, query frequency, data size to be queried, etc provided by user.
|
||||
- **Data Insertion** Each dnode of TDengine can process at least 10,000 insertion requests in one second, while each insertion request can have multiple rows. The difference in computing resource consumed, between inserting 1 row at a time, and inserting 10 rows at a time is very small. So, the more the number of rows that can be inserted one time, the higher the efficiency. Inserting in batch also imposes requirements on the client side which needs to cache rows to insert in batch once the number of cached rows reaches a threshold.
|
||||
- **Data Query** High efficiency query is provided in TDengine, but it's hard to estimate the CPU resource required because the queries used in different use cases and the frequency of queries vary significantly. It can only be verified with the query statements, query frequency, data size to be queried, and other requirements provided by users.
|
||||
|
||||
In short, the CPU resource required for data insertion can be estimated but it's hard to do so for query use cases. In real operation, it's suggested to control CPU usage below 50%. If this threshold is exceeded, it's a reminder for system operator to add more nodes in the cluster to expand resources.
|
||||
|
||||
|
@ -71,12 +71,12 @@ Raw DataSize = numOfTables * rowSizePerTable * rowsPerTable
|
|||
|
||||
For example, there are 10,000,000 meters, while each meter collects data every 15 minutes and the data size of each collection is 128 bytes, so the raw data size of one year is: 10000000 \* 128 \* 24 \* 60 / 15 \* 365 = 44.8512(TB). Assuming compression ratio is 5, the actual disk size is: 44.851 / 5 = 8.97024(TB).
|
||||
|
||||
Parameter `keep` can be used to set how long the data will be kept on disk. To further reduce storage cost, multiple storage levels can be enabled in TDengine, with the coldest data stored on the cheapest storage device, and this is transparent to application programs.
|
||||
Parameter `keep` can be used to set how long the data will be kept on disk. To further reduce storage cost, multiple storage levels can be enabled in TDengine, with the coldest data stored on the cheapest storage device. This is completely transparent to application programs.
|
||||
|
||||
To increase the performance, multiple disks can be setup for parallel data reading or data inserting. Please note that an expensive disk array is not necessary because replications are used in TDengine to provide high availability.
|
||||
To increase performance, multiple disks can be setup for parallel data reading or data inserting. Please note that an expensive disk array is not necessary because replications are used in TDengine to provide high availability.
|
||||
|
||||
## Number of Hosts
|
||||
|
||||
A host can be either physical or virtual. The total memory, total CPU, total disk required can be estimated according to the formulas mentioned previously. Then, according to the system resources that a single host can provide, assuming all hosts have the same resources, the number of hosts can be derived easily.
|
||||
A host can be either physical or virtual. The total memory, total CPU, total disk required can be estimated according to the formulae mentioned previously. Then, according to the system resources that a single host can provide, assuming all hosts have the same resources, the number of hosts can be derived easily.
|
||||
|
||||
**Quick Estimation for CPU, Memory and Disk** Please refer to [Resource Estimate](https://www.taosdata.com/config/config.html).
|
||||
|
|
|
@@ -7,26 +7,26 @@ title: Fault Tolerance & Disaster Recovery

TDengine uses **WAL**, i.e. Write Ahead Log, to achieve fault tolerance and high reliability.

When a data block is received by TDengine, the original data block is first written into WAL. The log in WAL will be deleted only after the data has been written into data files in the database. Data can be recovered from WAL in case the server is stopped abnormally due to any reason and then restarted.
When a data block is received by TDengine, the original data block is first written into WAL. The log in WAL will be deleted only after the data has been written into data files in the database. Data can be recovered from WAL in case the server is stopped abnormally for any reason and then restarted.

There are 2 configuration parameters related to WAL:

- walLevel:
  - 0:wal is disabled;
  - 1:wal is enabled without fsync;
  - 2:wal is enabled with fsync.
- fsync:only valid when walLevel is set to 2, it specifies the interval of invoking fsync. If set to 0, it means fsync is invoked immediately once WAL is written.
  - 0:wal is disabled
  - 1:wal is enabled without fsync
  - 2:wal is enabled with fsync
- fsync:This parameter is only valid when walLevel is set to 2. It specifies the interval, in milliseconds, of invoking fsync. If set to 0, it means fsync is invoked immediately once WAL is written.

To achieve absolutely no data loss, walLevel needs to be set to 2 and fsync needs to be set to 1. The penalty is the performance of data ingestion downgrades. However, if the concurrent threads of data insertion on the client side can reach a big enough number, for example 50, the data ingestion performance would be still good enough, our verification shows that the drop is only 30% compared to fsync is set to 3,000 milliseconds.
To achieve absolutely no data loss, walLevel should be set to 2 and fsync should be set to 1. There is a performance penalty to the data ingestion rate. However, if the concurrent data insertion threads on the client side can reach a big enough number, for example 50, the data ingestion performance will still be good enough. Our verification shows that the drop is only 30% when fsync is set to 3,000 milliseconds.
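The mechanism behind these parameters is the classic write-ahead discipline: persist the log record before applying the data, so that a crash between the two steps can be repaired on restart by replaying the log. The sketch below is a generic illustration of that discipline, not TDengine's implementation; the function name and file-descriptor handling are assumptions:

```c
// Generic write-ahead-log discipline, illustrating the walLevel/fsync trade-off
// described above. This is a hedged sketch, not TDengine's code.
#include <stddef.h>
#include <unistd.h>

int wal_write(int walFd, const void *rec, size_t len, int fsyncNow) {
  if (write(walFd, rec, len) != (ssize_t)len) return -1;
  if (fsyncNow) {
    // walLevel 2 with an fsync interval of 0: flush to stable storage
    // immediately, before acknowledging the write.
    if (fsync(walFd) != 0) return -1;
  }
  return 0;  // only now is it safe to apply the record to the data files
}
```

A longer fsync interval batches flushes and raises ingestion throughput, at the cost of potentially losing the records written since the last flush.
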
## Disaster Recovery

TDengine uses replications to provide high availability and disaster recovery capability.
TDengine uses replication to provide high availability and disaster recovery capability.

TDengine cluster is managed by mnode. To make sure the high availability of mnode, multiple replicas can be configured by the system parameter `numOfMnodes`. The data replication between mnode replicas is performed in a synchronous way to guarantee the metadata consistency.
A TDengine cluster is managed by mnode. To ensure the high availability of mnode, multiple replicas can be configured by the system parameter `numOfMnodes`. The data replication between mnode replicas is performed in a synchronous way to guarantee metadata consistency.

The number of replicas for the time series data in TDengine is associated with each database, there can be a lot of databases in a cluster while each database can be configured with a different number of replicas. When creating a database, parameter `replica` is used to configure the number of replications. To achieve high availability, `replica` needs to be higher than 1.
The number of replicas for time series data in TDengine is associated with each database. There can be many databases in a cluster and each database can be configured with a different number of replicas. When creating a database, parameter `replica` is used to configure the number of replications. To achieve high availability, `replica` needs to be higher than 1.

The number of dnodes in a TDengine cluster must NOT be lower than the number of replicas for any database, otherwise it would fail when trying to create a table.
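As a concrete illustration of the `replica` parameter, here is a hedged sketch using the TDengine C client; the database name, replica count, and connection parameters are assumptions for illustration:

```c
// Hypothetical sketch: creating a database with 3 replicas from the C client.
// Requires a cluster with at least 3 dnodes, per the note above.
#include <stdio.h>
#include "taos.h"

int create_replicated_db(void) {
  TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 6030);
  if (taos == NULL) return -1;
  TAOS_RES *res = taos_query(taos, "CREATE DATABASE power REPLICA 3;");
  int code = taos_errno(res);
  if (code != 0) fprintf(stderr, "create failed: %s\n", taos_errstr(res));
  taos_free_result(res);
  taos_close(taos);
  return code;
}
```
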
As long as the dnodes of a TDengine cluster are deployed on different physical machines and the replica number is set to bigger than 1, high availability can be achieved without any other assistance. If dnodes of TDengine cluster are deployed in geographically different data centers, disaster recovery can be achieved too.
As long as the dnodes of a TDengine cluster are deployed on different physical machines and the replica number is higher than 1, high availability can be achieved without any other assistance. For disaster recovery, dnodes of a TDengine cluster should be deployed in geographically different data centers.

@@ -2,11 +2,13 @@
title: Data Export
---

There are two ways of exporting data from a TDengine cluster, one is SQL statement in TDengine CLI, the other one is `taosdump`.
There are two ways of exporting data from a TDengine cluster:

- Using a SQL statement in TDengine CLI
- Using the `taosdump` tool

## Export Using SQL

If you want to export the data of a table or a STable, please execute below SQL statement in TDengine CLI.
If you want to export the data of a table or a STable, please execute the SQL statement below in the TDengine CLI.

```sql
select * from <tb_name> >> data.csv;
```

@@ -16,4 +18,4 @@ The data of table or STable specified by `tb_name` will be exported into a file

## Export Using taosdump

With `taosdump`, you can choose to export the data of all databases, a database, a table or a STable, you can also choose export the data within a time range, or even only export the schema definition of a table. For the details of using `taosdump` please refer to [Tool for exporting and importing data: taosdump](/reference/taosdump).
With `taosdump`, you can choose to export the data of all databases, a database, a table or a STable. You can also choose to export the data within a time range, or even export only the schema definition of a table. For the details of using `taosdump`, please refer to [Tool for exporting and importing data: taosdump](/reference/taosdump).

@@ -3,7 +3,7 @@ sidebar_label: Connections & Tasks
title: Manage Connections and Query Tasks
---

A system operator can use TDengine CLI to show the connections, ongoing queries, stream computing, and can close connection or stop ongoing query task or stream computing.
A system operator can use the TDengine CLI to show connections, ongoing queries, stream computing, and can close connections or stop ongoing query tasks or stream computing.

## Show Connections

@@ -13,7 +13,7 @@ SHOW CONNECTIONS;

One column of the output of the above SQL command is "ip:port", which is the end point of the client.

## Close Connections Forcedly
## Force Close Connections

```sql
KILL CONNECTION <connection-id>;
```

@@ -27,9 +27,9 @@ In the above SQL command, `connection-id` is from the first column of the output

```sql
SHOW QUERIES;
```

The first column of the output is query ID, which is composed of the corresponding connection ID and the sequence number of the current query task started on this connection, in format of "connection-id:query-no".
The first column of the output is query ID, which is composed of the corresponding connection ID and the sequence number of the current query task started on this connection. The format is "connection-id:query-no".

## Close Queries Forcedly
## Force Close Queries

```sql
KILL QUERY <query-id>;
```

@@ -43,9 +43,9 @@ In the above SQL command, `query-id` is from the first column of the output of `

```sql
SHOW STREAMS;
```

The first column of the output is stream ID, which is composed of the connection ID and the sequence number of the current stream started on this connection, in the format of "connection-id:stream-no".
The first column of the output is stream ID, which is composed of the connection ID and the sequence number of the current stream started on this connection. The format is "connection-id:stream-no".

## Close Continuous Query Forcedly
## Force Close Continuous Query

```sql
KILL STREAM <stream-id>;
```

@@ -2,13 +2,13 @@
title: TDengine Monitoring
---

After TDengine is started, a database named `log` for monitoring is created automatically. The information about CPU, memory, disk, bandwidth, number of requests, disk I/O speed, slow query is written into `log` database on the basis of a predefined interval. Additionally, some important system operations, like logon, create user, drop database, and alerts and warnings generated in TDengine are written into the `log` database too. A system operator can view the data in `log` database from TDengine CLI or from a web console.
After TDengine is started, a database named `log` is created automatically to help with monitoring. Information that includes CPU, memory and disk usage, bandwidth, number of requests, disk I/O speed, and slow queries is written into the `log` database at a predefined interval. Additionally, some important system operations, like logon, create user, drop database, and alerts and warnings generated in TDengine are written into the `log` database too. A system operator can view the data in the `log` database from the TDengine CLI or from a web console.

The collection of the monitoring information is enabled by default, but can be disabled by parameter `monitor` in the configuration file.

## TDinsight

TDinsight is a complete solution which uses the monitor database `log` mentioned previously and Grafana to monitor a TDengine cluster.
TDinsight is a complete solution which uses the monitoring database `log` mentioned previously, and Grafana, to monitor a TDengine cluster.

From version 2.3.3.0, more monitoring data has been added in the `log` database. Please refer to [TDinsight Grafana Dashboard](https://grafana.com/grafana/dashboards/15167) to learn more details about using TDinsight to monitor TDengine.

@@ -2,7 +2,7 @@
title: Administration
---

This chapter is mainly written for system administrators, covering download, install/uninstall, data import/export, system monitoring, user management, connection management, etc. Capacity planning and system optimization are also covered.
This chapter is mainly written for system administrators. It covers download, install/uninstall, data import/export, system monitoring, user management, connection management, capacity planning and system optimization.

```mdx-code-block
import DocCardList from '@theme/DocCardList';
```

@@ -20,4 +20,4 @@ func main() {

// use
// var taosDSN = "root:taosdata@tcp(localhost:6030)/dbName"
// if you want to connect to a default database.
// if you want to connect a specified database named "dbName".

@@ -18,6 +18,6 @@ func main() {
defer taos.Close()
}

// use
// var taosDSN = "root:taosdata@http(localhost:6041)/dbName"
// if you want to connect to a default database.
// if you want to connect a specified database named "dbName".

@@ -22,4 +22,4 @@ public class JNIConnectExample {

// use
// String jdbcUrl = "jdbc:TAOS://localhost:6030/dbName?user=root&password=taosdata";
// if you want to connect to a default database.
// if you want to connect a specified database named "dbName".

@@ -66,12 +66,6 @@ typedef struct SSyncCfg {
SNodeInfo nodeInfo[TSDB_MAX_REPLICA];
} SSyncCfg;

typedef struct SSnapshot {
void* data;
SyncIndex lastApplyIndex;
SyncTerm lastApplyTerm;
} SSnapshot;

typedef struct SFsmCbMeta {
SyncIndex index;
bool isWeak;

@@ -93,6 +87,12 @@ typedef struct SReConfigCbMeta {
uint64_t flag;
} SReConfigCbMeta;

typedef struct SSnapshot {
void *data;
SyncIndex lastApplyIndex;
SyncTerm lastApplyTerm;
} SSnapshot;

typedef struct SSyncFSM {
void* data;

@@ -101,23 +101,17 @@ typedef struct SSyncFSM {
void (*FpRollBackCb)(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta);

void (*FpRestoreFinishCb)(struct SSyncFSM* pFsm);
int32_t (*FpGetSnapshot)(struct SSyncFSM* pFsm, SSnapshot* pSnapshot);

// if (*ppIter == NULL)
// *ppIter = new iter;
// else
// *ppIter.next();
//
// if success, return 0. else return error code
int32_t (*FpSnapshotRead)(struct SSyncFSM* pFsm, const SSnapshot* pSnapshot, void** ppIter, char** ppBuf,
int32_t* len);

// apply data into fsm
int32_t (*FpSnapshotApply)(struct SSyncFSM* pFsm, const SSnapshot* pSnapshot, char* pBuf, int32_t len);

void (*FpReConfigCb)(struct SSyncFSM* pFsm, SSyncCfg newCfg, SReConfigCbMeta cbMeta);

// int32_t (*FpRestoreSnapshot)(struct SSyncFSM* pFsm, const SSnapshot* snapshot);
int32_t (*FpGetSnapshot)(struct SSyncFSM* pFsm, SSnapshot* pSnapshot);

int32_t (*FpSnapshotStartRead)(struct SSyncFSM* pFsm, void** ppReader);
int32_t (*FpSnapshotStopRead)(struct SSyncFSM* pFsm, void* pReader);
int32_t (*FpSnapshotDoRead)(struct SSyncFSM* pFsm, void* pReader, void** ppBuf, int32_t* len);

int32_t (*FpSnapshotStartWrite)(struct SSyncFSM* pFsm, void** ppWriter);
int32_t (*FpSnapshotStopWrite)(struct SSyncFSM* pFsm, void* pWriter, bool isApply);
int32_t (*FpSnapshotDoWrite)(struct SSyncFSM* pFsm, void* pWriter, void* pBuf, int32_t len);

} SSyncFSM;
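The commit replaces the single-shot FpSnapshotRead/FpSnapshotApply pair with explicit start/do/stop phases for both reading and writing. The sketch below shows how a caller might drive the new callbacks; it is inferred from the signatures alone, and the end-of-snapshot convention (len == 0) and buffer ownership are assumptions, not a documented contract:

```c
// Hypothetical driver for the new snapshot callbacks declared above.
#include "sync.h"  // assumes the SSyncFSM declaration shown in this hunk

static int32_t transferSnapshot(SSyncFSM *pSrc, SSyncFSM *pDst) {
  void *pReader = NULL, *pWriter = NULL;
  if (pSrc->FpSnapshotStartRead(pSrc, &pReader) != 0) return -1;
  if (pDst->FpSnapshotStartWrite(pDst, &pWriter) != 0) {
    pSrc->FpSnapshotStopRead(pSrc, pReader);
    return -1;
  }
  int32_t code = 0;
  for (;;) {
    void   *pBuf = NULL;
    int32_t len = 0;
    code = pSrc->FpSnapshotDoRead(pSrc, pReader, &pBuf, &len);
    if (code != 0 || len == 0) break;  // error, or assumed end-of-snapshot marker
    code = pDst->FpSnapshotDoWrite(pDst, pWriter, pBuf, len);
    if (code != 0) break;
  }
  pSrc->FpSnapshotStopRead(pSrc, pReader);
  pDst->FpSnapshotStopWrite(pDst, pWriter, code == 0);  // apply only on success
  return code;
}
```
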
@@ -1463,7 +1463,7 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) {
}

void blockDebugShowData(const SArray* dataBlocks) {
char pBuf[128];
char pBuf[128] = {0};
int32_t sz = taosArrayGetSize(dataBlocks);
for (int32_t i = 0; i < sz; i++) {
SSDataBlock* pDataBlock = taosArrayGet(dataBlocks, i);
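The `= {0}` initializer above is the pattern this commit applies throughout: stack buffers later handed to string APIs become valid empty C strings from the start, so a writer that fills them partially, or not at all, cannot leave unterminated garbage behind. A standalone illustration (not repository code) follows; note that for buffers with static storage duration, such as the `static char buf[96]` in the next hunk, C already guarantees zero-initialization, so the initializer there is harmless but redundant.

```c
// Why the recurring `char buf[N] = {0};` change matters: read() and similar
// APIs do not NUL-terminate, so printing the buffer afterwards would read
// indeterminate bytes if it were left uninitialized. (Standalone illustration.)
#include <stdio.h>
#include <unistd.h>

void dump_first_bytes(int fd) {
  char buf[128] = {0};                        // always a valid C string
  ssize_t n = read(fd, buf, sizeof(buf) - 1); // leave room for the final NUL
  if (n < 0) n = 0;
  printf("read %zd bytes: %s\n", n, buf);     // safe even if n == 0
}
```
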
@@ -783,7 +783,7 @@ int64_t taosTimeTruncate(int64_t t, const SInterval* pInterval, int32_t precisio
// 2020-07-03 17:48:42
// and the parameter can also be a variable.
const char* fmtts(int64_t ts) {
static char buf[96];
static char buf[96] = {0};
size_t pos = 0;
struct tm tm;

@@ -369,8 +369,8 @@ int32_t mndProcessSyncMsg(SRpcMsg *pMsg) {
mError("failed to process sync msg:%p type:%s since %s", pMsg, TMSG_INFO(pMsg->msgType), terrstr());
return TAOS_SYNC_PROPOSE_OTHER_ERROR;
}

char logBuf[512];
char logBuf[512] = {0};
char *syncNodeStr = sync2SimpleStr(pMgmt->sync);
snprintf(logBuf, sizeof(logBuf), "==vnodeProcessSyncReq== msgType:%d, syncNode: %s", pMsg->msgType, syncNodeStr);
syncRpcMsgLog2(logBuf, pMsg);

@@ -68,37 +68,11 @@ void mndRestoreFinish(struct SSyncFSM *pFsm) {
mInfo("mnode sync restore finished");
mndTransPullup(pMnode);
mndSetRestore(pMnode, true);
} else {
mInfo("mnode sync restore finished, and will set ready after first deploy");
}
}

int32_t mndSnapshotRead(struct SSyncFSM *pFsm, const SSnapshot *pSnapshot, void **ppIter, char **ppBuf, int32_t *len) {
SMnode *pMnode = pFsm->data;
mInfo("start to read snapshot from sdb");

// sdbStartRead
// sdbDoRead
// sdbStopRead

return 0;
}

int32_t mndSnapshotApply(struct SSyncFSM *pFsm, const SSnapshot *pSnapshot, char *pBuf, int32_t len) {
SMnode *pMnode = pFsm->data;

// sdbStartWrite
// sdbDoWrite

mndSetRestore(pMnode, false);
mInfo("start to apply snapshot to sdb");

// sdbStopWrite
mInfo("successfully to apply snapshot to sdb");
mndSetRestore(pMnode, true);

// taosMemoryFree(pBuf);
return 0;
}

void mndReConfig(struct SSyncFSM *pFsm, SSyncCfg newCfg, SReConfigCbMeta cbMeta) {
SMnode *pMnode = pFsm->data;
SSyncMgmt *pMgmt = &pMnode->syncMgmt;

@@ -115,20 +89,55 @@ void mndReConfig(struct SSyncFSM *pFsm, SSyncCfg newCfg, SReConfigCbMeta cbMeta)
}
}

int32_t mndSnapshotStartRead(struct SSyncFSM *pFsm, void **ppReader) {
mInfo("start to read snapshot from sdb");
SMnode *pMnode = pFsm->data;
return sdbStartRead(pMnode->pSdb, (SSdbIter **)ppReader);
}

int32_t mndSnapshotStopRead(struct SSyncFSM *pFsm, void *pReader) {
mInfo("stop to read snapshot from sdb");
SMnode *pMnode = pFsm->data;
return sdbStopRead(pMnode->pSdb, pReader);
}

int32_t mndSnapshotDoRead(struct SSyncFSM *pFsm, void *pReader, void **ppBuf, int32_t *len) {
SMnode *pMnode = pFsm->data;
return sdbDoRead(pMnode->pSdb, pReader, ppBuf, len);
}

int32_t mndSnapshotStartWrite(struct SSyncFSM *pFsm, void **ppWriter) {
mInfo("start to apply snapshot to sdb");
SMnode *pMnode = pFsm->data;
return sdbStartWrite(pMnode->pSdb, (SSdbIter **)ppWriter);
}

int32_t mndSnapshotStopWrite(struct SSyncFSM *pFsm, void *pWriter, bool isApply) {
mInfo("stop to apply snapshot to sdb, apply:%d", isApply);
SMnode *pMnode = pFsm->data;
return sdbStopWrite(pMnode->pSdb, pWriter, isApply);
}

int32_t mndSnapshotDoWrite(struct SSyncFSM *pFsm, void *pWriter, void *pBuf, int32_t len) {
SMnode *pMnode = pFsm->data;
return sdbDoWrite(pMnode->pSdb, pWriter, pBuf, len);
}

SSyncFSM *mndSyncMakeFsm(SMnode *pMnode) {
SSyncFSM *pFsm = taosMemoryCalloc(1, sizeof(SSyncFSM));
pFsm->data = pMnode;

pFsm->FpCommitCb = mndSyncCommitMsg;
pFsm->FpPreCommitCb = NULL;
pFsm->FpRollBackCb = NULL;

pFsm->FpGetSnapshot = mndSyncGetSnapshot;
pFsm->FpRestoreFinishCb = mndRestoreFinish;
pFsm->FpSnapshotRead = mndSnapshotRead;
pFsm->FpSnapshotApply = mndSnapshotApply;
pFsm->FpReConfigCb = mndReConfig;

pFsm->FpGetSnapshot = mndSyncGetSnapshot;
pFsm->FpSnapshotStartRead = mndSnapshotStartRead;
pFsm->FpSnapshotStopRead = mndSnapshotStopRead;
pFsm->FpSnapshotDoRead = mndSnapshotDoRead;
pFsm->FpSnapshotStartWrite = mndSnapshotStartWrite;
pFsm->FpSnapshotStopWrite = mndSnapshotStopWrite;
pFsm->FpSnapshotDoWrite = mndSnapshotDoWrite;
return pFsm;
}

@@ -522,7 +522,6 @@ int32_t sdbDoRead(SSdb *pSdb, SSdbIter *pIter, void **ppBuf, int32_t *len) {
void *pBuf = taosMemoryCalloc(1, maxlen);
if (pBuf == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
sdbCloseIter(pIter);
return -1;
}

@@ -260,7 +260,7 @@ int vnodeProcessSyncReq(SVnode *pVnode, SRpcMsg *pMsg, SRpcMsg **pRsp) {

SMsgHead *pHead = pMsg->pCont;

char logBuf[512];
char logBuf[512] = {0};
char *syncNodeStr = sync2SimpleStr(pVnode->sync);
snprintf(logBuf, sizeof(logBuf), "==vnodeProcessSyncReq== msgType:%d, syncNode: %s", pMsg->msgType, syncNodeStr);
syncRpcMsgLog2(logBuf, pMsg);

@@ -80,7 +80,7 @@ void vnodeSyncCommitMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta)
}

if (cbMeta.index > beginIndex) {
char logBuf[256];
char logBuf[256] = {0};
snprintf(
logBuf, sizeof(logBuf),
"==callback== ==CommitCb== execute, pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s, beginIndex :%ld\n",

@@ -115,7 +115,7 @@ void vnodeSyncCommitMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta)
tmsgPutToQueue(&(pVnode->msgCb), APPLY_QUEUE, &applyMsg);

} else {
char logBuf[256];
char logBuf[256] = {0};
snprintf(logBuf, sizeof(logBuf),
"==callback== ==CommitCb== do not execute, pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s, "
"beginIndex :%ld\n",

@@ -126,7 +126,7 @@ void vnodeSyncCommitMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta)
}

void vnodeSyncPreCommitMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) {
char logBuf[256];
char logBuf[256] = {0};
snprintf(logBuf, sizeof(logBuf),
"==callback== ==PreCommitCb== pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s \n", pFsm, cbMeta.index,
cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state));

@@ -134,7 +134,7 @@ void vnodeSyncPreCommitMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMet
}

void vnodeSyncRollBackMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) {
char logBuf[256];
char logBuf[256] = {0};
snprintf(logBuf, sizeof(logBuf), "==callback== ==RollBackCb== pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s \n",
pFsm, cbMeta.index, cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state));
syncRpcMsgLog2(logBuf, (SRpcMsg *)pMsg);

@@ -142,14 +142,13 @@ void vnodeSyncRollBackMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta

SSyncFSM *vnodeSyncMakeFsm(SVnode *pVnode) {
SSyncFSM *pFsm = taosMemoryCalloc(1, sizeof(SSyncFSM));
memset(pFsm, 0, sizeof(*pFsm));
pFsm->data = pVnode;
pFsm->FpCommitCb = vnodeSyncCommitMsg;
pFsm->FpPreCommitCb = vnodeSyncPreCommitMsg;
pFsm->FpRollBackCb = vnodeSyncRollBackMsg;
pFsm->FpGetSnapshot = vnodeSyncGetSnapshot;
pFsm->FpRestoreFinishCb = NULL;
pFsm->FpSnapshotRead = NULL;
pFsm->FpSnapshotApply = NULL;
pFsm->FpReConfigCb = NULL;

return pFsm;

@@ -97,11 +97,11 @@ if(NOT TD_WINDOWS)
NAME idxtest
COMMAND idxTest
)
add_test(
NAME idxJsonUT
COMMAND idxJsonUT
)
endif(NOT TD_WINDOWS)
add_test(
NAME idxJsonUT
COMMAND idxJsonUT
)
add_test(
NAME idxUtilUT
COMMAND idxUtilUT

@@ -893,7 +893,7 @@ int32_t toISO8601Function(SScalarParam *pInput, int32_t inputNum, SScalarParam *
memmove(tzInfo + fracLen, tzInfo, strlen(tzInfo));
}

char tmp[32];
char tmp[32] = {0};
sprintf(tmp, ".%s", fraction);
memcpy(tzInfo, tmp, fracLen);
len += fracLen;

@@ -41,7 +41,7 @@ uint64_t schGenUUID(void) {
static int32_t requestSerialId = 0;

if (hashId == 0) {
char uid[64];
char uid[64] = {0};
int32_t code = taosGetSystemUUID(uid, tListLen(uid));
if (code != TSDB_CODE_SUCCESS) {
qError("Failed to get the system uid, reason:%s", tstrerror(TAOS_SYSTEM_ERROR(errno)));

@@ -67,7 +67,7 @@ static int32_t streamBuildExecMsg(SStreamTask* pTask, SArray* data, SRpcMsg* pMs

} else if (pTask->dispatchType == TASK_DISPATCH__SHUFFLE) {
// TODO use general name rule of schemaless
char ctbName[TSDB_TABLE_FNAME_LEN + 22];
char ctbName[TSDB_TABLE_FNAME_LEN + 22] = {0};
// all groupId must be the same in an array
SSDataBlock* pBlock = taosArrayGet(data, 0);
sprintf(ctbName, "%s:%ld", pTask->shuffleDispatcher.stbFullName, pBlock->info.groupId);

@@ -28,10 +28,12 @@ extern "C" {
#include "taosdef.h"

typedef struct SSyncSnapshotSender {
bool isStart;
int32_t progressIndex;
int32_t sending;
int32_t received;
bool finish;
void * pCurrentBlock;
int32_t len;
int32_t blockLen;
int64_t sendingMS;
SSnapshot *pSnapshot;
SSyncNode *pSyncNode;
} SSyncSnapshotSender;

@@ -43,7 +45,8 @@ cJSON * snapshotSender2Json(SSyncSnapshotSender *pSender);
char * snapshotSender2Str(SSyncSnapshotSender *pSender);

typedef struct SSyncSnapshotReceiver {
bool isStart;
bool start;
int32_t received;
int32_t progressIndex;
void * pCurrentBlock;
int32_t len;

@@ -89,7 +89,7 @@
int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) {
int32_t ret = 0;

char logBuf[128];
char logBuf[128] = {0};
snprintf(logBuf, sizeof(logBuf), "==syncNodeOnAppendEntriesCb== term:%lu", ths->pRaftStore->currentTerm);
syncAppendEntriesLog2(logBuf, pMsg);

@@ -38,7 +38,7 @@
int32_t syncNodeOnAppendEntriesReplyCb(SSyncNode* ths, SyncAppendEntriesReply* pMsg) {
int32_t ret = 0;

char logBuf[128];
char logBuf[128] = {0};
snprintf(logBuf, sizeof(logBuf), "==syncNodeOnAppendEntriesReplyCb== term:%lu", ths->pRaftStore->currentTerm);
syncAppendEntriesReplyLog2(logBuf, pMsg);

@@ -57,7 +57,7 @@ int32_t syncNodeOnAppendEntriesReplyCb(SSyncNode* ths, SyncAppendEntriesReply* p
// }

if (pMsg->term > ths->pRaftStore->currentTerm) {
char logBuf[128];
char logBuf[128] = {0};
snprintf(logBuf, sizeof(logBuf), "syncNodeOnAppendEntriesReplyCb error term, receive:%lu current:%lu", pMsg->term,
ths->pRaftStore->currentTerm);
syncNodeLog2(logBuf, ths);

@@ -74,7 +74,7 @@ int32_t syncIOSendMsg(const SEpSet *pEpSet, SRpcMsg *pMsg) {
{
syncUtilMsgNtoH(pMsg->pCont);

char logBuf[256];
char logBuf[256] = {0};
snprintf(logBuf, sizeof(logBuf), "==syncIOSendMsg== %s:%d", pEpSet->eps[0].fqdn, pEpSet->eps[0].port);
syncRpcMsgLog2(logBuf, pMsg);

@@ -89,7 +89,7 @@ int32_t syncIOSendMsg(const SEpSet *pEpSet, SRpcMsg *pMsg) {

int32_t syncIOEqMsg(const SMsgCb *msgcb, SRpcMsg *pMsg) {
int32_t ret = 0;
char logBuf[128];
char logBuf[128] = {0};
syncRpcMsgLog2((char *)"==syncIOEqMsg==", pMsg);

SRpcMsg *pTemp;

@@ -76,7 +76,7 @@ SyncIndex syncIndexMgrGetIndex(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaf
}

cJSON *syncIndexMgr2Json(SSyncIndexMgr *pSyncIndexMgr) {
char u64buf[128];
char u64buf[128] = {0};
cJSON *pRoot = cJSON_CreateObject();

if (pSyncIndexMgr != NULL) {

@@ -815,7 +815,7 @@ int32_t syncNodeSendMsgByInfo(const SNodeInfo* nodeInfo, SSyncNode* pSyncNode, S
}

cJSON* syncNode2Json(const SSyncNode* pSyncNode) {
char u64buf[128];
char u64buf[128] = {0};
cJSON* pRoot = cJSON_CreateObject();

if (pSyncNode != NULL) {

@@ -1338,7 +1338,7 @@ static int32_t syncNodeAppendNoop(SSyncNode* ths) {
// on message ----
int32_t syncNodeOnPingCb(SSyncNode* ths, SyncPing* pMsg) {
// log state
char logBuf[1024];
char logBuf[1024] = {0};
snprintf(logBuf, sizeof(logBuf),
"==syncNodeOnPingCb== vgId:%d, state: %d, %s, term:%lu electTimerLogicClock:%lu, "
"electTimerLogicClockUser:%lu, electTimerMS:%d",

@@ -215,7 +215,7 @@ SyncTimeout* syncTimeoutFromRpcMsg2(const SRpcMsg* pRpcMsg) {
}

cJSON* syncTimeout2Json(const SyncTimeout* pMsg) {
char u64buf[128];
char u64buf[128] = {0};
cJSON* pRoot = cJSON_CreateObject();

if (pMsg != NULL) {

@@ -442,7 +442,7 @@ SyncPing* syncPingFromRpcMsg2(const SRpcMsg* pRpcMsg) {
}

cJSON* syncPing2Json(const SyncPing* pMsg) {
char u64buf[128];
char u64buf[128] = {0};
cJSON* pRoot = cJSON_CreateObject();

if (pMsg != NULL) {

@@ -456,7 +456,7 @@ cJSON* syncPing2Json(const SyncPing* pMsg) {
{
uint64_t u64 = pMsg->srcId.addr;
cJSON* pTmp = pSrcId;
char host[128];
char host[128] = {0};
uint16_t port;
syncUtilU642Addr(u64, host, sizeof(host), &port);
cJSON_AddStringToObject(pTmp, "addr_host", host);
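The recurring host/port pairs in these JSON serializers are decoded from a single uint64_t by syncUtilU642Addr. The sketch below illustrates the packing idea; the exact bit layout (IPv4 address in the high 32 bits, port in the low bits) is an assumption, not taken from the repository:

```c
// Illustration of packing an IPv4 address and port into one uint64_t, in the
// spirit of syncUtilAddr2U64/syncUtilU642Addr. The layout is an assumption.
#include <stdint.h>

static uint64_t addr2U64(uint32_t ipv4, uint16_t port) {
  return ((uint64_t)ipv4 << 32) | port;  // address high, port low
}

static void u642Addr(uint64_t u64, uint32_t *ipv4, uint16_t *port) {
  *ipv4 = (uint32_t)(u64 >> 32);
  *port = (uint16_t)(u64 & 0xFFFF);
}
```
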
@@ -471,7 +471,7 @@ cJSON* syncPing2Json(const SyncPing* pMsg) {
{
uint64_t u64 = pMsg->destId.addr;
cJSON* pTmp = pDestId;
char host[128];
char host[128] = {0};
uint16_t port;
syncUtilU642Addr(u64, host, sizeof(host), &port);
cJSON_AddStringToObject(pTmp, "addr_host", host);

@@ -702,7 +702,7 @@ SyncPingReply* syncPingReplyFromRpcMsg2(const SRpcMsg* pRpcMsg) {
}

cJSON* syncPingReply2Json(const SyncPingReply* pMsg) {
char u64buf[128];
char u64buf[128] = {0};
cJSON* pRoot = cJSON_CreateObject();

if (pMsg != NULL) {

@@ -716,7 +716,7 @@ cJSON* syncPingReply2Json(const SyncPingReply* pMsg) {
{
uint64_t u64 = pMsg->srcId.addr;
cJSON* pTmp = pSrcId;
char host[128];
char host[128] = {0};
uint16_t port;
syncUtilU642Addr(u64, host, sizeof(host), &port);
cJSON_AddStringToObject(pTmp, "addr_host", host);

@@ -731,7 +731,7 @@ cJSON* syncPingReply2Json(const SyncPingReply* pMsg) {
{
uint64_t u64 = pMsg->destId.addr;
cJSON* pTmp = pDestId;
char host[128];
char host[128] = {0};
uint16_t port;
syncUtilU642Addr(u64, host, sizeof(host), &port);
cJSON_AddStringToObject(pTmp, "addr_host", host);

@@ -869,7 +869,7 @@ SyncClientRequest* syncClientRequestFromRpcMsg2(const SRpcMsg* pRpcMsg) {
}

cJSON* syncClientRequest2Json(const SyncClientRequest* pMsg) {
char u64buf[128];
char u64buf[128] = {0};
cJSON* pRoot = cJSON_CreateObject();

if (pMsg != NULL) {

@@ -995,7 +995,7 @@ SyncRequestVote* syncRequestVoteFromRpcMsg2(const SRpcMsg* pRpcMsg) {
}

cJSON* syncRequestVote2Json(const SyncRequestVote* pMsg) {
char u64buf[128];
char u64buf[128] = {0};
cJSON* pRoot = cJSON_CreateObject();

if (pMsg != NULL) {

@@ -1009,7 +1009,7 @@ cJSON* syncRequestVote2Json(const SyncRequestVote* pMsg) {
{
uint64_t u64 = pMsg->srcId.addr;
cJSON* pTmp = pSrcId;
char host[128];
char host[128] = {0};
uint16_t port;
syncUtilU642Addr(u64, host, sizeof(host), &port);
cJSON_AddStringToObject(pTmp, "addr_host", host);

@@ -1023,7 +1023,7 @@ cJSON* syncRequestVote2Json(const SyncRequestVote* pMsg) {
{
uint64_t u64 = pMsg->destId.addr;
cJSON* pTmp = pDestId;
char host[128];
char host[128] = {0};
uint16_t port;
syncUtilU642Addr(u64, host, sizeof(host), &port);
cJSON_AddStringToObject(pTmp, "addr_host", host);

@@ -1144,7 +1144,7 @@ SyncRequestVoteReply* syncRequestVoteReplyFromRpcMsg2(const SRpcMsg* pRpcMsg) {
}

cJSON* syncRequestVoteReply2Json(const SyncRequestVoteReply* pMsg) {
char u64buf[128];
char u64buf[128] = {0};
cJSON* pRoot = cJSON_CreateObject();

if (pMsg != NULL) {

@@ -1158,7 +1158,7 @@ cJSON* syncRequestVoteReply2Json(const SyncRequestVoteReply* pMsg) {
{
uint64_t u64 = pMsg->srcId.addr;
cJSON* pTmp = pSrcId;
char host[128];
char host[128] = {0};
uint16_t port;
syncUtilU642Addr(u64, host, sizeof(host), &port);
cJSON_AddStringToObject(pTmp, "addr_host", host);

@@ -1172,7 +1172,7 @@ cJSON* syncRequestVoteReply2Json(const SyncRequestVoteReply* pMsg) {
{
uint64_t u64 = pMsg->destId.addr;
cJSON* pTmp = pDestId;
char host[128];
char host[128] = {0};
uint16_t port;
syncUtilU642Addr(u64, host, sizeof(host), &port);
cJSON_AddStringToObject(pTmp, "addr_host", host);

@@ -1292,7 +1292,7 @@ SyncAppendEntries* syncAppendEntriesFromRpcMsg2(const SRpcMsg* pRpcMsg) {
}

cJSON* syncAppendEntries2Json(const SyncAppendEntries* pMsg) {
char u64buf[128];
char u64buf[128] = {0};
cJSON* pRoot = cJSON_CreateObject();

if (pMsg != NULL) {

@@ -1306,7 +1306,7 @@ cJSON* syncAppendEntries2Json(const SyncAppendEntries* pMsg) {
{
uint64_t u64 = pMsg->srcId.addr;
cJSON* pTmp = pSrcId;
char host[128];
char host[128] = {0};
uint16_t port;
syncUtilU642Addr(u64, host, sizeof(host), &port);
cJSON_AddStringToObject(pTmp, "addr_host", host);

@@ -1321,7 +1321,7 @@ cJSON* syncAppendEntries2Json(const SyncAppendEntries* pMsg) {
{
uint64_t u64 = pMsg->destId.addr;
cJSON* pTmp = pDestId;
char host[128];
char host[128] = {0};
uint16_t port;
syncUtilU642Addr(u64, host, sizeof(host), &port);
cJSON_AddStringToObject(pTmp, "addr_host", host);

@@ -1456,7 +1456,7 @@ SyncAppendEntriesReply* syncAppendEntriesReplyFromRpcMsg2(const SRpcMsg
}

cJSON* syncAppendEntriesReply2Json(const SyncAppendEntriesReply* pMsg) {
char u64buf[128];
char u64buf[128] = {0};
cJSON* pRoot = cJSON_CreateObject();

if (pMsg != NULL) {

@@ -1470,7 +1470,7 @@ cJSON* syncAppendEntriesReply2Json(const SyncAppendEntriesReply* pMsg) {
{
uint64_t u64 = pMsg->srcId.addr;
cJSON* pTmp = pSrcId;
char host[128];
char host[128] = {0};
uint16_t port;
syncUtilU642Addr(u64, host, sizeof(host), &port);
cJSON_AddStringToObject(pTmp, "addr_host", host);

@@ -1485,7 +1485,7 @@ cJSON* syncAppendEntriesReply2Json(const SyncAppendEntriesReply* pMsg) {
{
uint64_t u64 = pMsg->destId.addr;
cJSON* pTmp = pDestId;
char host[128];
char host[128] = {0};
uint16_t port;
syncUtilU642Addr(u64, host, sizeof(host), &port);
cJSON_AddStringToObject(pTmp, "addr_host", host);

@@ -1624,7 +1624,7 @@ void syncApplyMsg2OriginalRpcMsg(const SyncApplyMsg* pMsg, SRpcMsg* pOriginalRpc
}

cJSON* syncApplyMsg2Json(const SyncApplyMsg* pMsg) {
char u64buf[128];
char u64buf[128] = {0};
cJSON* pRoot = cJSON_CreateObject();

if (pMsg != NULL) {

@@ -28,7 +28,7 @@ SRaftCfg *raftCfgOpen(const char *path) {

taosLSeekFile(pCfg->pFile, 0, SEEK_SET);

char buf[1024];
char buf[1024] = {0};
int len = taosReadFile(pCfg->pFile, buf, sizeof(buf));
assert(len > 0);

@@ -51,15 +51,15 @@ int32_t raftCfgPersist(SRaftCfg *pRaftCfg) {
char *s = raftCfg2Str(pRaftCfg);
taosLSeekFile(pRaftCfg->pFile, 0, SEEK_SET);

char buf[CONFIG_FILE_LEN];
char buf[CONFIG_FILE_LEN] = {0};
memset(buf, 0, sizeof(buf));
ASSERT(strlen(s) + 1 <= CONFIG_FILE_LEN);
snprintf(buf, sizeof(buf), "%s", s);
int64_t ret = taosWriteFile(pRaftCfg->pFile, buf, sizeof(buf));
assert(ret == sizeof(buf));

//int64_t ret = taosWriteFile(pRaftCfg->pFile, s, strlen(s) + 1);
//assert(ret == strlen(s) + 1);
// int64_t ret = taosWriteFile(pRaftCfg->pFile, s, strlen(s) + 1);
// assert(ret == strlen(s) + 1);

taosMemoryFree(s);
taosFsyncFile(pRaftCfg->pFile);

@@ -67,7 +67,7 @@ int32_t raftCfgPersist(SRaftCfg *pRaftCfg) {
}

cJSON *syncCfg2Json(SSyncCfg *pSyncCfg) {
char u64buf[128];
char u64buf[128] = {0};
cJSON *pRoot = cJSON_CreateObject();

if (pSyncCfg != NULL) {

@@ -170,17 +170,17 @@ int32_t raftCfgCreateFile(SSyncCfg *pCfg, int8_t isStandBy, const char *path) {
SRaftCfg raftCfg;
raftCfg.cfg = *pCfg;
raftCfg.isStandBy = isStandBy;
char * s = raftCfg2Str(&raftCfg);
char *s = raftCfg2Str(&raftCfg);

char buf[CONFIG_FILE_LEN];
char buf[CONFIG_FILE_LEN] = {0};
memset(buf, 0, sizeof(buf));
ASSERT(strlen(s) + 1 <= CONFIG_FILE_LEN);
snprintf(buf, sizeof(buf), "%s", s);
int64_t ret = taosWriteFile(pFile, buf, sizeof(buf));
assert(ret == sizeof(buf));

//int64_t ret = taosWriteFile(pFile, s, strlen(s) + 1);
//assert(ret == strlen(s) + 1);
// int64_t ret = taosWriteFile(pFile, s, strlen(s) + 1);
// assert(ret == strlen(s) + 1);

taosMemoryFree(s);
taosCloseFile(&pFile);

@@ -107,7 +107,7 @@ SSyncRaftEntry* syncEntryDeserialize(const char* buf, uint32_t len) {
}

cJSON* syncEntry2Json(const SSyncRaftEntry* pEntry) {
char u64buf[128];
char u64buf[128] = {0};
cJSON* pRoot = cJSON_CreateObject();

if (pEntry != NULL) {

@@ -190,7 +190,7 @@ SSyncRaftEntry* logStoreGetLastEntry(SSyncLogStore* pLogStore) {
}

cJSON* logStore2Json(SSyncLogStore* pLogStore) {
char u64buf[128];
char u64buf[128] = {0};
SSyncLogStoreData* pData = (SSyncLogStoreData*)pLogStore->data;
cJSON* pRoot = cJSON_CreateObject();

@@ -227,7 +227,7 @@ char* logStore2Str(SSyncLogStore* pLogStore) {
}

cJSON* logStoreSimple2Json(SSyncLogStore* pLogStore) {
char u64buf[128];
char u64buf[128] = {0};
SSyncLogStoreData* pData = (SSyncLogStoreData*)pLogStore->data;
cJSON* pRoot = cJSON_CreateObject();

@@ -34,7 +34,7 @@ SRaftStore *raftStoreOpen(const char *path) {
memset(pRaftStore, 0, sizeof(*pRaftStore));
snprintf(pRaftStore->path, sizeof(pRaftStore->path), "%s", path);

char storeBuf[RAFT_STORE_BLOCK_SIZE];
char storeBuf[RAFT_STORE_BLOCK_SIZE] = {0};
memset(storeBuf, 0, sizeof(storeBuf));

if (!raftStoreFileExist(pRaftStore->path)) {

@@ -84,7 +84,7 @@ int32_t raftStorePersist(SRaftStore *pRaftStore) {
assert(pRaftStore != NULL);

int32_t ret;
char storeBuf[RAFT_STORE_BLOCK_SIZE];
char storeBuf[RAFT_STORE_BLOCK_SIZE] = {0};
ret = raftStoreSerialize(pRaftStore, storeBuf, sizeof(storeBuf));
assert(ret == 0);

@@ -107,7 +107,7 @@ int32_t raftStoreSerialize(SRaftStore *pRaftStore, char *buf, size_t len) {

cJSON *pRoot = cJSON_CreateObject();

char u64Buf[128];
char u64Buf[128] = {0};
snprintf(u64Buf, sizeof(u64Buf), "%lu", pRaftStore->currentTerm);
cJSON_AddStringToObject(pRoot, "current_term", u64Buf);

@@ -117,7 +117,7 @@ int32_t raftStoreSerialize(SRaftStore *pRaftStore, char *buf, size_t len) {
cJSON_AddNumberToObject(pRoot, "vote_for_vgid", pRaftStore->voteFor.vgId);

uint64_t u64 = pRaftStore->voteFor.addr;
char host[128];
char host[128] = {0};
uint16_t port;
syncUtilU642Addr(u64, host, sizeof(host), &port);
cJSON_AddStringToObject(pRoot, "addr_host", host);

@@ -184,7 +184,7 @@ void raftStoreSetTerm(SRaftStore *pRaftStore, SyncTerm term) {
int32_t raftStoreFromJson(SRaftStore *pRaftStore, cJSON *pJson) { return 0; }

cJSON *raftStore2Json(SRaftStore *pRaftStore) {
char u64buf[128];
char u64buf[128] = {0};
cJSON *pRoot = cJSON_CreateObject();

if (pRaftStore != NULL) {

@@ -196,7 +196,7 @@ cJSON *raftStore2Json(SRaftStore *pRaftStore) {
cJSON_AddStringToObject(pVoteFor, "addr", u64buf);
{
uint64_t u64 = pRaftStore->voteFor.addr;
char host[128];
char host[128] = {0};
uint16_t port;
syncUtilU642Addr(u64, host, sizeof(host), &port);
cJSON_AddStringToObject(pVoteFor, "addr_host", host);

@@ -44,7 +44,7 @@
int32_t syncNodeOnRequestVoteCb(SSyncNode* ths, SyncRequestVote* pMsg) {
int32_t ret = 0;

char logBuf[128];
char logBuf[128] = {0};
snprintf(logBuf, sizeof(logBuf), "==syncNodeOnRequestVoteCb== term:%lu", ths->pRaftStore->currentTerm);
syncRequestVoteLog2(logBuf, pMsg);

@@ -39,7 +39,7 @@
int32_t syncNodeOnRequestVoteReplyCb(SSyncNode* ths, SyncRequestVoteReply* pMsg) {
int32_t ret = 0;

char logBuf[128];
char logBuf[128] = {0};
snprintf(logBuf, sizeof(logBuf), "==syncNodeOnRequestVoteReplyCb== term:%lu", ths->pRaftStore->currentTerm);
syncRequestVoteReplyLog2(logBuf, pMsg);

@@ -56,7 +56,7 @@ int32_t syncNodeOnRequestVoteReplyCb(SSyncNode* ths, SyncRequestVoteReply* pMsg)
// }

if (pMsg->term > ths->pRaftStore->currentTerm) {
char logBuf[128];
char logBuf[128] = {0};
snprintf(logBuf, sizeof(logBuf), "syncNodeOnRequestVoteReplyCb error term, receive:%lu current:%lu", pMsg->term,
ths->pRaftStore->currentTerm);
syncNodePrint2(logBuf, ths);

@@ -43,7 +43,7 @@ void syncUtilnodeInfo2EpSet(const SNodeInfo* pNodeInfo, SEpSet* pEpSet) {
}

void syncUtilraftId2EpSet(const SRaftId* raftId, SEpSet* pEpSet) {
char host[TSDB_FQDN_LEN];
char host[TSDB_FQDN_LEN] = {0};
uint16_t port;

syncUtilU642Addr(raftId->addr, host, sizeof(host), &port);

@@ -62,7 +62,7 @@ void syncUtilraftId2EpSet(const SRaftId* raftId, SEpSet* pEpSet) {
void syncUtilnodeInfo2raftId(const SNodeInfo* pNodeInfo, SyncGroupId vgId, SRaftId* raftId) {
uint32_t ipv4 = taosGetIpv4FromFqdn(pNodeInfo->nodeFqdn);
assert(ipv4 != 0xFFFFFFFF);
char ipbuf[128];
char ipbuf[128] = {0};
tinet_ntoa(ipbuf, ipv4);
raftId->addr = syncUtilAddr2U64(ipbuf, pNodeInfo->nodePort);
raftId->vgId = vgId;

@@ -106,7 +106,7 @@ int32_t syncUtilElectRandomMS(int32_t min, int32_t max) {
int32_t syncUtilQuorum(int32_t replicaNum) { return replicaNum / 2 + 1; }

cJSON* syncUtilNodeInfo2Json(const SNodeInfo* p) {
char u64buf[128];
char u64buf[128] = {0};
cJSON* pRoot = cJSON_CreateObject();

cJSON_AddStringToObject(pRoot, "nodeFqdn", p->nodeFqdn);

@@ -118,12 +118,12 @@ cJSON* syncUtilNodeInfo2Json(const SNodeInfo* p) {
}

cJSON* syncUtilRaftId2Json(const SRaftId* p) {
char u64buf[128];
char u64buf[128] = {0};
cJSON* pRoot = cJSON_CreateObject();

snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", p->addr);
cJSON_AddStringToObject(pRoot, "addr", u64buf);
char host[128];
char host[128] = {0};
uint16_t port;
syncUtilU642Addr(p->addr, host, sizeof(host), &port);
cJSON_AddStringToObject(pRoot, "host", host);

@@ -90,7 +90,7 @@ void voteGrantedReset(SVotesGranted *pVotesGranted, SyncTerm term) {
}

cJSON *voteGranted2Json(SVotesGranted *pVotesGranted) {
char u64buf[128];
char u64buf[128] = {0};
cJSON *pRoot = cJSON_CreateObject();

if (pVotesGranted != NULL) {

@@ -220,7 +220,7 @@ void votesRespondReset(SVotesRespond *pVotesRespond, SyncTerm term) {
}

cJSON *votesRespond2Json(SVotesRespond *pVotesRespond) {
char u64buf[128];
char u64buf[128] = {0};
cJSON *pRoot = cJSON_CreateObject();

if (pVotesRespond != NULL) {

@@ -42,7 +42,7 @@ void CommitCb(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta) {
}

if (cbMeta.index > beginIndex) {
char logBuf[256];
char logBuf[256] = {0};
snprintf(logBuf, sizeof(logBuf),
"==callback== ==CommitCb== pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s flag:%lu\n", pFsm,
cbMeta.index, cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state), cbMeta.flag);

@@ -53,7 +53,7 @@ void CommitCb(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta) {
}

void PreCommitCb(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta) {
char logBuf[256];
char logBuf[256] = {0};
snprintf(logBuf, sizeof(logBuf),
"==callback== ==PreCommitCb== pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s flag:%lu\n", pFsm,
cbMeta.index, cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state), cbMeta.flag);

@@ -84,14 +84,15 @@ void ReConfigCb(struct SSyncFSM* pFsm, SSyncCfg newCfg, SReConfigCbMeta cbMeta)

SSyncFSM* createFsm() {
SSyncFSM* pFsm = (SSyncFSM*)taosMemoryMalloc(sizeof(SSyncFSM));
memset(pFsm, 0, sizeof(*pFsm));

pFsm->FpCommitCb = CommitCb;
pFsm->FpPreCommitCb = PreCommitCb;
pFsm->FpRollBackCb = RollBackCb;

pFsm->FpGetSnapshot = GetSnapshotCb;
pFsm->FpRestoreFinishCb = RestoreFinishCb;
pFsm->FpSnapshotApply = NULL;
pFsm->FpSnapshotRead = NULL;

pFsm->FpReConfigCb = ReConfigCb;

@@ -75,6 +75,7 @@ int32_t GetSnapshotCb(struct SSyncFSM *pFsm, SSnapshot *pSnapshot) {

void initFsm() {
pFsm = (SSyncFSM *)taosMemoryMalloc(sizeof(SSyncFSM));
memset(pFsm, 0, sizeof(*pFsm));
pFsm->FpCommitCb = CommitCb;
pFsm->FpPreCommitCb = PreCommitCb;
pFsm->FpRollBackCb = RollBackCb;

@@ -491,7 +491,7 @@ void taosDumpData(unsigned char *msg, int32_t len) {
if (!osLogSpaceAvailable()) return;
taosUpdateLogNums(DEBUG_DUMP);

char temp[256];
char temp[256] = {0};
int32_t i, pos = 0, c = 0;

for (i = 0; i < len; ++i) {

@@ -69,13 +69,13 @@ void taosStringBuilderAppendString(SStringBuilder* sb, const char* str) {
void taosStringBuilderAppendNull(SStringBuilder* sb) { taosStringBuilderAppendStringLen(sb, "null", 4); }

void taosStringBuilderAppendInteger(SStringBuilder* sb, int64_t v) {
char buf[64];
char buf[64] = {0};
size_t len = snprintf(buf, sizeof(buf), "%" PRId64, v);
taosStringBuilderAppendStringLen(sb, buf, TMIN(len, sizeof(buf)));
}

void taosStringBuilderAppendDouble(SStringBuilder* sb, double v) {
char buf[512];
char buf[512] = {0};
size_t len = snprintf(buf, sizeof(buf), "%.9lf", v);
taosStringBuilderAppendStringLen(sb, buf, TMIN(len, sizeof(buf)));
}

@@ -1,3 +1,4 @@

python3 .\test.py -f 0-others\taosShell.py
python3 .\test.py -f 0-others\taosShellError.py
python3 .\test.py -f 0-others\taosShellNetChk.py