Merge branch '3.0' of https://github.com/taosdata/TDengine into 3.0

commit f2d352d4f7
@@ -25,7 +25,7 @@ create_definition:
col_name column_definition

column_definition:
type_name [comment 'string_value'] [PRIMARY KEY]

table_options:
table_option ...
@@ -41,11 +41,12 @@ table_option: {

**More explanations**

1. The first column of a table MUST be of type TIMESTAMP. It is automatically set as the primary key.
2. In addition to the timestamp primary key column, an additional primary key column can be specified using the `PRIMARY KEY` keyword. The second column specified as the primary key must be of type integer or string (varchar); see the sketch after this list.
3. The maximum length of the table name is 192 bytes.
4. The maximum length of each row is 48 KB (64 KB since version 3.0.5.0); please note that the extra 2 bytes used by each BINARY/NCHAR/GEOMETRY column are also counted.
5. The name of a subtable can only consist of characters from the English alphabet, digits, and underscores. Table names can't start with a digit. Table names are case insensitive.
6. The maximum length in bytes must be specified when using BINARY/NCHAR/GEOMETRY types.
7. The escape character "\`" can be used to avoid conflicts between table names and reserved keywords. The above rules are bypassed when the escape character is used on a table name, but the upper limit for the name length is still valid. Table names specified using the escape character are case sensitive.
   For example, \`aBc\` and \`abc\` are different table names, but `abc` and `aBc` are the same table name because both are converted to `abc` internally.
   Only ASCII visible characters can be used with the escape character.
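A minimal sketch of item 2 above; the table name `meters_pk` and its columns are hypothetical and only illustrate the `[PRIMARY KEY]` clause:

```sql
-- Hypothetical table: ts is the timestamp primary key required as the first
-- column; device_id is the additional (composite) primary key column.
CREATE TABLE meters_pk (
    ts        TIMESTAMP,
    device_id VARCHAR(32) PRIMARY KEY,
    current   FLOAT,
    voltage   INT
);
```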
@@ -107,6 +108,7 @@ You can perform the following modifications on existing tables:

2. DROP COLUMN: deletes a column from the supertable.
3. MODIFY COLUMN: changes the length of the data type specified for the column. Note that you can only specify a length greater than the current length.
4. RENAME COLUMN: renames a specified column in the table.
5. The primary key column of a table cannot be modified, and primary key columns cannot be added or deleted using ADD/DROP COLUMN (see the sketch after this list).
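A minimal sketch of the operations listed above, assuming a hypothetical table `d1001` with a variable-length column `location`; the primary key column itself is excluded from these operations:

```sql
-- Hypothetical illustrations of ADD/MODIFY/RENAME/DROP COLUMN.
ALTER TABLE d1001 ADD COLUMN phase FLOAT;
ALTER TABLE d1001 MODIFY COLUMN location VARCHAR(64);  -- only a larger length is allowed
ALTER TABLE d1001 RENAME COLUMN phase phase_a;
ALTER TABLE d1001 DROP COLUMN phase_a;
```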

### Add a Column

@@ -147,6 +147,7 @@ Modifications to the table schema of a supertable take effect on all subtables w

- DROP TAG: deletes a tag from the supertable. When you delete a tag from a supertable, it is automatically deleted from all subtables within the supertable.
- MODIFY TAG: modifies the definition of a tag in the supertable. You can use this keyword to change the length of a BINARY or NCHAR tag column. Note that you can only specify a length greater than the current length.
- RENAME TAG: renames a specified tag in the supertable. When you rename a tag in a supertable, it is automatically renamed in all subtables within the supertable.
- Like ordinary tables, the primary key column of a supertable cannot be modified, and primary key columns cannot be added or deleted using ADD/DROP COLUMN (see the sketch after this list).
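A minimal sketch of the tag operations above, assuming a hypothetical supertable `meters` with a BINARY tag `location`:

```sql
-- Hypothetical illustrations; each change propagates to all subtables.
ALTER STABLE meters MODIFY TAG location BINARY(64);  -- only a larger length is allowed
ALTER STABLE meters RENAME TAG location loc;
ALTER STABLE meters DROP TAG loc;
```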

### Add a Column

@@ -57,6 +57,7 @@ INSERT INTO
```

6. However, an INSERT statement that writes data to multiple subtables can succeed for some tables and fail for others. This situation arises because vnodes perform write operations independently of each other. One vnode failing to write data does not affect the ability of other vnodes to write successfully.
7. The primary key column value must be specified and cannot be NULL.

**Normal Syntax**

1. The USING clause automatically creates the specified subtable if it does not exist. If it is unknown whether the table already exists, the table can be created automatically while inserting, using the SQL statement below. To use this functionality, a STable must be used as the template and tag values must be provided. Any tags that you do not specify are assigned a null value.
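A minimal sketch of automatic subtable creation during insert, assuming a hypothetical supertable `meters` whose tags are (location, groupId) and whose columns are (ts, current, voltage, phase):

```sql
-- Hypothetical: d21001 is created from "meters" if it does not yet exist; the
-- timestamp primary key value must be supplied and cannot be NULL.
INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2)
    VALUES (NOW, 10.2, 219, 0.32);
```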
@@ -504,7 +504,7 @@ TO_CHAR(ts, format_str_literal)

**Supported Formats**

| **Format**          | **Comment**                          | **Example**  |
| ------------------- | ------------------------------------ | ------------ |
| AM,am,PM,pm         | Meridiem indicator (without periods) | 07:00:00am   |
| A.M.,a.m.,P.M.,p.m. | Meridiem indicator (with periods)    | 07:00:00a.m. |
| YYYY,yyyy           | year, 4 or more digits               | 2023-10-10   |
@@ -955,6 +955,7 @@ FIRST(expr)

- FIRST(\*) can be used to get the first non-null value of all columns; when querying a supertable with multiResultFunctionStarReturnTags set to 0 (default), FIRST(\*) returns only the columns of the supertable; when set to 1, it returns the columns and tags of the supertable (see the sketch below).
- NULL is returned if all values of the specified column are NULL.
- A result is NOT returned if all columns in the result set are NULL.
- For a table with a composite primary key, the data with the smallest primary key value is returned.
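A minimal sketch, assuming a hypothetical supertable `meters` with a `voltage` column:

```sql
-- Hypothetical: first non-NULL value of every column of the supertable; whether
-- tag columns are included depends on multiResultFunctionStarReturnTags.
SELECT FIRST(*) FROM meters;

-- First non-NULL voltage only.
SELECT FIRST(voltage) FROM meters;
```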

### INTERP

@@ -988,6 +989,7 @@ ignore_null_values: {

- `INTERP` can be applied to a supertable by interpolating the primary-key-sorted data of all its child tables. It can also be used with `partition by tbname` when applied to a supertable to generate an interpolation on each single timeline.
- The pseudocolumn `_irowts` can be used along with `INTERP` to return the timestamps associated with the interpolation points (supported since version 3.0.2.0).
- The pseudocolumn `_isfilled` can be used along with `INTERP` to indicate whether a result is an original record or a data point generated by the interpolation algorithm (supported since version 3.0.3.0).
- For a table with a composite primary key, only the data with the smallest primary key value is used to generate the interpolation (see the sketch below).
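A minimal sketch combining the pseudocolumns above, assuming a hypothetical supertable `meters` and an illustrative time range:

```sql
-- Hypothetical: one interpolated timeline per child table, returning the
-- interpolation timestamp and whether each point was filled.
SELECT _irowts, _isfilled, INTERP(current)
FROM meters
PARTITION BY tbname
RANGE('2023-01-01 00:00:00', '2023-01-01 01:00:00')
EVERY(1m)
FILL(LINEAR);
```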

**Example**

@@ -1017,6 +1019,7 @@ LAST(expr)

- LAST(\*) can be used to get the last non-NULL value of all columns; when querying a supertable with multiResultFunctionStarReturnTags set to 0 (default), LAST(\*) returns only the columns of the supertable; when set to 1, it returns the columns and tags of the supertable.
- If the values of a column in the result set are all NULL, NULL is returned for that column; if all columns in the result set are NULL, no result is returned.
- When used on a STable, if there are multiple rows with the same (largest) timestamp in the result set, one of them is returned randomly, and it is not guaranteed that the same row is returned if the same query is run multiple times.
- For a table with a composite primary key, the data with the largest primary key value is returned.

### LAST_ROW
@@ -1038,6 +1041,7 @@ LAST_ROW(expr)

- LAST_ROW(\*) can be used to get the last value of all columns; when querying a supertable with multiResultFunctionStarReturnTags set to 0 (default), LAST_ROW(\*) returns only the columns of the supertable; when set to 1, it returns the columns and tags of the supertable.
- When used on a STable, if there are multiple rows with the same (largest) timestamp in the result set, one of them is returned randomly, and it is not guaranteed that the same row is returned if the same query is run multiple times.
- Can't be used with `INTERVAL`.
- Like `LAST`, for a table with a composite primary key, the data with the largest primary key value is returned (see the sketch below).
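A minimal sketch contrasting the two functions, assuming a hypothetical supertable `meters`:

```sql
-- Hypothetical: LAST returns the latest non-NULL value of the column, while
-- LAST_ROW returns the value from the latest row even if it is NULL.
SELECT LAST(current), LAST_ROW(current) FROM meters;
```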

### MAX

@@ -1144,7 +1148,7 @@ TOP(expr, k)
UNIQUE(expr)
```

**Description**: The values that occur for the first time in the specified column. The effect is similar to the `distinct` keyword. For a table with a composite primary key, only the data with the smallest primary key value is returned.

**Return value type**: Same as the data type of the column being operated upon

@@ -1190,7 +1194,7 @@ ignore_negative: {
}
```

**Description**: The derivative of a specific column. The time range can be specified by the parameter `time_interval`; the minimum allowed time range is 1 second (1s). The value of `ignore_negative` can be 0 or 1; 1 means negative values are ignored. For tables with a composite primary key, the data with the smallest primary key value is used to calculate the derivative.

**Return value type**: DOUBLE

@@ -1213,7 +1217,7 @@ ignore_negative: {
}
```

**Description**: The difference between each row and its previous row for a specific column. `ignore_negative` can be specified as 0 or 1; the default value is 1 if it is not specified. `1` means negative values are ignored. For tables with a composite primary key, the data with the smallest primary key value is used to calculate the difference.

**Return value type**: Same as the data type of the column being operated upon

@@ -1233,7 +1237,7 @@ ignore_negative: {
IRATE(expr)
```

**Description**: Instantaneous rate of a specific column. The last two samples in the specified time range are used to calculate the instantaneous rate. If the last sample value is smaller, then only the last sample value is used instead of the difference between the last two sample values. For tables with a composite primary key, the data with the smallest primary key value is used to calculate the rate.

**Return value type**: DOUBLE

@@ -1323,7 +1327,7 @@ STATEDURATION(expr, oper, val, unit)
TWA(expr)
```

**Description**: Time-weighted average of a specific column within a time range. For tables with a composite primary key, the data with the smallest primary key value is used to calculate the average.

**Return value type**: DOUBLE
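Minimal sketches of the functions described above, assuming a hypothetical subtable `d1001` with a `current` column; the time range is illustrative only:

```sql
-- Hypothetical usage of UNIQUE, DERIVATIVE, DIFF, IRATE, and TWA.
SELECT UNIQUE(current) FROM d1001;
SELECT DERIVATIVE(current, 1s, 1) FROM d1001;
SELECT DIFF(current, 1) FROM d1001;
SELECT IRATE(current) FROM d1001;
SELECT TWA(current) FROM d1001
  WHERE ts BETWEEN '2023-01-01 00:00:00' AND '2023-01-02 00:00:00';
```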
@@ -11,13 +11,14 @@ Because stream processing is built in to TDengine, you are no longer reliant on
## Create a Stream

```sql
CREATE STREAM [IF NOT EXISTS] stream_name [stream_options] INTO stb_name[(field1_name, field2_name [PRIMARY KEY], ...)] [TAGS (create_definition [, create_definition] ...)] SUBTABLE(expression) AS subquery
stream_options: {
 TRIGGER        [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time]
 WATERMARK      time
 IGNORE EXPIRED [0|1]
 DELETE_MARK    time
 FILL_HISTORY   [0|1]
 IGNORE UPDATE  [0|1]
}

```
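A minimal sketch of the syntax above; the stream name, target supertable, and column list are hypothetical, and the second target column is marked with `PRIMARY KEY` as the grammar permits:

```sql
-- Hypothetical: aggregate per group into the target supertable "power_stb",
-- writing "groupid" as the additional primary key column of the target table.
CREATE STREAM power_stream
  TRIGGER WINDOW_CLOSE
  IGNORE UPDATE 1
  INTO power_stb (wstart, groupid PRIMARY KEY, avg_current)
  AS SELECT _wstart, groupid, AVG(current)
     FROM meters PARTITION BY groupid INTERVAL(10s);
```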
@@ -32,7 +33,7 @@ subquery: SELECT [DISTINCT] select_list
 [window_clause]
```

Session windows, state windows, and sliding windows are supported. When you configure a session or state window for a supertable, you must use PARTITION BY TBNAME (a session-window sketch follows below). If the source table has a composite primary key, state windows, event windows, and count windows are not supported.

The Subtable Clause defines the naming rules for auto-created subtables; see the Partitions of Stream section below for details.
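A minimal sketch of a session-window stream on a supertable, as mentioned above; all names are hypothetical:

```sql
-- Hypothetical: session windows on a supertable require PARTITION BY TBNAME.
CREATE STREAM sess_stream
  INTO sess_out
  AS SELECT _wstart, SUM(current)
     FROM meters PARTITION BY TBNAME SESSION(ts, 10s);
```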
@@ -27,6 +27,7 @@ where:

- `tag_set` will be used as tags, with a format like `<tag_key>=<tag_value>,<tag_key>=<tag_value>`. Enter a space between `tag_set` and `field_set`.
- `field_set` will be used as data columns, with a format like `<field_key>=<field_value>,<field_key>=<field_value>`. Enter a space between `field_set` and `timestamp`.
- `timestamp` is the primary key timestamp corresponding to this row of data.
- Schemaless writing does not support writing data to tables with a second primary key column.

All data in tag_set is automatically converted to the NCHAR data type and does not require double quotes (").
@@ -39,7 +40,7 @@ In the schemaless writing data line protocol, each data item in the field_set ne

- Spaces, equals signs (=), commas (,), double quotes ("), and backslashes (\\) need to be escaped with a backslash (\\) in front. (All refer to the ASCII characters.) The rules are as follows:

| **Serial number** | **Element** | **Escape characters**     |
| ----------------- | ----------- | ------------------------- |
| 1                 | Measurement | Comma, Space              |
| 2                 | Tag key     | Comma, Equals Sign, Space |
| 3                 | Tag value   | Comma, Equals Sign, Space |

@@ -49,7 +50,7 @@ In the schemaless writing data line protocol, each data item in the field_set ne

With two contiguous backslashes, the first is interpreted as an escape character. Examples of the backslash escape rules are as follows:

| **Serial number** | **Backslashes** | **Interpreted as** |
| ----------------- | --------------- | ------------------ |
| 1                 | \               | \                  |
| 2                 | \\\\            | \                  |
| 3                 | \\\\\\          | \\\\               |
@@ -0,0 +1,92 @@
---
title: Configurable Column Compression
description: Configurable column storage compression method
---

# Configurable Storage Compression

Since TDengine 3.3.0.0, a more advanced compression feature is available: you can specify, for each column, whether to compress it, which compression method to use, and the compression level.

## Compression Terminology Definition

### Compression Level Definition

- Level 1 compression: encoding the data, which is essentially a form of compression.
- Level 2 compression: compressing the data blocks.

### Compression Algorithm Level

In this article, this specifically refers to the level within the secondary compression algorithm. For example, zstd offers at least 8 selectable levels, each with different performance; it is essentially a trade-off between compression ratio, compression speed, and decompression speed. To avoid the difficulty of choice, it is simplified into the following three levels:

- high: the highest compression ratio, with the worst compression speed and decompression speed.
- low: the best compression speed and decompression speed, with the lowest compression ratio.
- medium: balances compression ratio, compression speed, and decompression speed.

### Compression Algorithm List

- Encoding algorithm list (level 1 compression): simple8b, bit-packing, delta-i, delta-d, disabled

- Compression algorithm list (level 2 compression): lz4, zlib, zstd, tsz, xz, disabled

- Default compression algorithm list and applicable range for each data type

| Data Type | Optional Encoding Algorithm | Default Encoding Algorithm | Optional Compression Algorithm | Default Compression Algorithm | Default Compression Level |
| :------------------------------------------: | :--------------: | :---------: | :------------------: | :--: | :----: |
| tinyint/utinyint/smallint/usmallint/int/uint | simple8b         | simple8b    | lz4/zlib/zstd/xz     | lz4  | medium |
| bigint/ubigint/timestamp                     | simple8b/delta-i | delta-i     | lz4/zlib/zstd/xz     | lz4  | medium |
| float/double                                 | delta-d          | delta-d     | lz4/zlib/zstd/xz/tsz | tsz  | medium |
| binary/nchar                                 | disabled         | disabled    | lz4/zlib/zstd/xz     | lz4  | medium |
| bool                                         | bit-packing      | bit-packing | lz4/zlib/zstd/xz     | lz4  | medium |

Note: For floating-point types, if tsz is configured, its precision is determined by the global configuration of taosd. If tsz is configured but the lossy compression flag is not, lz4 is used for compression by default.

## SQL

### Create Table with Compression

```sql
CREATE TABLE [dbname.]tabname (colName colType [ENCODE 'encode_type'] [COMPRESS 'compress_type' [LEVEL 'level']] [, other create_definition] ...)
```

**Parameter Description**

- tabname: supertable or ordinary table name
- encode_type: level 1 compression; see the list above for the available values
- compress_type: level 2 compression; see the list above for the available values
- level: the level of the secondary compression; the default value is medium, and the abbreviations 'h'/'l'/'m' are supported

**Function Description**

- Specify the compression method for the column when creating a table.

### Change Compression Method

```sql
ALTER TABLE [db_name.]tabName MODIFY COLUMN colName [ENCODE 'encode_type'] [COMPRESS 'compress_type'] [LEVEL "high"]
```

**Parameter Description**

- tabName: table name; can be a supertable or an ordinary table
- colName: the column whose compression algorithm is to be changed; it can only be a normal column

**Function Description**

- Change the compression method of the column.

### View Compression Method

```sql
DESCRIBE [dbname.]tabName
```

**Function Description**

- Display basic information of the columns, including type and compression method.

## Compatibility

- Fully compatible with existing data
- Can't be rolled back once you upgrade to 3.3.0.0
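A short end-to-end sketch of the statements described above; the table `sensor_data` and its columns are hypothetical:

```sql
-- Hypothetical: per-column encoding/compression chosen at create time, changed
-- later with ALTER TABLE, then inspected with DESCRIBE.
CREATE TABLE sensor_data (
    ts   TIMESTAMP,
    val  DOUBLE ENCODE 'delta-d' COMPRESS 'tsz' LEVEL 'medium',
    note VARCHAR(64) ENCODE 'disabled' COMPRESS 'zstd' LEVEL 'l'
);

ALTER TABLE sensor_data MODIFY COLUMN note COMPRESS 'xz' LEVEL 'high';

DESCRIBE sensor_data;
```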
@@ -23,7 +23,7 @@ create_subtable_clause: {
}

create_definition:
col_name column_type [PRIMARY KEY]

table_options:
table_option ...
@@ -38,12 +38,13 @@ table_option: {

**Usage Notes**

1. The first column of a table must be of type TIMESTAMP, and the system automatically sets it as the primary key.
2. In addition to the timestamp primary key column, a second column can be designated as an additional primary key column with the PRIMARY KEY keyword. The second column designated as a primary key must be of integer or string (varchar) type.
3. The maximum length of a table name is 192.
4. Each row of a table cannot exceed 48 KB (64 KB since version 3.0.5.0). (Note: each BINARY/NCHAR/GEOMETRY column takes an extra 2 bytes of storage.)
5. A subtable name can only consist of letters, digits, and underscores, cannot start with a digit, and is case insensitive.
6. When using the BINARY/NCHAR/GEOMETRY data types, the maximum number of bytes must be specified, e.g. BINARY(20) means 20 bytes.
7. To support more forms of table names, TDengine introduces the new escape character "\`", which keeps table names from conflicting with keywords and exempts them from the validity checks above, although the length limit still applies. Once the escape character is used, the content inside it is no longer case-normalized;
   for example, \`aBc\` and \`abc\` are different table names, but abc and aBc are the same table name.

**Parameter Description**
@@ -106,6 +107,7 @@ alter_table_option: {

2. DROP COLUMN: delete a column.
3. MODIFY COLUMN: modify a column definition. If the column is of a variable-length type, this command can be used to change its width; the width can only be increased, not decreased.
4. RENAME COLUMN: change the name of a column.
5. The primary key column of an ordinary table cannot be modified, and primary key columns cannot be added or removed with ADD/DROP COLUMN.

### Add a Column

@@ -148,6 +148,7 @@ alter_table_option: {

- DROP TAG: delete a tag from the supertable. After a tag is deleted from a supertable, it is automatically deleted from all subtables of that supertable.
- MODIFY TAG: modify the width of one of the supertable's tag columns. The tag type can only be nchar or binary; this command can increase the width, but not decrease it.
- RENAME TAG: rename one of the supertable's tags. After a tag is renamed in the supertable, the tag name is automatically updated in all subtables of that supertable.
- As with ordinary tables, the primary key column of a supertable cannot be modified, and primary key columns cannot be added or removed with ADD/DROP COLUMN.

### Add a Column

@@ -57,6 +57,7 @@ INSERT INTO
INSERT INTO d1001 USING meters TAGS('Beijing.Chaoyang', 2) VALUES('a');
```
6. When inserting data into multiple subtables, it is still possible for some data to fail to be written while the rest is written successfully. This is because the subtables may be distributed across different VNODEs; after fully parsing the INSERT statement, the client sends the data to each involved VNODE, and each VNODE performs its write independently. If one VNODE fails to write for some reason (such as a network problem or a disk failure), the writes on the other VNODEs are not affected.
7. The primary key column value must be specified and cannot be NULL.

**Normal Syntax**
@@ -504,7 +504,7 @@ TO_CHAR(ts, format_str_literal)

**Supported Formats**

| **Format**          | **Description**                      | **Example**  |
| ------------------- | ------------------------------------ | ------------ |
| AM,am,PM,pm         | Meridiem indicator (without periods) | 07:00:00am   |
| A.M.,a.m.,P.M.,p.m. | Meridiem indicator (with periods)    | 07:00:00a.m. |
| YYYY,yyyy           | year, 4 or more digits               | 2023-10-10   |
@@ -957,6 +957,7 @@ FIRST(expr)

- To return the first (smallest-timestamp) non-NULL value of each column, use FIRST(\*). When querying a supertable with multiResultFunctionStarReturnTags set to 0 (the default), FIRST(\*) returns only the normal columns of the supertable; when it is set to 1, both the normal columns and the tag columns are returned.
- If all values of a column in the result set are NULL, the result for that column is also NULL;
- if all columns in the result set are NULL, no result is returned.
- For queries on a table with a composite primary key, if there are multiple rows with the smallest timestamp, only the row with the smallest composite primary key value is returned.

### INTERP

@@ -989,6 +990,7 @@ ignore_null_values: {

- When INTERP is applied to a supertable, the data of all its subtables is sorted by the primary key column and then interpolated; it can also be combined with PARTITION BY tbname to restrict the result to a single timeline per subtable.
- INTERP can be used with the pseudocolumn _irowts to return the timestamps of the interpolation points (supported since version 3.0.2.0).
- INTERP can be used with the pseudocolumn _isfilled to indicate whether a returned row is an original record or data generated by the interpolation algorithm (supported since version 3.0.3.0).
- For INTERP queries on a table with a composite primary key, if rows with the same timestamp exist, only the row with the smallest composite primary key value participates in the calculation.

### LAST

@@ -1009,6 +1011,7 @@ LAST(expr)

- To return the last (largest-timestamp) non-NULL value of each column, use LAST(\*). When querying a supertable with multiResultFunctionStarReturnTags set to 0 (the default), LAST(\*) returns only the normal columns of the supertable; when it is set to 1, both the normal columns and the tag columns are returned.
- If all values of a column in the result set are NULL, the result for that column is also NULL; if all columns in the result set are NULL, no result is returned.
- When used on a supertable, there may be multiple rows that share the same, largest timestamp; one of them is returned at random, and it is not guaranteed that the same row is selected when the query is run multiple times.
- For queries on a table with a composite primary key, if there are multiple rows with the largest timestamp, only the row with the largest composite primary key value is returned.

### LAST_ROW

@@ -1029,6 +1032,7 @@ LAST_ROW(expr)

- To return the last record (largest timestamp) of each column, use LAST_ROW(\*). When querying a supertable with multiResultFunctionStarReturnTags set to 0 (the default), LAST_ROW(\*) returns only the normal columns of the supertable; when it is set to 1, both the normal columns and the tag columns are returned.
- When used on a supertable, there may be multiple rows that share the same, largest timestamp; one of them is returned at random, and it is not guaranteed that the same row is selected when the query is run multiple times.
- Cannot be used together with INTERVAL.
- Like the LAST function, for queries on a table with a composite primary key, if there are multiple rows with the largest timestamp, only the row with the largest composite primary key value is returned.

### MAX

@@ -1135,7 +1139,7 @@ TOP(expr, k)
UNIQUE(expr)
```

**Description**: Returns the first occurrence of each value in the column; the effect is similar to distinct. For queries on a table with a composite primary key, if there are multiple rows with the smallest timestamp, only the row with the smallest composite primary key value is returned.

**Return value type**: Same as the column to which the function is applied.

@@ -1181,7 +1185,7 @@ ignore_negative: {
}
```

**Description**: The per-unit-time rate of change of a column's values. The length of the unit time interval can be specified with the time_interval parameter, with a minimum of 1 second (1s); the ignore_negative parameter can be 0 or 1, and 1 means negative values are ignored. For queries on a table with a composite primary key, if multiple rows share the same timestamp, only the row with the smallest composite primary key value participates in the calculation.

**Return value type**: DOUBLE.
@@ -1204,7 +1208,7 @@ ignore_negative: {
}
```

**Description**: The difference between each row's value and the corresponding value of the previous row for a column. ignore_negative can be 0 or 1 and may be omitted; the default is 0, i.e. negative values are not ignored, while 1 means negative values are ignored. For queries on a table with a composite primary key, if multiple rows share the same timestamp, only the row with the smallest composite primary key value participates in the calculation.

**Return value type**: Same as the column to which the function is applied.

@@ -1224,7 +1228,7 @@ ignore_negative: {
IRATE(expr)
```

**Description**: Computes the instantaneous growth rate, using the last two samples in the time range; if those two values are decreasing, only the last value is used for the calculation instead of the difference between them. For queries on a table with a composite primary key, if multiple rows share the same timestamp, only the row with the smallest composite primary key value participates in the calculation.

**Return value type**: DOUBLE.
@@ -1314,7 +1318,7 @@ STATEDURATION(expr, oper, val, unit)
TWA(expr)
```

**Description**: Time-weighted average function; computes the time-weighted average of a column over a period of time. For queries on a table with a composite primary key, if multiple rows share the same timestamp, only the row with the smallest composite primary key value participates in the calculation.

**Return value type**: DOUBLE.
@@ -8,7 +8,7 @@ description: 流式计算的相关 SQL 的详细语法
## Create a Stream

```sql
CREATE STREAM [IF NOT EXISTS] stream_name [stream_options] INTO stb_name[(field1_name, field2_name [PRIMARY KEY], ...)] [TAGS (create_definition [, create_definition] ...)] SUBTABLE(expression) AS subquery
stream_options: {
 TRIGGER   [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time]
 WATERMARK time

@@ -30,9 +30,9 @@ subquery: SELECT select_list
 [window_clause]
```

Session windows, state windows, sliding windows, event windows, and count windows are supported; when used on a supertable, state windows, event windows, and count windows must be combined with PARTITION BY tbname. For streams whose source table has a composite primary key, state windows, event windows, and count windows are not supported.

stb_name is the name of the supertable that stores the computation results. If this supertable does not exist, it is created automatically; if it already exists, the column schema is checked. See "Write to an Existing Supertable" for details.

The TAGS clause defines the rules for creating tags in the stream computation, so that custom tag values can be generated for the subtable corresponding to each partition. See "Custom TAG" for details.
```sql
@@ -28,6 +28,7 @@ measurement,tag_set field_set timestamp

- tag_set is the tag data, in the format `<tag_key>=<tag_value>,<tag_key>=<tag_value>`, i.e. multiple tags are separated by commas. It is separated from field_set by one half-width space.
- field_set is the normal column data, in the format `<field_key>=<field_value>,<field_key>=<field_value>`, with multiple columns likewise separated by commas. It is separated from timestamp by one half-width space.
- timestamp is the primary key timestamp for this row of data.
- Schemaless writing does not support writing data into tables that have a second primary key column.

All data in tag_set is automatically converted to the nchar data type and does not require double quotes (").
@@ -0,0 +1,91 @@
---
title: Configurable Compression Algorithms
description: Configurable column storage compression algorithms
---

# Configurable Storage Compression

Starting with TDengine 3.3.0.0, more advanced compression is available: when creating a table, you can configure, for each column, whether to compress it and which compression algorithm and compression level to use.

## Compression Terminology

### Compression Stages

- Level 1 compression: encodes the data; this is itself a form of compression.
- Level 2 compression: compresses the data blocks on top of the encoding.

### Compression Level

In this document this refers specifically to the level inside the level-2 compression algorithm. For example, zstd offers at least 8 selectable levels, each with different behavior; it is essentially a trade-off between compression ratio, compression speed, and decompression speed. To avoid the difficulty of choosing, it is simplified into the following three levels:

- high: the highest compression ratio, with relatively the worst compression and decompression speed.
- low: the best compression and decompression speed, with relatively the lowest compression ratio.
- medium: balances compression ratio, compression speed, and decompression speed.

### Compression Algorithm List

- Encoding algorithms (level 1 compression): simple8b, bit-packing, delta-i, delta-d, disabled

- Compression algorithms (level 2 compression): lz4, zlib, zstd, tsz, xz, disabled

- Default compression algorithms and applicable range for each data type

| Data Type | Available Encoding Algorithms | Default Encoding Algorithm | Available Compression Algorithms | Default Compression Algorithm | Default Compression Level |
| :------------------------------------------: | :--------------: | :---------: | :------------------: | :--: | :----: |
| tinyint/utinyint/smallint/usmallint/int/uint | simple8b         | simple8b    | lz4/zlib/zstd/xz     | lz4  | medium |
| bigint/ubigint/timestamp                     | simple8b/delta-i | delta-i     | lz4/zlib/zstd/xz     | lz4  | medium |
| float/double                                 | delta-d          | delta-d     | lz4/zlib/zstd/xz/tsz | tsz  | medium |
| binary/nchar                                 | disabled         | disabled    | lz4/zlib/zstd/xz     | lz4  | medium |
| bool                                         | bit-packing      | bit-packing | lz4/zlib/zstd/xz     | lz4  | medium |

Note: for floating-point types, if tsz is configured, its precision is determined by the global configuration of taosd; if tsz is configured but the lossy-compression flag is not, lz4 is used for compression instead.

## SQL Syntax

### Specify Compression When Creating a Table

```sql
CREATE TABLE [dbname.]tabname (colName colType [ENCODE 'encode_type'] [COMPRESS 'compress_type' [LEVEL 'level']] [, other create_definition] ...)
```

**Parameter Description**

- tabname: the name of a supertable or an ordinary table
- encode_type: level 1 compression; see the list above for the available values
- compress_type: level 2 compression; see the list above for the available values
- level: the level of the level-2 compression; the default is medium, and the abbreviations 'h'/'l'/'m' are supported

**Function Description**

- Specify the compression method of a column when the table is created.

### Change the Compression Method of a Column

```sql
ALTER TABLE [db_name.]tabName MODIFY COLUMN colName [ENCODE 'encode_type'] [COMPRESS 'compress_type'] [LEVEL "high"]
```

**Parameter Description**

- tabName: table name; can be a supertable or an ordinary table
- colName: the column whose compression algorithm is to be changed; only a normal column is allowed

**Function Description**

- Change the compression method of the column.

### View the Compression Method of a Column

```sql
DESCRIBE [dbname.]tabName
```

**Function Description**

- Show the basic information of the columns, including type and compression method.

## Compatibility

- Fully compatible with existing data
- Cannot be rolled back after upgrading to 3.3.0.0
@@ -435,6 +435,7 @@ typedef struct SUpstreamInfo {
typedef struct SDownstreamStatusInfo {
  int64_t reqId;
  int32_t taskId;
  int32_t vgId;
  int64_t rspTs;
  int32_t status;
} SDownstreamStatusInfo;

@@ -847,12 +848,9 @@ int32_t streamTaskSetDb(SStreamMeta* pMeta, void* pTask, char* key);
bool streamTaskIsSinkTask(const SStreamTask* pTask);
int32_t streamTaskSendCheckpointReq(SStreamTask* pTask);

-int32_t streamTaskAddReqInfo(STaskCheckInfo* pInfo, int64_t reqId, int32_t taskId, const char* id);
-int32_t streamTaskUpdateCheckInfo(STaskCheckInfo* pInfo, int32_t taskId, int32_t status, int64_t rspTs, int64_t reqId,
-                                  int32_t* pNotReady, const char* id);
-void streamTaskCleanupCheckInfo(STaskCheckInfo* pInfo);
int32_t streamTaskStartMonitorCheckRsp(SStreamTask* pTask);
int32_t streamTaskStopMonitorCheckRsp(STaskCheckInfo* pInfo, const char* id);
void streamTaskCleanupCheckInfo(STaskCheckInfo* pInfo);

void streamTaskStatusInit(STaskStatusEntry* pEntry, const SStreamTask* pTask);
void streamTaskStatusCopy(STaskStatusEntry* pDst, const STaskStatusEntry* pSrc);
@@ -106,12 +106,15 @@ int32_t cfgLoad(SConfig *pCfg, ECfgSrcType cfgType, const void *sourceStr);
int32_t      cfgLoadFromArray(SConfig *pCfg, SArray *pArgs);  // SConfigPair
void         cfgCleanup(SConfig *pCfg);
int32_t      cfgGetSize(SConfig *pCfg);
-SConfigItem *cfgGetItem(SConfig *pCfg, const char *name);
-int32_t      cfgSetItem(SConfig *pCfg, const char *name, const char *value, ECfgSrcType stype);
+SConfigItem *cfgGetItem(SConfig *pCfg, const char *pName);
+int32_t      cfgSetItem(SConfig *pCfg, const char *name, const char *value, ECfgSrcType stype, bool lock);
int32_t      cfgCheckRangeForDynUpdate(SConfig *pCfg, const char *name, const char *pVal, bool isServer);

SConfigIter *cfgCreateIter(SConfig *pConf);
SConfigItem *cfgNextIter(SConfigIter *pIter);
void         cfgDestroyIter(SConfigIter *pIter);
void         cfgLock(SConfig *pCfg);
void         cfgUnLock(SConfig *pCfg);

// clang-format off
int32_t cfgAddBool(SConfig *pCfg, const char *name, bool defaultVal, int8_t scope, int8_t dynScope);
@@ -159,7 +159,7 @@ done
tools=(${clientName} ${benchmarkName} ${dumpName} ${demoName} remove.sh udfd set_core.sh TDinsight.sh start_pre.sh)
if [ "${verMode}" == "cluster" ]; then
  services=(${serverName} ${adapterName} ${xname} ${explorerName} ${keeperName})
-elif [ "${verMode}" == "community" ]; then
+elif [ "${verMode}" == "edge" ]; then
  if [ "${pagMode}" == "full" ]; then
    services=(${serverName} ${adapterName} ${keeperName} ${explorerName})
  else

@@ -229,6 +229,10 @@ function install_bin() {
    fi
  fi

  if [ -f ${script_dir}/bin/quick_deploy.sh ]; then
    ${csudo}cp -r ${script_dir}/bin/quick_deploy.sh ${install_main_dir}/bin
  fi

  ${csudo}chmod 0555 ${install_main_dir}/bin/*
  [ -x ${install_main_dir}/bin/remove.sh ] && ${csudo}mv ${install_main_dir}/bin/remove.sh ${install_main_dir}/uninstall.sh || :
@@ -503,7 +507,7 @@ function local_fqdn_check() {
function install_taosx_config() {
  [ ! -z $1 ] && return 0 || :  # only install client

-  fileName="${script_dir}/${xname}/etc/taos/${xname}.toml"
+  fileName="${script_dir}/${xname}/etc/${PREFIX}/${xname}.toml"
  if [ -f ${fileName} ]; then
    ${csudo}sed -i -r "s/#*\s*(fqdn\s*=\s*).*/\1\"${serverFqdn}\"/" ${fileName}

@@ -520,10 +524,11 @@ function install_explorer_config() {
  [ ! -z $1 ] && return 0 || :  # only install client

  if [ "$verMode" == "cluster" ]; then
-    fileName="${script_dir}/${xname}/etc/taos/explorer.toml"
+    fileName="${script_dir}/${xname}/etc/${PREFIX}/explorer.toml"
  else
    fileName="${script_dir}/cfg/explorer.toml"
-  }
+  fi

  if [ -f ${fileName} ]; then
    ${csudo}sed -i "s/localhost/${serverFqdn}/g" ${fileName}
@@ -655,13 +660,13 @@ function install_connector() {

function install_examples() {
  if [ -d ${script_dir}/examples ]; then
-    ${csudo}cp -rf ${script_dir}/examples/* ${install_main_dir}/examples || echo "failed to copy examples"
+    ${csudo}cp -rf ${script_dir}/examples ${install_main_dir}/ || echo "failed to copy examples"
  fi
}

function install_plugins() {
  if [ -d ${script_dir}/${xname}/plugins ]; then
-    ${csudo}cp -rf ${script_dir}/${xname}/plugins/ ${install_main_dir}/ || echo "failed to copy taosx plugins"
+    ${csudo}cp -rf ${script_dir}/${xname}/plugins/ ${install_main_dir}/ || echo "failed to copy ${PREFIX}x plugins"
  fi
}
@@ -838,7 +838,7 @@ int taos_options_imp(TSDB_OPTION option, const char *str) {
    return -1;
  }

-  int code = cfgSetItem(pCfg, name, str, CFG_STYPE_TAOS_OPTIONS);
+  int code = cfgSetItem(pCfg, name, str, CFG_STYPE_TAOS_OPTIONS, true);
  if (code != 0) {
    tscError("failed to set cfg:%s to %s since %s", name, str, terrstr());
  } else {
@@ -832,7 +832,7 @@ static int32_t smlFindNearestPowerOf2(int32_t length, uint8_t type) {
  return result;
}

-static int32_t smlProcessSchemaAction(SSmlHandle *info, SSchema *schemaField, SHashObj *schemaHash, SArray *cols,
+static int32_t smlProcessSchemaAction(SSmlHandle *info, SSchema *schemaField, SHashObj *schemaHash, SArray *cols, SArray *checkDumplicateCols,
                                      ESchemaAction *action, bool isTag) {
  int32_t code = TSDB_CODE_SUCCESS;
  for (int j = 0; j < taosArrayGetSize(cols); ++j) {

@@ -843,6 +843,13 @@ static int32_t smlProcessSchemaAction(SSmlHandle *info, SSchema *schemaField, SH
      return code;
    }
  }

  for (int j = 0; j < taosArrayGetSize(checkDumplicateCols); ++j) {
    SSmlKv *kv = (SSmlKv *)taosArrayGet(checkDumplicateCols, j);
    if (taosHashGet(schemaHash, kv->key, kv->keyLen) != NULL) {
      return TSDB_CODE_PAR_DUPLICATED_COLUMN;
    }
  }
  return TSDB_CODE_SUCCESS;
}

@@ -1106,7 +1113,7 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
  }

  ESchemaAction action = SCHEMA_ACTION_NULL;
-  code = smlProcessSchemaAction(info, pTableMeta->schema, hashTmp, sTableData->tags, &action, true);
+  code = smlProcessSchemaAction(info, pTableMeta->schema, hashTmp, sTableData->tags, sTableData->cols, &action, true);
  if (code != TSDB_CODE_SUCCESS) {
    goto end;
  }

@@ -1181,7 +1188,7 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
    taosHashPut(hashTmp, pTableMeta->schema[i].name, strlen(pTableMeta->schema[i].name), &i, SHORT_BYTES);
  }
  action = SCHEMA_ACTION_NULL;
-  code = smlProcessSchemaAction(info, pTableMeta->schema, hashTmp, sTableData->cols, &action, false);
+  code = smlProcessSchemaAction(info, pTableMeta->schema, hashTmp, sTableData->cols, sTableData->tags, &action, false);
  if (code != TSDB_CODE_SUCCESS) {
    goto end;
  }
@@ -191,8 +191,8 @@ static const SSysDbTableSchema streamTaskSchema[] = {
    {.name = "start_id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false},
    {.name = "start_ver", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false},
    {.name = "checkpoint_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
-    {.name = "checkpoint_id", .bytes = 25, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false},
+    {.name = "checkpoint_id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false},
-    {.name = "checkpoint_version", .bytes = 25, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false},
+    {.name = "checkpoint_version", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false},
    {.name = "ds_err_info", .bytes = 25, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
    {.name = "history_task_id", .bytes = 16 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
    {.name = "history_task_status", .bytes = 12 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
@@ -271,7 +271,7 @@ int32_t tsTtlBatchDropNum = 10000;  // number of tables dropped per batch
int32_t tsTransPullupInterval = 2;
int32_t tsCompactPullupInterval = 10;
int32_t tsMqRebalanceInterval = 2;
-int32_t tsStreamCheckpointInterval = 300;
+int32_t tsStreamCheckpointInterval = 60;
float   tsSinkDataRate = 2.0;
int32_t tsStreamNodeCheckInterval = 16;
int32_t tsTtlUnit = 86400;
@@ -1081,13 +1081,13 @@ static int32_t taosSetClientCfg(SConfig *pCfg) {
  SEp firstEp = {0};
  taosGetFqdnPortFromEp(strlen(pFirstEpItem->str) == 0 ? defaultFirstEp : pFirstEpItem->str, &firstEp);
  snprintf(tsFirst, sizeof(tsFirst), "%s:%u", firstEp.fqdn, firstEp.port);
-  cfgSetItem(pCfg, "firstEp", tsFirst, pFirstEpItem->stype);
+  cfgSetItem(pCfg, "firstEp", tsFirst, pFirstEpItem->stype, true);

  SConfigItem *pSecondpItem = cfgGetItem(pCfg, "secondEp");
  SEp secondEp = {0};
  taosGetFqdnPortFromEp(strlen(pSecondpItem->str) == 0 ? defaultFirstEp : pSecondpItem->str, &secondEp);
  snprintf(tsSecond, sizeof(tsSecond), "%s:%u", secondEp.fqdn, secondEp.port);
-  cfgSetItem(pCfg, "secondEp", tsSecond, pSecondpItem->stype);
+  cfgSetItem(pCfg, "secondEp", tsSecond, pSecondpItem->stype, true);

  tstrncpy(tsTempDir, cfgGetItem(pCfg, "tempDir")->str, PATH_MAX);
  taosExpandDir(tsTempDir, tsTempDir, PATH_MAX);

@@ -1149,9 +1149,10 @@ static int32_t taosSetClientCfg(SConfig *pCfg) {

static void taosSetSystemCfg(SConfig *pCfg) {
  SConfigItem *pItem = cfgGetItem(pCfg, "timezone");

  osSetTimezone(pItem->str);
  uDebug("timezone format changed from %s to %s", pItem->str, tsTimezoneStr);
-  cfgSetItem(pCfg, "timezone", tsTimezoneStr, pItem->stype);
+  cfgSetItem(pCfg, "timezone", tsTimezoneStr, pItem->stype, true);

  const char *locale = cfgGetItem(pCfg, "locale")->str;
  const char *charset = cfgGetItem(pCfg, "charset")->str;
@@ -1515,15 +1516,20 @@ static int32_t taosCfgDynamicOptionsForServer(SConfig *pCfg, const char *name) {
    return 0;
  }

  cfgLock(pCfg);

  SConfigItem *pItem = cfgGetItem(pCfg, name);
  if (!pItem || (pItem->dynScope & CFG_DYN_SERVER) == 0) {
    uError("failed to config:%s, not support", name);
    terrno = TSDB_CODE_INVALID_CFG;

    cfgUnLock(pCfg);
    return -1;
  }

  if (strncasecmp(name, "debugFlag", 9) == 0) {
    taosSetAllDebugFlag(pCfg, pItem->i32);
    cfgUnLock(pCfg);
    return 0;
  }

@@ -1580,17 +1586,21 @@ static int32_t taosCfgDynamicOptionsForServer(SConfig *pCfg, const char *name) {
    }
  }

  cfgUnLock(pCfg);
  return terrno == TSDB_CODE_SUCCESS ? 0 : -1;
}

-// todo fix race condition caused by update of config, pItem->str may be removed
static int32_t taosCfgDynamicOptionsForClient(SConfig *pCfg, const char *name) {
  terrno = TSDB_CODE_SUCCESS;

  cfgLock(pCfg);

  SConfigItem *pItem = cfgGetItem(pCfg, name);
  if ((pItem == NULL) || (pItem->dynScope & CFG_DYN_CLIENT) == 0) {
    uError("failed to config:%s, not support", name);
    terrno = TSDB_CODE_INVALID_CFG;

    cfgUnLock(pCfg);
    return -1;
  }
@ -1630,7 +1640,8 @@ static int32_t taosCfgDynamicOptionsForClient(SConfig *pCfg, const char *name) {
|
||||||
SEp firstEp = {0};
|
SEp firstEp = {0};
|
||||||
taosGetFqdnPortFromEp(strlen(pFirstEpItem->str) == 0 ? defaultFirstEp : pFirstEpItem->str, &firstEp);
|
taosGetFqdnPortFromEp(strlen(pFirstEpItem->str) == 0 ? defaultFirstEp : pFirstEpItem->str, &firstEp);
|
||||||
snprintf(tsFirst, sizeof(tsFirst), "%s:%u", firstEp.fqdn, firstEp.port);
|
snprintf(tsFirst, sizeof(tsFirst), "%s:%u", firstEp.fqdn, firstEp.port);
|
||||||
cfgSetItem(pCfg, "firstEp", tsFirst, pFirstEpItem->stype);
|
|
||||||
|
cfgSetItem(pCfg, "firstEp", tsFirst, pFirstEpItem->stype, false);
|
||||||
uInfo("localEp set to '%s', tsFirst set to '%s'", tsLocalEp, tsFirst);
|
uInfo("localEp set to '%s', tsFirst set to '%s'", tsLocalEp, tsFirst);
|
||||||
matched = true;
|
matched = true;
|
||||||
} else if (strcasecmp("firstEp", name) == 0) {
|
} else if (strcasecmp("firstEp", name) == 0) {
|
||||||
|
@ -1645,7 +1656,8 @@ static int32_t taosCfgDynamicOptionsForClient(SConfig *pCfg, const char *name) {
|
||||||
SEp firstEp = {0};
|
SEp firstEp = {0};
|
||||||
taosGetFqdnPortFromEp(strlen(pFirstEpItem->str) == 0 ? defaultFirstEp : pFirstEpItem->str, &firstEp);
|
taosGetFqdnPortFromEp(strlen(pFirstEpItem->str) == 0 ? defaultFirstEp : pFirstEpItem->str, &firstEp);
|
||||||
snprintf(tsFirst, sizeof(tsFirst), "%s:%u", firstEp.fqdn, firstEp.port);
|
snprintf(tsFirst, sizeof(tsFirst), "%s:%u", firstEp.fqdn, firstEp.port);
|
||||||
cfgSetItem(pCfg, "firstEp", tsFirst, pFirstEpItem->stype);
|
|
||||||
|
cfgSetItem(pCfg, "firstEp", tsFirst, pFirstEpItem->stype, false);
|
||||||
uInfo("localEp set to '%s', tsFirst set to '%s'", tsLocalEp, tsFirst);
|
uInfo("localEp set to '%s', tsFirst set to '%s'", tsLocalEp, tsFirst);
|
||||||
matched = true;
|
matched = true;
|
||||||
}
|
}
|
||||||
|
@ -1692,7 +1704,7 @@ static int32_t taosCfgDynamicOptionsForClient(SConfig *pCfg, const char *name) {
|
||||||
SEp secondEp = {0};
|
SEp secondEp = {0};
|
||||||
taosGetFqdnPortFromEp(strlen(pItem->str) == 0 ? tsFirst : pItem->str, &secondEp);
|
taosGetFqdnPortFromEp(strlen(pItem->str) == 0 ? tsFirst : pItem->str, &secondEp);
|
||||||
snprintf(tsSecond, sizeof(tsSecond), "%s:%u", secondEp.fqdn, secondEp.port);
|
snprintf(tsSecond, sizeof(tsSecond), "%s:%u", secondEp.fqdn, secondEp.port);
|
||||||
cfgSetItem(pCfg, "secondEp", tsSecond, pItem->stype);
|
cfgSetItem(pCfg, "secondEp", tsSecond, pItem->stype, false);
|
||||||
uInfo("%s set to %s", name, tsSecond);
|
uInfo("%s set to %s", name, tsSecond);
|
||||||
matched = true;
|
matched = true;
|
||||||
} else if (strcasecmp("smlChildTableName", name) == 0) {
|
} else if (strcasecmp("smlChildTableName", name) == 0) {
|
||||||
|
@ -1723,11 +1735,13 @@ static int32_t taosCfgDynamicOptionsForClient(SConfig *pCfg, const char *name) {
|
||||||
SEp firstEp = {0};
|
SEp firstEp = {0};
|
||||||
taosGetFqdnPortFromEp(strlen(pFirstEpItem->str) == 0 ? defaultFirstEp : pFirstEpItem->str, &firstEp);
|
taosGetFqdnPortFromEp(strlen(pFirstEpItem->str) == 0 ? defaultFirstEp : pFirstEpItem->str, &firstEp);
|
||||||
snprintf(tsFirst, sizeof(tsFirst), "%s:%u", firstEp.fqdn, firstEp.port);
|
snprintf(tsFirst, sizeof(tsFirst), "%s:%u", firstEp.fqdn, firstEp.port);
|
||||||
cfgSetItem(pCfg, "firstEp", tsFirst, pFirstEpItem->stype);
|
|
||||||
|
cfgSetItem(pCfg, "firstEp", tsFirst, pFirstEpItem->stype, false);
|
||||||
uInfo("localEp set to '%s', tsFirst set to '%s'", tsLocalEp, tsFirst);
|
uInfo("localEp set to '%s', tsFirst set to '%s'", tsLocalEp, tsFirst);
|
||||||
matched = true;
|
matched = true;
|
||||||
} else if (strcasecmp("slowLogScope", name) == 0) {
|
} else if (strcasecmp("slowLogScope", name) == 0) {
|
||||||
if (taosSetSlowLogScope(pItem->str)) {
|
if (taosSetSlowLogScope(pItem->str)) {
|
||||||
|
cfgUnLock(pCfg);
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
uInfo("%s set to %s", name, pItem->str);
|
uInfo("%s set to %s", name, pItem->str);
|
||||||
|
@ -1739,7 +1753,8 @@ static int32_t taosCfgDynamicOptionsForClient(SConfig *pCfg, const char *name) {
|
||||||
if (strcasecmp("timezone", name) == 0) {
|
if (strcasecmp("timezone", name) == 0) {
|
||||||
osSetTimezone(pItem->str);
|
osSetTimezone(pItem->str);
|
||||||
uInfo("%s set from %s to %s", name, tsTimezoneStr, pItem->str);
|
uInfo("%s set from %s to %s", name, tsTimezoneStr, pItem->str);
|
||||||
cfgSetItem(pCfg, "timezone", tsTimezoneStr, pItem->stype);
|
|
||||||
|
cfgSetItem(pCfg, "timezone", tsTimezoneStr, pItem->stype, false);
|
||||||
matched = true;
|
matched = true;
|
||||||
} else if (strcasecmp("tempDir", name) == 0) {
|
} else if (strcasecmp("tempDir", name) == 0) {
|
||||||
uInfo("%s set from %s to %s", name, tsTempDir, pItem->str);
|
uInfo("%s set from %s to %s", name, tsTempDir, pItem->str);
|
||||||
|
@ -1747,6 +1762,7 @@ static int32_t taosCfgDynamicOptionsForClient(SConfig *pCfg, const char *name) {
|
||||||
taosExpandDir(tsTempDir, tsTempDir, PATH_MAX);
|
taosExpandDir(tsTempDir, tsTempDir, PATH_MAX);
|
||||||
if (taosMulMkDir(tsTempDir) != 0) {
|
if (taosMulMkDir(tsTempDir) != 0) {
|
||||||
uError("failed to create tempDir:%s since %s", tsTempDir, terrstr());
|
uError("failed to create tempDir:%s since %s", tsTempDir, terrstr());
|
||||||
|
cfgUnLock(pCfg);
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
matched = true;
|
matched = true;
|
||||||
|
@ -1802,6 +1818,7 @@ static int32_t taosCfgDynamicOptionsForClient(SConfig *pCfg, const char *name) {
|
||||||
}
|
}
|
||||||
|
|
||||||
_out:
|
_out:
|
||||||
|
cfgUnLock(pCfg);
|
||||||
return terrno == TSDB_CODE_SUCCESS ? 0 : -1;
|
return terrno == TSDB_CODE_SUCCESS ? 0 : -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1878,7 +1895,9 @@ static void taosSetAllDebugFlag(SConfig *pCfg, int32_t flag) {
|
||||||
taosArrayClear(noNeedToSetVars); // reset array
|
taosArrayClear(noNeedToSetVars); // reset array
|
||||||
|
|
||||||
uInfo("all debug flag are set to %d", flag);
|
uInfo("all debug flag are set to %d", flag);
|
||||||
if (terrno == TSDB_CODE_CFG_NOT_FOUND) terrno = TSDB_CODE_SUCCESS; // ignore not exist
|
if (terrno == TSDB_CODE_CFG_NOT_FOUND) {
|
||||||
|
terrno = TSDB_CODE_SUCCESS; // ignore not exist
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
int8_t taosGranted(int8_t type) {
|
int8_t taosGranted(int8_t type) {
|
||||||
|
|
|
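The hunks above thread a new boolean argument through `cfgSetItem` and bracket the dynamic-option handlers with `cfgLock`/`cfgUnLock`, so every early-return path releases the lock before returning. Below is a minimal, self-contained sketch of that locking pattern using a plain pthread mutex and stand-in types; the struct and function names are illustrative, not the actual TDengine API.

```c
#include <pthread.h>
#include <stdio.h>

typedef struct {
  pthread_mutex_t lock;
  char            value[64];
} DemoCfg;

// Mirrors the pattern in the diff: take the lock up front, and make sure
// every return path (error or success) releases it before returning.
static int demoSetItem(DemoCfg *cfg, const char *name, const char *val) {
  pthread_mutex_lock(&cfg->lock);

  if (name == NULL || val == NULL) {  // unsupported option: unlock and bail out
    pthread_mutex_unlock(&cfg->lock);
    return -1;
  }

  snprintf(cfg->value, sizeof(cfg->value), "%s", val);
  pthread_mutex_unlock(&cfg->lock);
  return 0;
}

int main(void) {
  DemoCfg cfg;
  pthread_mutex_init(&cfg.lock, NULL);
  cfg.value[0] = '\0';

  demoSetItem(&cfg, "firstEp", "localhost:6030");
  printf("firstEp=%s\n", cfg.value);

  pthread_mutex_destroy(&cfg.lock);
  return 0;
}
```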
@@ -219,7 +219,7 @@ int32_t dmProcessConfigReq(SDnodeMgmt *pMgmt, SRpcMsg *pMsg) {
   dInfo("start to config, option:%s, value:%s", cfgReq.config, cfgReq.value);

   SConfig *pCfg = taosGetCfg();
-  cfgSetItem(pCfg, cfgReq.config, cfgReq.value, CFG_STYPE_ALTER_CMD);
+  cfgSetItem(pCfg, cfgReq.config, cfgReq.value, CFG_STYPE_ALTER_CMD, true);
   taosCfgDynamicOptions(pCfg, cfgReq.config, true);
   return 0;
 }

@@ -254,7 +254,6 @@ static void dmGetServerRunStatus(SDnodeMgmt *pMgmt, SServerStatusRsp *pStatus) {
   pStatus->statusCode = TSDB_SRV_STATUS_SERVICE_OK;
   pStatus->details[0] = 0;

-  SServerStatusRsp statusRsp = {0};
   SMonMloadInfo minfo = {0};
   (*pMgmt->getMnodeLoadsFp)(&minfo);
   if (minfo.isMnode &&

@@ -16,6 +16,8 @@
 #define _DEFAULT_SOURCE
 #include "mmInt.h"

+#define PROCESS_THRESHOLD (2000 * 1000)
+
 static inline int32_t mmAcquire(SMnodeMgmt *pMgmt) {
   int32_t code = 0;
   taosThreadRwlockRdlock(&pMgmt->lock);

@@ -53,6 +55,14 @@ static void mmProcessRpcMsg(SQueueInfo *pInfo, SRpcMsg *pMsg) {

   int32_t code = mndProcessRpcMsg(pMsg);

+  if (pInfo->timestamp != 0) {
+    int64_t cost = taosGetTimestampUs() - pInfo->timestamp;
+    if (cost > PROCESS_THRESHOLD) {
+      dGWarn("worker:%d,message has been processed for too long, type:%s, cost: %" PRId64 "s", pInfo->threadNum,
+             TMSG_INFO(pMsg->msgType), cost / (1000 * 1000));
+    }
+  }
+
   if (IsReq(pMsg) && pMsg->info.handle != NULL && code != TSDB_CODE_ACTION_IN_PROGRESS) {
     if (code != 0 && terrno != 0) code = terrno;
     mmSendRsp(pMsg, code);
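The mnode worker hunk adds a `PROCESS_THRESHOLD` check that logs a warning when a message has been in processing for too long. A minimal sketch of the same elapsed-time guard follows; the helper names and the way the timestamp is obtained are stand-ins, only the threshold constant is taken from the hunk.

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define PROCESS_THRESHOLD_US (2000 * 1000)  // 2 seconds, same constant as in the hunk

static int64_t nowUs(void) {
  struct timespec ts;
  clock_gettime(CLOCK_REALTIME, &ts);
  return (int64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

// enqueueTs is the timestamp recorded when the message entered the queue.
static void warnIfSlow(int64_t enqueueTs, const char *msgType) {
  if (enqueueTs == 0) return;  // no timestamp recorded, nothing to check
  int64_t cost = nowUs() - enqueueTs;
  if (cost > PROCESS_THRESHOLD_US) {
    printf("warning: %s processed for too long, cost: %" PRId64 "s\n", msgType, cost / (1000 * 1000));
  }
}

int main(void) {
  warnIfSlow(nowUs() - 3000000, "DEMO_MSG");  // 3s old: triggers the warning
  warnIfSlow(nowUs(), "FRESH_MSG");           // fresh: silent
  return 0;
}
```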
@@ -27,6 +27,8 @@
 #define ARBGROUP_VER_NUMBER   1
 #define ARBGROUP_RESERVE_SIZE 64

+static SHashObj *arbUpdateHash = NULL;
+
 static int32_t mndArbGroupActionInsert(SSdb *pSdb, SArbGroup *pGroup);
 static int32_t mndArbGroupActionUpdate(SSdb *pSdb, SArbGroup *pOld, SArbGroup *pNew);
 static int32_t mndArbGroupActionDelete(SSdb *pSdb, SArbGroup *pGroup);

@@ -74,10 +76,14 @@ int32_t mndInitArbGroup(SMnode *pMnode) {
   mndAddShowRetrieveHandle(pMnode, TSDB_MGMT_TABLE_ARBGROUP, mndRetrieveArbGroups);
   mndAddShowFreeIterHandle(pMnode, TSDB_MGMT_TABLE_ARBGROUP, mndCancelGetNextArbGroup);

+  arbUpdateHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_ENTRY_LOCK);
+
   return sdbSetTable(pMnode->pSdb, table);
 }

-void mndCleanupArbGroup(SMnode *pMnode) {}
+void mndCleanupArbGroup(SMnode *pMnode) {
+  taosHashCleanup(arbUpdateHash);
+}

 SArbGroup *mndAcquireArbGroup(SMnode *pMnode, int32_t vgId) {
   SArbGroup *pGroup = sdbAcquire(pMnode->pSdb, SDB_ARBGROUP, &vgId);

@@ -221,8 +227,7 @@ static int32_t mndArbGroupActionUpdate(SSdb *pSdb, SArbGroup *pOld, SArbGroup *p
     mInfo("arbgroup:%d, skip to perform update action, old row:%p new row:%p, old version:%" PRId64
           " new version:%" PRId64,
           pOld->vgId, pOld, pNew, pOld->version, pNew->version);
-    taosThreadMutexUnlock(&pOld->mutex);
-    return 0;
+    goto _OVER;
   }

   for (int i = 0; i < TSDB_ARB_GROUP_MEMBER_NUM; i++) {

@@ -232,7 +237,11 @@ static int32_t mndArbGroupActionUpdate(SSdb *pSdb, SArbGroup *pOld, SArbGroup *p
   pOld->assignedLeader.dnodeId = pNew->assignedLeader.dnodeId;
   memcpy(pOld->assignedLeader.token, pNew->assignedLeader.token, TSDB_ARB_TOKEN_SIZE);
   pOld->version++;

+_OVER:
   taosThreadMutexUnlock(&pOld->mutex);

+  taosHashRemove(arbUpdateHash, &pOld->vgId, sizeof(int32_t));
   return 0;
 }

@@ -645,6 +654,11 @@ static void *mndBuildArbUpdateGroupReq(int32_t *pContLen, SArbGroup *pNewGroup)
 }

 static int32_t mndPullupArbUpdateGroup(SMnode *pMnode, SArbGroup *pNewGroup) {
+  if (taosHashGet(arbUpdateHash, &pNewGroup->vgId, sizeof(pNewGroup->vgId)) != NULL) {
+    mInfo("vgId:%d, arb skip to pullup arb-update-group request, since it is in process", pNewGroup->vgId);
+    return 0;
+  }
+
   int32_t contLen = 0;
   void   *pHead = mndBuildArbUpdateGroupReq(&contLen, pNewGroup);
   if (!pHead) {

@@ -653,7 +667,11 @@ static int32_t mndPullupArbUpdateGroup(SMnode *pMnode, SArbGroup *pNewGroup) {
   }
   SRpcMsg rpcMsg = {.msgType = TDMT_MND_ARB_UPDATE_GROUP, .pCont = pHead, .contLen = contLen, .info.noResp = true};

-  return tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg);
+  int32_t ret = tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg);
+  if (ret == 0) {
+    taosHashPut(arbUpdateHash, &pNewGroup->vgId, sizeof(pNewGroup->vgId), NULL, 0);
+  }
+  return ret;
 }

 static int32_t mndProcessArbUpdateGroupReq(SRpcMsg *pReq) {

@@ -930,8 +948,12 @@ static int32_t mndProcessArbCheckSyncRsp(SRpcMsg *pRsp) {

   SVArbCheckSyncRsp syncRsp = {0};
   if (tDeserializeSVArbCheckSyncRsp(pRsp->pCont, pRsp->contLen, &syncRsp) != 0) {
-    terrno = TSDB_CODE_INVALID_MSG;
     mInfo("arb sync check failed, since:%s", tstrerror(pRsp->code));
+    if (pRsp->code == TSDB_CODE_MND_ARB_TOKEN_MISMATCH) {
+      terrno = TSDB_CODE_SUCCESS;
+      return 0;
+    }
+    terrno = TSDB_CODE_INVALID_MSG;
     return -1;
   }
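The `arbUpdateHash` changes above keep a hash keyed by vgId so that only one arb-update-group request per vgroup is in flight: an entry is added when the request is queued successfully and removed when the update is applied. A small sketch of that dedup idea, with a fixed-size table of ids standing in for the real hash (illustrative only):

```c
#include <stdbool.h>
#include <stdio.h>

#define MAX_INFLIGHT 64
static int inflight[MAX_INFLIGHT];
static int numInflight = 0;

static bool isInflight(int vgId) {
  for (int i = 0; i < numInflight; i++) {
    if (inflight[i] == vgId) return true;
  }
  return false;
}

// Returns 0 when the request is (conceptually) queued or deliberately skipped.
static int pullupUpdateGroup(int vgId) {
  if (isInflight(vgId)) {
    printf("vgId:%d update already in process, skip\n", vgId);
    return 0;
  }
  // ... build and enqueue the real request here ...
  if (numInflight < MAX_INFLIGHT) inflight[numInflight++] = vgId;  // mark as in flight
  return 0;
}

int main(void) {
  pullupUpdateGroup(2);
  pullupUpdateGroup(2);  // second call is suppressed
  return 0;
}
```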
@@ -1457,8 +1457,8 @@ static void mndCreateTSMABuildCreateStreamReq(SCreateTSMACxt *pCxt) {
   pCxt->pCreateStreamReq->igUpdate = 0;
   pCxt->pCreateStreamReq->lastTs = pCxt->pCreateSmaReq->lastTs;
   pCxt->pCreateStreamReq->smaId = pCxt->pSma->uid;
-  pCxt->pCreateStreamReq->ast = strdup(pCxt->pCreateSmaReq->ast);
-  pCxt->pCreateStreamReq->sql = strdup(pCxt->pCreateSmaReq->sql);
+  pCxt->pCreateStreamReq->ast = taosStrdup(pCxt->pCreateSmaReq->ast);
+  pCxt->pCreateStreamReq->sql = taosStrdup(pCxt->pCreateSmaReq->sql);

   // construct tags
   pCxt->pCreateStreamReq->pTags = taosArrayInit(pCxt->pCreateStreamReq->numOfTags, sizeof(SField));

@@ -1494,7 +1494,7 @@ static void mndCreateTSMABuildCreateStreamReq(SCreateTSMACxt *pCxt) {
 static void mndCreateTSMABuildDropStreamReq(SCreateTSMACxt* pCxt) {
   tstrncpy(pCxt->pDropStreamReq->name, pCxt->streamName, TSDB_STREAM_FNAME_LEN);
   pCxt->pDropStreamReq->igNotExists = false;
-  pCxt->pDropStreamReq->sql = strdup(pCxt->pDropSmaReq->name);
+  pCxt->pDropStreamReq->sql = taosStrdup(pCxt->pDropSmaReq->name);
   pCxt->pDropStreamReq->sqlLen = strlen(pCxt->pDropStreamReq->sql);
 }

@@ -847,7 +847,7 @@ int64_t mndStreamGenChkptId(SMnode *pMnode, bool lock) {
     if (pIter == NULL) break;

     maxChkptId = TMAX(maxChkptId, pStream->checkpointId);
-    mDebug("stream:%p, %s id:%" PRIx64 "checkpoint %" PRId64 "", pStream, pStream->name, pStream->uid,
+    mDebug("stream:%p, %s id:0x%" PRIx64 " checkpoint %" PRId64 "", pStream, pStream->name, pStream->uid,
            pStream->checkpointId);
     sdbRelease(pSdb, pStream);
   }
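The log-format fix above adds the missing `0x` prefix and a space before `checkpoint`; without them the hex id and the next token run together in the output. For reference, a minimal example of printing a 64-bit id in hex (standard C, nothing TDengine-specific):

```c
#include <inttypes.h>
#include <stdio.h>

int main(void) {
  uint64_t uid = 0x1a2b3c4d5e6f7788ULL;
  int64_t  checkpointId = 42;
  // "id:0x%" PRIx64 " checkpoint %" PRId64 -> id:0x1a2b3c4d5e6f7788 checkpoint 42
  printf("id:0x%" PRIx64 " checkpoint %" PRId64 "\n", uid, checkpointId);
  return 0;
}
```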
@@ -1156,14 +1156,24 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp)

   // check if the checkpoint msg already sent or not.
   if (status == TASK_STATUS__CK) {
-    tqWarn("s-task:%s recv checkpoint-source msg again checkpointId:%" PRId64
-           " transId:%d already received, ignore this msg and continue process checkpoint",
+    tqWarn("s-task:%s repeatly recv checkpoint-source msg checkpointId:%" PRId64
+           " transId:%d already handled, ignore msg and continue process checkpoint",
            pTask->id.idStr, pTask->chkInfo.checkpointingId, req.transId);

     taosThreadMutexUnlock(&pTask->lock);
     streamMetaReleaseTask(pMeta, pTask);

     return TSDB_CODE_SUCCESS;
+  } else {  // checkpoint already finished, and not in checkpoint status
+    if (req.checkpointId <= pTask->chkInfo.checkpointId) {
+      tqWarn("s-task:%s repeatly recv checkpoint-source msg checkpointId:%" PRId64
+             " transId:%d already handled, ignore and discard", pTask->id.idStr, req.checkpointId, req.transId);
+
+      taosThreadMutexUnlock(&pTask->lock);
+      streamMetaReleaseTask(pMeta, pTask);
+
+      return TSDB_CODE_SUCCESS;
+    }
   }

   streamProcessCheckpointSourceReq(pTask, &req);
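The new `else` branch above makes checkpoint-source handling idempotent: if the task is not in checkpoint status and the incoming `checkpointId` is not newer than the last completed one, the duplicate message is acknowledged and discarded. A condensed sketch of that guard; field and type names below are stand-ins.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef enum { TASK_STATUS_NORMAL, TASK_STATUS_CK } DemoStatus;

typedef struct {
  DemoStatus status;
  int64_t    lastCheckpointId;  // last checkpoint that already finished
} DemoTask;

// Returns true when the request should be ignored (already handled or in progress).
static bool isDuplicateCheckpointReq(const DemoTask *task, int64_t reqCheckpointId) {
  if (task->status == TASK_STATUS_CK) {
    return true;  // already processing a checkpoint for this task
  }
  return reqCheckpointId <= task->lastCheckpointId;  // older or same id: discard
}

int main(void) {
  DemoTask t = {.status = TASK_STATUS_NORMAL, .lastCheckpointId = 7};
  printf("req 7 duplicate? %d\n", isDuplicateCheckpointReq(&t, 7));  // 1
  printf("req 8 duplicate? %d\n", isDuplicateCheckpointReq(&t, 8));  // 0
  return 0;
}
```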
@@ -485,6 +485,7 @@ SVCreateTbReq* buildAutoCreateTableReq(const char* stbFullName, int64_t suid, in

 int32_t doPutIntoCache(SSHashObj* pSinkTableMap, STableSinkInfo* pTableSinkInfo, uint64_t groupId, const char* id) {
   if (tSimpleHashGetSize(pSinkTableMap) > MAX_CACHE_TABLE_INFO_NUM) {
+    taosMemoryFreeClear(pTableSinkInfo);  // too many items, failed to cache it
     return TSDB_CODE_FAILED;
   }
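The one-line fix above frees `pTableSinkInfo` when the sink cache is already full, closing a leak on the early-return path. The general shape of the pattern is that ownership passes to the cache only on success; the names in this sketch are illustrative.

```c
#include <stdio.h>
#include <stdlib.h>

#define MAX_ITEMS 4
static void *cache[MAX_ITEMS];
static int   cacheSize = 0;

// Takes ownership of item on success; frees it (and reports failure) otherwise.
static int putIntoCache(void *item) {
  if (cacheSize >= MAX_ITEMS) {
    free(item);  // too many items: release it so the caller cannot leak it
    return -1;
  }
  cache[cacheSize++] = item;
  return 0;
}

int main(void) {
  for (int i = 0; i < 6; i++) {
    int *v = malloc(sizeof(int));
    *v = i;
    if (putIntoCache(v) != 0) printf("item %d rejected and freed\n", i);
  }
  return 0;
}
```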
@@ -897,6 +897,7 @@ int32_t tsdbFSEditCommit(STFileSystem *fs) {

   // commit
   code = commit_edit(fs);
+  ASSERT(code == 0);
   TSDB_CHECK_CODE(code, lino, _exit);

   // schedule merge

@@ -973,11 +974,11 @@ int32_t tsdbFSEditCommit(STFileSystem *fs) {

 _exit:
   if (code) {
-    TSDB_ERROR_LOG(TD_VID(fs->tsdb->pVnode), lino, code);
+    tsdbError("vgId:%d %s failed at line %d since %s", TD_VID(fs->tsdb->pVnode), __func__, lino, tstrerror(code));
   } else {
-    tsdbDebug("vgId:%d %s done, etype:%d", TD_VID(fs->tsdb->pVnode), __func__, fs->etype);
-    tsem_post(&fs->canEdit);
+    tsdbInfo("vgId:%d %s done, etype:%d", TD_VID(fs->tsdb->pVnode), __func__, fs->etype);
   }
+  tsem_post(&fs->canEdit);
   return code;
 }
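In the `_exit` block above, `tsem_post(&fs->canEdit)` moves out of the success-only branch so the semaphore is released on both the error path and the success path. A minimal illustration of the same cleanup rule with a POSIX semaphore; the commit logic is stand-in code.

```c
#include <semaphore.h>
#include <stdio.h>

static sem_t canEdit;

static int commitEdit(int failIt) {
  int code = failIt ? -1 : 0;

  if (code != 0) {
    fprintf(stderr, "commit failed\n");
  } else {
    printf("commit done\n");
  }
  sem_post(&canEdit);  // released on every path, not only on success
  return code;
}

int main(void) {
  sem_init(&canEdit, 0, 0);
  commitEdit(1);
  commitEdit(0);
  sem_destroy(&canEdit);
  return 0;
}
```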
@@ -580,9 +580,9 @@ int32_t tsdbMerge(void *arg) {
   }
   */
   // do merge
-  tsdbDebug("vgId:%d merge begin, fid:%d", TD_VID(tsdb->pVnode), merger->fid);
+  tsdbInfo("vgId:%d merge begin, fid:%d", TD_VID(tsdb->pVnode), merger->fid);
   code = tsdbDoMerge(merger);
-  tsdbDebug("vgId:%d merge done, fid:%d", TD_VID(tsdb->pVnode), mergeArg->fid);
+  tsdbInfo("vgId:%d merge done, fid:%d", TD_VID(tsdb->pVnode), mergeArg->fid);
   TSDB_CHECK_CODE(code, lino, _exit);

 _exit:

@@ -926,7 +926,7 @@ static int32_t execAlterLocal(SAlterLocalStmt* pStmt) {
     return terrno;
   }

-  if (cfgSetItem(tsCfg, pStmt->config, pStmt->value, CFG_STYPE_ALTER_CMD)) {
+  if (cfgSetItem(tsCfg, pStmt->config, pStmt->value, CFG_STYPE_ALTER_CMD, true)) {
     return terrno;
   }

@@ -26,7 +26,7 @@
 extern "C" {
 #endif

-#define CHECK_RSP_INTERVAL              300
+#define CHECK_RSP_CHECK_INTERVAL        300
 #define LAUNCH_HTASK_INTERVAL           100
 #define WAIT_FOR_MINIMAL_INTERVAL       100.00
 #define MAX_RETRY_LAUNCH_HISTORY_TASK   40

@@ -69,6 +69,7 @@ typedef struct {
   int64_t chkpId;
   char*   dbPrefixPath;
 } SStreamTaskSnap;

 struct STokenBucket {
   int32_t numCapacity;  // total capacity, available token per second
   int32_t numOfToken;   // total available tokens

@@ -148,18 +149,19 @@ int32_t streamQueueGetItemSize(const SStreamQueue* pQueue);

 void streamMetaRemoveDB(void* arg, char* key);

-typedef enum UPLOAD_TYPE {
-  UPLOAD_DISABLE = -1,
-  UPLOAD_S3 = 0,
-  UPLOAD_RSYNC = 1,
-} UPLOAD_TYPE;
+typedef enum ECHECKPOINT_BACKUP_TYPE {
+  DATA_UPLOAD_DISABLE = -1,
+  DATA_UPLOAD_S3 = 0,
+  DATA_UPLOAD_RSYNC = 1,
+} ECHECKPOINT_BACKUP_TYPE;

-UPLOAD_TYPE getUploadType();
-int uploadCheckpoint(char* id, char* path);
-int downloadCheckpoint(char* id, char* path);
-int deleteCheckpoint(char* id);
-int deleteCheckpointFile(char* id, char* name);
-int downloadCheckpointByName(char* id, char* fname, char* dstName);
+ECHECKPOINT_BACKUP_TYPE streamGetCheckpointBackupType();
+int32_t streamTaskBackupCheckpoint(char* id, char* path);
+int32_t downloadCheckpoint(char* id, char* path);
+int32_t deleteCheckpoint(char* id);
+int32_t deleteCheckpointFile(char* id, char* name);
+int32_t downloadCheckpointByName(char* id, char* fname, char* dstName);

 int32_t streamTaskOnNormalTaskReady(SStreamTask* pTask);
 int32_t streamTaskOnScanhistoryTaskReady(SStreamTask* pTask);
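The header hunk above renames `UPLOAD_TYPE` to `ECHECKPOINT_BACKUP_TYPE` with `DATA_UPLOAD_*` members, and `getUploadType()` becomes `streamGetCheckpointBackupType()`. A compact sketch of how such a selector is typically driven by configuration follows; the enum mirrors the diff, while the two parameters stand in for the real global config values.

```c
#include <stdio.h>
#include <string.h>

typedef enum {
  DATA_UPLOAD_DISABLE = -1,
  DATA_UPLOAD_S3 = 0,
  DATA_UPLOAD_RSYNC = 1,
} ECHECKPOINT_BACKUP_TYPE;

// snodeAddress / s3Enabled stand in for the real global config values.
static ECHECKPOINT_BACKUP_TYPE getBackupType(const char *snodeAddress, int s3Enabled) {
  if (snodeAddress != NULL && strlen(snodeAddress) != 0) {
    return DATA_UPLOAD_RSYNC;   // an snode is configured: back up via rsync
  } else if (s3Enabled) {
    return DATA_UPLOAD_S3;      // otherwise fall back to S3 when enabled
  }
  return DATA_UPLOAD_DISABLE;   // nothing configured: checkpoint backup is off
}

int main(void) {
  printf("%d\n", getBackupType("snode1:6050", 0));  // 1 (rsync)
  printf("%d\n", getBackupType("", 1));             // 0 (S3)
  printf("%d\n", getBackupType("", 0));             // -1 (disabled)
  return 0;
}
```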
@@ -376,10 +376,10 @@ int32_t rebuildFromRemoteChkp_s3(char* key, char* chkpPath, int64_t chkpId, char
   return code;
 }
 int32_t rebuildFromRemoteChkp(char* key, char* chkpPath, int64_t chkpId, char* defaultPath) {
-  UPLOAD_TYPE type = getUploadType();
-  if (type == UPLOAD_S3) {
+  ECHECKPOINT_BACKUP_TYPE type = streamGetCheckpointBackupType();
+  if (type == DATA_UPLOAD_S3) {
     return rebuildFromRemoteChkp_s3(key, chkpPath, chkpId, defaultPath);
-  } else if (type == UPLOAD_RSYNC) {
+  } else if (type == DATA_UPLOAD_RSYNC) {
     return rebuildFromRemoteChkp_rsync(key, chkpPath, chkpId, defaultPath);
   }
   return -1;

@@ -2111,11 +2111,11 @@ int32_t taskDbGenChkpUploadData__s3(STaskDbWrapper* pDb, void* bkdChkpMgt, int64
 }
 int32_t taskDbGenChkpUploadData(void* arg, void* mgt, int64_t chkpId, int8_t type, char** path, SArray* list) {
   STaskDbWrapper* pDb = arg;
-  UPLOAD_TYPE utype = type;
+  ECHECKPOINT_BACKUP_TYPE utype = type;

-  if (utype == UPLOAD_RSYNC) {
+  if (utype == DATA_UPLOAD_RSYNC) {
     return taskDbGenChkpUploadData__rsync(pDb, chkpId, path);
-  } else if (utype == UPLOAD_S3) {
+  } else if (utype == DATA_UPLOAD_S3) {
     return taskDbGenChkpUploadData__s3(pDb, mgt, chkpId, path, list);
   }
   return -1;

@@ -26,16 +26,22 @@ static void rspMonitorFn(void* param, void* tmrId);
 static int32_t streamTaskInitTaskCheckInfo(STaskCheckInfo* pInfo, STaskOutputInfo* pOutputInfo, int64_t startTs);
 static int32_t streamTaskStartCheckDownstream(STaskCheckInfo* pInfo, const char* id);
 static int32_t streamTaskCompleteCheckRsp(STaskCheckInfo* pInfo, bool lock, const char* id);
+static int32_t streamTaskAddReqInfo(STaskCheckInfo* pInfo, int64_t reqId, int32_t taskId, int32_t vgId, const char* id);
 static void    doSendCheckMsg(SStreamTask* pTask, SDownstreamStatusInfo* p);
+static void    handleTimeoutDownstreamTasks(SStreamTask* pTask, SArray* pTimeoutList);
+static void    handleNotReadyDownstreamTask(SStreamTask* pTask, SArray* pNotReadyList);
+static int32_t streamTaskUpdateCheckInfo(STaskCheckInfo* pInfo, int32_t taskId, int32_t status, int64_t rspTs,
+                                         int64_t reqId, int32_t* pNotReady, const char* id);
+static void    setCheckDownstreamReqInfo(SStreamTaskCheckReq* pReq, int64_t reqId, int32_t dstTaskId, int32_t dstNodeId);
 static void    getCheckRspStatus(STaskCheckInfo* pInfo, int64_t el, int32_t* numOfReady, int32_t* numOfFault,
                                  int32_t* numOfNotRsp, SArray* pTimeoutList, SArray* pNotReadyList, const char* id);

 static SDownstreamStatusInfo* findCheckRspStatus(STaskCheckInfo* pInfo, int32_t taskId);

 // check status
 void streamTaskCheckDownstream(SStreamTask* pTask) {
   SDataRange*  pRange = &pTask->dataRange;
   STimeWindow* pWindow = &pRange->window;
+  const char*  idstr = pTask->id.idStr;

   SStreamTaskCheckReq req = {
       .streamId = pTask->id.streamId,

@@ -51,16 +57,15 @@ void streamTaskCheckDownstream(SStreamTask* pTask) {
   if (pTask->outputInfo.type == TASK_OUTPUT__FIXED_DISPATCH) {
     streamTaskStartMonitorCheckRsp(pTask);

-    req.reqId = tGenIdPI64();
-    req.downstreamNodeId = pTask->outputInfo.fixedDispatcher.nodeId;
-    req.downstreamTaskId = pTask->outputInfo.fixedDispatcher.taskId;
+    STaskDispatcherFixed* pDispatch = &pTask->outputInfo.fixedDispatcher;

-    streamTaskAddReqInfo(&pTask->taskCheckInfo, req.reqId, req.downstreamTaskId, pTask->id.idStr);
+    setCheckDownstreamReqInfo(&req, tGenIdPI64(), pDispatch->taskId, pDispatch->nodeId);
+    streamTaskAddReqInfo(&pTask->taskCheckInfo, req.reqId, pDispatch->taskId, pDispatch->nodeId, idstr);

     stDebug("s-task:%s (vgId:%d) stage:%" PRId64 " check single downstream task:0x%x(vgId:%d) ver:%" PRId64 "-%" PRId64
             " window:%" PRId64 "-%" PRId64 " reqId:0x%" PRIx64,
-            pTask->id.idStr, pTask->info.nodeId, req.stage, req.downstreamTaskId, req.downstreamNodeId,
-            pRange->range.minVer, pRange->range.maxVer, pWindow->skey, pWindow->ekey, req.reqId);
+            idstr, pTask->info.nodeId, req.stage, req.downstreamTaskId, req.downstreamNodeId, pRange->range.minVer,
+            pRange->range.maxVer, pWindow->skey, pWindow->ekey, req.reqId);

     streamSendCheckMsg(pTask, &req, pTask->outputInfo.fixedDispatcher.nodeId, &pTask->outputInfo.fixedDispatcher.epSet);

@@ -70,25 +75,23 @@ void streamTaskCheckDownstream(SStreamTask* pTask) {
     SArray* vgInfo = pTask->outputInfo.shuffleDispatcher.dbInfo.pVgroupInfos;

     int32_t numOfVgs = taosArrayGetSize(vgInfo);
-    stDebug("s-task:%s check %d downstream tasks, ver:%" PRId64 "-%" PRId64 " window:%" PRId64 "-%" PRId64,
-            pTask->id.idStr, numOfVgs, pRange->range.minVer, pRange->range.maxVer, pWindow->skey, pWindow->ekey);
+    stDebug("s-task:%s check %d downstream tasks, ver:%" PRId64 "-%" PRId64 " window:%" PRId64 "-%" PRId64, idstr,
+            numOfVgs, pRange->range.minVer, pRange->range.maxVer, pWindow->skey, pWindow->ekey);

     for (int32_t i = 0; i < numOfVgs; i++) {
       SVgroupInfo* pVgInfo = taosArrayGet(vgInfo, i);
-      req.reqId = tGenIdPI64();
-      req.downstreamNodeId = pVgInfo->vgId;
-      req.downstreamTaskId = pVgInfo->taskId;

-      streamTaskAddReqInfo(&pTask->taskCheckInfo, req.reqId, req.downstreamTaskId, pTask->id.idStr);
+      setCheckDownstreamReqInfo(&req, tGenIdPI64(), pVgInfo->taskId, pVgInfo->vgId);
+      streamTaskAddReqInfo(&pTask->taskCheckInfo, req.reqId, pVgInfo->taskId, pVgInfo->vgId, idstr);

       stDebug("s-task:%s (vgId:%d) stage:%" PRId64
               " check downstream task:0x%x (vgId:%d) (shuffle), idx:%d, reqId:0x%" PRIx64,
-              pTask->id.idStr, pTask->info.nodeId, req.stage, req.downstreamTaskId, req.downstreamNodeId, i, req.reqId);
+              idstr, pTask->info.nodeId, req.stage, req.downstreamTaskId, req.downstreamNodeId, i, req.reqId);
       streamSendCheckMsg(pTask, &req, pVgInfo->vgId, &pVgInfo->epSet);
     }
   } else {  // for sink task, set it ready directly.
-    stDebug("s-task:%s (vgId:%d) set downstream ready, since no downstream", pTask->id.idStr, pTask->info.nodeId);
-    streamTaskStopMonitorCheckRsp(&pTask->taskCheckInfo, pTask->id.idStr);
+    stDebug("s-task:%s (vgId:%d) set downstream ready, since no downstream", idstr, pTask->info.nodeId);
+    streamTaskStopMonitorCheckRsp(&pTask->taskCheckInfo, idstr);
     processDownstreamReadyRsp(pTask);
   }
 }

@@ -158,44 +161,26 @@ int32_t streamProcessCheckRsp(SStreamTask* pTask, const SStreamTaskCheckRsp* pRs
   return 0;
 }

-int32_t streamTaskAddReqInfo(STaskCheckInfo* pInfo, int64_t reqId, int32_t taskId, const char* id) {
-  SDownstreamStatusInfo info = {.taskId = taskId, .status = -1, .reqId = reqId, .rspTs = 0};
-
-  taosThreadMutexLock(&pInfo->checkInfoLock);
-
-  SDownstreamStatusInfo* p = findCheckRspStatus(pInfo, taskId);
-  if (p != NULL) {
-    stDebug("s-task:%s check info to task:0x%x already sent", id, taskId);
-    taosThreadMutexUnlock(&pInfo->checkInfoLock);
-    return TSDB_CODE_SUCCESS;
-  }
-
-  taosArrayPush(pInfo->pList, &info);
-
-  taosThreadMutexUnlock(&pInfo->checkInfoLock);
-  return TSDB_CODE_SUCCESS;
-}
-
 int32_t streamTaskStartMonitorCheckRsp(SStreamTask* pTask) {
   STaskCheckInfo* pInfo = &pTask->taskCheckInfo;

   taosThreadMutexLock(&pInfo->checkInfoLock);

   int32_t code = streamTaskStartCheckDownstream(pInfo, pTask->id.idStr);
   if (code != TSDB_CODE_SUCCESS) {
     taosThreadMutexUnlock(&pInfo->checkInfoLock);
     return TSDB_CODE_FAILED;
   }

+  /*SStreamTask* p = */ streamMetaAcquireOneTask(pTask);  // add task ref here
   streamTaskInitTaskCheckInfo(pInfo, &pTask->outputInfo, taosGetTimestampMs());

   int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1);
   stDebug("s-task:%s start check rsp monit, ref:%d ", pTask->id.idStr, ref);

   if (pInfo->checkRspTmr == NULL) {
-    pInfo->checkRspTmr = taosTmrStart(rspMonitorFn, CHECK_RSP_INTERVAL, pTask, streamTimer);
+    pInfo->checkRspTmr = taosTmrStart(rspMonitorFn, CHECK_RSP_CHECK_INTERVAL, pTask, streamTimer);
   } else {
-    taosTmrReset(rspMonitorFn, CHECK_RSP_INTERVAL, pTask, streamTimer, &pInfo->checkRspTmr);
+    taosTmrReset(rspMonitorFn, CHECK_RSP_CHECK_INTERVAL, pTask, streamTimer, &pInfo->checkRspTmr);
   }

   taosThreadMutexUnlock(&pInfo->checkInfoLock);

@@ -307,10 +292,8 @@ int32_t streamTaskUpdateCheckInfo(STaskCheckInfo* pInfo, int32_t taskId, int32_t

   SDownstreamStatusInfo* p = findCheckRspStatus(pInfo, taskId);
   if (p != NULL) {
     if (reqId != p->reqId) {
-      stError("s-task:%s reqId:%" PRIx64 " expected:%" PRIx64
-              " expired check-rsp recv from downstream task:0x%x, discarded",
+      stError("s-task:%s reqId:%" PRIx64 " expected:%" PRIx64 " expired check-rsp recv from downstream task:0x%x, discarded",
               id, reqId, p->reqId, taskId);
       taosThreadMutexUnlock(&pInfo->checkInfoLock);
       return TSDB_CODE_FAILED;

@@ -341,12 +324,13 @@ int32_t streamTaskStartCheckDownstream(STaskCheckInfo* pInfo, const char* id) {
     pInfo->inCheckProcess = 1;
   } else {
     ASSERT(pInfo->startTs > 0);
-    stError("s-task:%s already in check procedure, checkTs:%"PRId64", start monitor check rsp failed", id, pInfo->startTs);
+    stError("s-task:%s already in check procedure, checkTs:%" PRId64 ", start monitor check rsp failed", id,
+            pInfo->startTs);
     return TSDB_CODE_FAILED;
   }

   stDebug("s-task:%s set the in-check-procedure flag", id);
-  return 0;
+  return TSDB_CODE_SUCCESS;
 }

 int32_t streamTaskCompleteCheckRsp(STaskCheckInfo* pInfo, bool lock, const char* id) {

@@ -355,7 +339,7 @@ int32_t streamTaskCompleteCheckRsp(STaskCheckInfo* pInfo, bool lock, const char*
   }

   if (!pInfo->inCheckProcess) {
-    stWarn("s-task:%s already not in-check-procedure", id);
+    // stWarn("s-task:%s already not in-check-procedure", id);
   }

   int64_t el = (pInfo->startTs != 0) ? (taosGetTimestampMs() - pInfo->startTs) : 0;

@@ -378,6 +362,23 @@ int32_t streamTaskCompleteCheckRsp(STaskCheckInfo* pInfo, bool lock, const char*
   return 0;
 }

+int32_t streamTaskAddReqInfo(STaskCheckInfo* pInfo, int64_t reqId, int32_t taskId, int32_t vgId, const char* id) {
+  SDownstreamStatusInfo info = {.taskId = taskId, .status = -1, .vgId = vgId, .reqId = reqId, .rspTs = 0};
+  taosThreadMutexLock(&pInfo->checkInfoLock);
+
+  SDownstreamStatusInfo* p = findCheckRspStatus(pInfo, taskId);
+  if (p != NULL) {
+    stDebug("s-task:%s check info to task:0x%x already sent", id, taskId);
+    taosThreadMutexUnlock(&pInfo->checkInfoLock);
+    return TSDB_CODE_SUCCESS;
+  }
+
+  taosArrayPush(pInfo->pList, &info);
+
+  taosThreadMutexUnlock(&pInfo->checkInfoLock);
+  return TSDB_CODE_SUCCESS;
+}
+
 void doSendCheckMsg(SStreamTask* pTask, SDownstreamStatusInfo* p) {
   SStreamTaskCheckReq req = {
       .streamId = pTask->id.streamId,

@@ -389,9 +390,9 @@ void doSendCheckMsg(SStreamTask* pTask, SDownstreamStatusInfo* p) {

   STaskOutputInfo* pOutputInfo = &pTask->outputInfo;
   if (pOutputInfo->type == TASK_OUTPUT__FIXED_DISPATCH) {
-    req.reqId = p->reqId;
-    req.downstreamNodeId = pOutputInfo->fixedDispatcher.nodeId;
-    req.downstreamTaskId = pOutputInfo->fixedDispatcher.taskId;
+    STaskDispatcherFixed* pDispatch = &pOutputInfo->fixedDispatcher;
+    setCheckDownstreamReqInfo(&req, p->reqId, pDispatch->taskId, pDispatch->taskId);

     stDebug("s-task:%s (vgId:%d) stage:%" PRId64 " re-send check downstream task:0x%x(vgId:%d) reqId:0x%" PRIx64,
             pTask->id.idStr, pTask->info.nodeId, req.stage, req.downstreamTaskId, req.downstreamNodeId, req.reqId);

@@ -404,9 +405,7 @@ void doSendCheckMsg(SStreamTask* pTask, SDownstreamStatusInfo* p) {
       SVgroupInfo* pVgInfo = taosArrayGet(vgInfo, i);

       if (p->taskId == pVgInfo->taskId) {
-        req.reqId = p->reqId;
-        req.downstreamNodeId = pVgInfo->vgId;
-        req.downstreamTaskId = pVgInfo->taskId;
+        setCheckDownstreamReqInfo(&req, p->reqId, pVgInfo->taskId, pVgInfo->vgId);

         stDebug("s-task:%s (vgId:%d) stage:%" PRId64
                 " re-send check downstream task:0x%x(vgId:%d) (shuffle), idx:%d reqId:0x%" PRIx64,

@@ -423,7 +422,6 @@ void doSendCheckMsg(SStreamTask* pTask, SDownstreamStatusInfo* p) {

 void getCheckRspStatus(STaskCheckInfo* pInfo, int64_t el, int32_t* numOfReady, int32_t* numOfFault,
                        int32_t* numOfNotRsp, SArray* pTimeoutList, SArray* pNotReadyList, const char* id) {
   for (int32_t i = 0; i < taosArrayGetSize(pInfo->pList); ++i) {
     SDownstreamStatusInfo* p = taosArrayGet(pInfo->pList, i);
     if (p->status == TASK_DOWNSTREAM_READY) {

@@ -447,8 +445,81 @@ void getCheckRspStatus(STaskCheckInfo* pInfo, int64_t el, int32_t* numOfReady, i
   }
 }

+void setCheckDownstreamReqInfo(SStreamTaskCheckReq* pReq, int64_t reqId, int32_t dstTaskId, int32_t dstNodeId) {
+  pReq->reqId = reqId;
+  pReq->downstreamTaskId = dstTaskId;
+  pReq->downstreamNodeId = dstNodeId;
+}
+
+void handleTimeoutDownstreamTasks(SStreamTask* pTask, SArray* pTimeoutList) {
+  STaskCheckInfo* pInfo = &pTask->taskCheckInfo;
+  const char*     id = pTask->id.idStr;
+  int32_t         vgId = pTask->pMeta->vgId;
+  int32_t         numOfTimeout = taosArrayGetSize(pTimeoutList);
+
+  ASSERT(pTask->status.downstreamReady == 0);
+
+  for (int32_t i = 0; i < numOfTimeout; ++i) {
+    int32_t taskId = *(int32_t*)taosArrayGet(pTimeoutList, i);
+
+    SDownstreamStatusInfo* p = findCheckRspStatus(pInfo, taskId);
+    if (p != NULL) {
+      ASSERT(p->status == -1 && p->rspTs == 0);
+      doSendCheckMsg(pTask, p);
+    }
+  }
+
+  pInfo->timeoutRetryCount += 1;
+
+  // timeout more than 100 sec, add into node update list
+  if (pInfo->timeoutRetryCount > 10) {
+    pInfo->timeoutRetryCount = 0;
+
+    for (int32_t i = 0; i < numOfTimeout; ++i) {
+      int32_t taskId = *(int32_t*)taosArrayGet(pTimeoutList, i);
+      SDownstreamStatusInfo* p = findCheckRspStatus(pInfo, taskId);
+      if (p != NULL) {
+        addIntoNodeUpdateList(pTask, p->vgId);
+        stDebug("s-task:%s vgId:%d downstream task:0x%x (vgId:%d) timeout more than 100sec, add into nodeUpate list",
+                id, vgId, p->taskId, p->vgId);
+      }
+    }
+
+    stDebug("s-task:%s vgId:%d %d downstream task(s) all add into nodeUpate list", id, vgId, numOfTimeout);
+  } else {
+    stDebug("s-task:%s vgId:%d %d downstream task(s) timeout, send check msg again, retry:%d start time:%" PRId64, id,
+            vgId, numOfTimeout, pInfo->timeoutRetryCount, pInfo->startTs);
+  }
+}
+
+void handleNotReadyDownstreamTask(SStreamTask* pTask, SArray* pNotReadyList) {
+  STaskCheckInfo* pInfo = &pTask->taskCheckInfo;
+  const char*     id = pTask->id.idStr;
+  int32_t         vgId = pTask->pMeta->vgId;
+  int32_t         numOfNotReady = taosArrayGetSize(pNotReadyList);
+
+  ASSERT(pTask->status.downstreamReady == 0);
+
+  // reset the info, and send the check msg to failure downstream again
+  for (int32_t i = 0; i < numOfNotReady; ++i) {
+    int32_t taskId = *(int32_t*)taosArrayGet(pNotReadyList, i);
+
+    SDownstreamStatusInfo* p = findCheckRspStatus(pInfo, taskId);
+    if (p != NULL) {
+      p->rspTs = 0;
+      p->status = -1;
+      doSendCheckMsg(pTask, p);
+    }
+  }
+
+  pInfo->notReadyRetryCount += 1;
+  stDebug("s-task:%s vgId:%d %d downstream task(s) not ready, send check msg again, retry:%d start time:%" PRId64, id,
+          vgId, numOfNotReady, pInfo->notReadyRetryCount, pInfo->startTs);
+}
+
 void rspMonitorFn(void* param, void* tmrId) {
   SStreamTask*      pTask = param;
+  SStreamMeta*      pMeta = pTask->pMeta;
   SStreamTaskState* pStat = streamTaskGetStatus(pTask);
   STaskCheckInfo*   pInfo = &pTask->taskCheckInfo;
   int32_t           vgId = pTask->pMeta->vgId;

@@ -461,6 +532,7 @@ void rspMonitorFn(void* param, void* tmrId) {
   int32_t numOfNotRsp = 0;
   int32_t numOfNotReady = 0;
   int32_t numOfTimeout = 0;
+  int32_t total = taosArrayGetSize(pInfo->pList);

   stDebug("s-task:%s start to do check-downstream-rsp check in tmr", id);

@@ -475,6 +547,8 @@ void rspMonitorFn(void* param, void* tmrId) {
       STaskId* pHId = &pTask->hTaskInfo.id;
       streamMetaAddTaskLaunchResult(pTask->pMeta, pHId->streamId, pHId->taskId, pInfo->startTs, now, false);
     }
+
+    streamMetaReleaseTask(pMeta, pTask);
     return;
   }

@@ -483,6 +557,7 @@ void rspMonitorFn(void* param, void* tmrId) {
     stDebug("s-task:%s status:%s vgId:%d quit from monitor check-rsp tmr, ref:%d", id, pStat->name, vgId, ref);

     streamTaskCompleteCheckRsp(pInfo, true, id);
+    streamMetaReleaseTask(pMeta, pTask);
     return;
   }

@@ -494,6 +569,7 @@ void rspMonitorFn(void* param, void* tmrId) {

     streamTaskCompleteCheckRsp(pInfo, false, id);
     taosThreadMutexUnlock(&pInfo->checkInfoLock);
+    streamMetaReleaseTask(pMeta, pTask);
     return;
   }

@@ -510,7 +586,7 @@ void rspMonitorFn(void* param, void* tmrId) {
   numOfTimeout = (int32_t)taosArrayGetSize(pTimeoutList);

   // fault tasks detected, not try anymore
-  ASSERT((numOfReady + numOfFault + numOfNotReady + numOfTimeout + numOfNotRsp) == taosArrayGetSize(pInfo->pList));
+  ASSERT((numOfReady + numOfFault + numOfNotReady + numOfTimeout + numOfNotRsp) == total);
   if (numOfFault > 0) {
     int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
     stDebug(

@@ -520,6 +596,7 @@ void rspMonitorFn(void* param, void* tmrId) {

     streamTaskCompleteCheckRsp(pInfo, false, id);
     taosThreadMutexUnlock(&pInfo->checkInfoLock);
+    streamMetaReleaseTask(pMeta, pTask);

     taosArrayDestroy(pNotReadyList);
     taosArrayDestroy(pTimeoutList);

@@ -538,69 +615,32 @@ void rspMonitorFn(void* param, void* tmrId) {
     taosThreadMutexUnlock(&pInfo->checkInfoLock);

     // add the not-ready tasks into the final task status result buf, along with related fill-history task if exists.
-    streamMetaAddTaskLaunchResult(pTask->pMeta, pTask->id.streamId, pTask->id.taskId, pInfo->startTs, now, false);
+    streamMetaAddTaskLaunchResult(pMeta, pTask->id.streamId, pTask->id.taskId, pInfo->startTs, now, false);
     if (HAS_RELATED_FILLHISTORY_TASK(pTask)) {
       STaskId* pHId = &pTask->hTaskInfo.id;
-      streamMetaAddTaskLaunchResult(pTask->pMeta, pHId->streamId, pHId->taskId, pInfo->startTs, now, false);
+      streamMetaAddTaskLaunchResult(pMeta, pHId->streamId, pHId->taskId, pInfo->startTs, now, false);
     }

+    streamMetaReleaseTask(pMeta, pTask);
+
     taosArrayDestroy(pNotReadyList);
     taosArrayDestroy(pTimeoutList);
     return;
   }

   if (numOfNotReady > 0) {  // check to make sure not in recheck timer
-    ASSERT(pTask->status.downstreamReady == 0);
-
-    // reset the info, and send the check msg to failure downstream again
-    for (int32_t i = 0; i < numOfNotReady; ++i) {
-      int32_t taskId = *(int32_t*)taosArrayGet(pNotReadyList, i);
-
-      SDownstreamStatusInfo* p = findCheckRspStatus(pInfo, taskId);
-      if (p != NULL) {
-        p->rspTs = 0;
-        p->status = -1;
-        doSendCheckMsg(pTask, p);
-      }
-    }
-
-    pInfo->notReadyRetryCount += 1;
-    stDebug("s-task:%s %d downstream task(s) not ready, send check msg again, retry:%d start time:%" PRId64, id,
-            numOfNotReady, pInfo->notReadyRetryCount, pInfo->startTs);
+    handleNotReadyDownstreamTask(pTask, pNotReadyList);
   }

-  // todo add into node update list and send to mnode
   if (numOfTimeout > 0) {
-    ASSERT(pTask->status.downstreamReady == 0);
-
-    for (int32_t i = 0; i < numOfTimeout; ++i) {
-      int32_t taskId = *(int32_t*)taosArrayGet(pTimeoutList, i);
-
-      SDownstreamStatusInfo* p = findCheckRspStatus(pInfo, taskId);
-      if (p != NULL) {
-        ASSERT(p->status == -1 && p->rspTs == 0);
-        doSendCheckMsg(pTask, p);
-      }
-    }
-
-    pInfo->timeoutRetryCount += 1;
-
-    // timeout more than 100 sec, add into node update list
-    if (pInfo->timeoutRetryCount > 10) {
-      pInfo->timeoutRetryCount = 0;
-      stDebug("s-task:%s vgId:%d %d downstream task(s) timeout more than 100sec, add into nodeUpate list", id, vgId,
-              numOfTimeout);
-    } else {
-      stDebug("s-task:%s vgId:%d %d downstream task(s) timeout, send check msg again, retry:%d start time:%" PRId64, id,
-              vgId, numOfTimeout, pInfo->timeoutRetryCount, pInfo->startTs);
-    }
+    handleTimeoutDownstreamTasks(pTask, pTimeoutList);
   }

-  taosTmrReset(rspMonitorFn, CHECK_RSP_INTERVAL, pTask, streamTimer, &pInfo->checkRspTmr);
+  taosTmrReset(rspMonitorFn, CHECK_RSP_CHECK_INTERVAL, pTask, streamTimer, &pInfo->checkRspTmr);
   taosThreadMutexUnlock(&pInfo->checkInfoLock);

-  stDebug("s-task:%s continue checking rsp in 300ms, notRsp:%d, notReady:%d, fault:%d, timeout:%d, ready:%d", id,
-          numOfNotRsp, numOfNotReady, numOfFault, numOfTimeout, numOfReady);
+  stDebug("s-task:%s continue checking rsp in 300ms, total:%d, notRsp:%d, notReady:%d, fault:%d, timeout:%d, ready:%d",
+          id, total, numOfNotRsp, numOfNotReady, numOfFault, numOfTimeout, numOfReady);

   taosArrayDestroy(pNotReadyList);
   taosArrayDestroy(pTimeoutList);
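The refactor above moves the timeout and not-ready handling into `handleTimeoutDownstreamTasks()` and `handleNotReadyDownstreamTask()`: every timer tick re-sends the check message, and once `timeoutRetryCount` exceeds 10 (around 100 seconds per the code comment) the downstream vgroup is added to the node-update list instead of being retried quietly. A small sketch of that retry-then-escalate control flow, with illustrative stand-in helpers:

```c
#include <stdio.h>

#define MAX_TIMEOUT_RETRY 10

typedef struct {
  int timeoutRetryCount;
} RetryState;

static void resendCheck(int taskId)       { printf("re-send check to task 0x%x\n", taskId); }
static void addToNodeUpdateList(int vgId) { printf("escalate: add vgId %d to node-update list\n", vgId); }

// Called on every monitor tick for a downstream task that has timed out.
static void handleTimeout(RetryState *st, int taskId, int vgId) {
  resendCheck(taskId);
  st->timeoutRetryCount++;

  if (st->timeoutRetryCount > MAX_TIMEOUT_RETRY) {  // stuck too long: stop retrying quietly
    st->timeoutRetryCount = 0;
    addToNodeUpdateList(vgId);
  }
}

int main(void) {
  RetryState st = {0};
  for (int tick = 0; tick < 12; tick++) handleTimeout(&st, 0x1234, 2);
  return 0;
}
```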
@ -19,7 +19,7 @@
|
||||||
#include "streamInt.h"
|
#include "streamInt.h"
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
UPLOAD_TYPE type;
|
ECHECKPOINT_BACKUP_TYPE type;
|
||||||
char* taskId;
|
char* taskId;
|
||||||
int64_t chkpId;
|
int64_t chkpId;
|
||||||
|
|
||||||
|
@ -416,7 +416,7 @@ int32_t getChkpMeta(char* id, char* path, SArray* list) {
|
||||||
return code;
|
return code;
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t doUploadChkp(void* param) {
|
int32_t uploadCheckpointData(void* param) {
|
||||||
SAsyncUploadArg* arg = param;
|
SAsyncUploadArg* arg = param;
|
||||||
char* path = NULL;
|
char* path = NULL;
|
||||||
int32_t code = 0;
|
int32_t code = 0;
|
||||||
|
@@ -426,13 +426,13 @@ int32_t doUploadChkp(void* param) {
                            (int8_t)(arg->type), &path, toDelFiles)) != 0) {
     stError("s-task:%s failed to gen upload checkpoint:%" PRId64 "", arg->pTask->id.idStr, arg->chkpId);
   }
-  if (arg->type == UPLOAD_S3) {
+  if (arg->type == DATA_UPLOAD_S3) {
     if (code == 0 && (code = getChkpMeta(arg->taskId, path, toDelFiles)) != 0) {
       stError("s-task:%s failed to get checkpoint:%" PRId64 " meta", arg->pTask->id.idStr, arg->chkpId);
     }
   }
 
-  if (code == 0 && (code = uploadCheckpoint(arg->taskId, path)) != 0) {
+  if (code == 0 && (code = streamTaskBackupCheckpoint(arg->taskId, path)) != 0) {
     stError("s-task:%s failed to upload checkpoint:%" PRId64, arg->pTask->id.idStr, arg->chkpId);
   }
 
@@ -459,8 +459,8 @@ int32_t doUploadChkp(void* param) {
 
 int32_t streamTaskUploadChkp(SStreamTask* pTask, int64_t chkpId, char* taskId) {
   // async upload
-  UPLOAD_TYPE type = getUploadType();
-  if (type == UPLOAD_DISABLE) {
+  ECHECKPOINT_BACKUP_TYPE type = streamGetCheckpointBackupType();
+  if (type == DATA_UPLOAD_DISABLE) {
     return 0;
   }
 
@@ -474,7 +474,7 @@ int32_t streamTaskUploadChkp(SStreamTask* pTask, int64_t chkpId, char* taskId) {
   arg->chkpId = chkpId;
   arg->pTask = pTask;
 
-  return streamMetaAsyncExec(pTask->pMeta, doUploadChkp, arg, NULL);
+  return streamMetaAsyncExec(pTask->pMeta, uploadCheckpointData, arg, NULL);
 }
 
 int32_t streamTaskBuildCheckpoint(SStreamTask* pTask) {
@@ -558,7 +558,7 @@ int32_t streamTaskBuildCheckpoint(SStreamTask* pTask) {
   return code;
 }
 
-static int uploadCheckpointToS3(char* id, char* path) {
+static int32_t uploadCheckpointToS3(char* id, char* path) {
   TdDirPtr pDir = taosOpenDir(path);
   if (pDir == NULL) return -1;
 
@@ -590,8 +590,8 @@ static int uploadCheckpointToS3(char* id, char* path) {
   return 0;
 }
 
-static int downloadCheckpointByNameS3(char* id, char* fname, char* dstName) {
-  int code = 0;
+static int32_t downloadCheckpointByNameS3(char* id, char* fname, char* dstName) {
+  int32_t code = 0;
   char*   buf = taosMemoryCalloc(1, strlen(id) + strlen(dstName) + 4);
   sprintf(buf, "%s/%s", id, fname);
   if (s3GetObjectToFile(buf, dstName) != 0) {
@@ -601,19 +601,19 @@ static int downloadCheckpointByNameS3(char* id, char* fname, char* dstName) {
   return code;
 }
 
-UPLOAD_TYPE getUploadType() {
+ECHECKPOINT_BACKUP_TYPE streamGetCheckpointBackupType() {
   if (strlen(tsSnodeAddress) != 0) {
-    return UPLOAD_RSYNC;
+    return DATA_UPLOAD_RSYNC;
   } else if (tsS3StreamEnabled) {
-    return UPLOAD_S3;
+    return DATA_UPLOAD_S3;
   } else {
-    return UPLOAD_DISABLE;
+    return DATA_UPLOAD_DISABLE;
  }
 }
 
-int uploadCheckpoint(char* id, char* path) {
+int32_t streamTaskBackupCheckpoint(char* id, char* path) {
   if (id == NULL || path == NULL || strlen(id) == 0 || strlen(path) == 0 || strlen(path) >= PATH_MAX) {
-    stError("uploadCheckpoint parameters invalid");
+    stError("streamTaskBackupCheckpoint parameters invalid");
     return -1;
   }
   if (strlen(tsSnodeAddress) != 0) {
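
The rename above also clarifies how the backup target for a finished checkpoint is chosen. A minimal standalone sketch of that selection logic follows; only the enum, function, and global names are taken from the diff, while the hard-coded settings and the printf dispatch are illustrative stand-ins, not the real TDengine configuration code.

```c
/* Standalone sketch of the checkpoint backup-type selection shown above.
 * Only the names mirror the diff; the globals are simplified stand-ins. */
#include <stdio.h>
#include <string.h>

typedef enum { DATA_UPLOAD_DISABLE = 0, DATA_UPLOAD_RSYNC, DATA_UPLOAD_S3 } ECHECKPOINT_BACKUP_TYPE;

static char tsSnodeAddress[256] = "";  /* assumed: rsync target, empty means unset */
static int  tsS3StreamEnabled   = 1;   /* assumed: S3 backup switch */

static ECHECKPOINT_BACKUP_TYPE streamGetCheckpointBackupType(void) {
  if (strlen(tsSnodeAddress) != 0) {
    return DATA_UPLOAD_RSYNC;          /* an snode address is configured: use rsync */
  } else if (tsS3StreamEnabled) {
    return DATA_UPLOAD_S3;             /* otherwise back up to S3 if it is enabled */
  } else {
    return DATA_UPLOAD_DISABLE;        /* no backup target configured at all */
  }
}

int main(void) {
  switch (streamGetCheckpointBackupType()) {
    case DATA_UPLOAD_RSYNC:   printf("backup checkpoint via rsync\n"); break;
    case DATA_UPLOAD_S3:      printf("backup checkpoint via S3\n");    break;
    case DATA_UPLOAD_DISABLE: printf("checkpoint backup disabled\n");  break;
  }
  return 0;
}
```

Compiled on its own, the sketch simply prints which backup path would be chosen for the assumed settings.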
@@ -625,7 +625,7 @@ int uploadCheckpoint(char* id, char* path) {
 }
 
 // fileName: CURRENT
-int downloadCheckpointByName(char* id, char* fname, char* dstName) {
+int32_t downloadCheckpointByName(char* id, char* fname, char* dstName) {
   if (id == NULL || fname == NULL || strlen(id) == 0 || strlen(fname) == 0 || strlen(fname) >= PATH_MAX) {
     stError("uploadCheckpointByName parameters invalid");
     return -1;
 
@@ -638,7 +638,7 @@ int downloadCheckpointByName(char* id, char* fname, char* dstName) {
   return 0;
 }
 
-int downloadCheckpoint(char* id, char* path) {
+int32_t downloadCheckpoint(char* id, char* path) {
   if (id == NULL || path == NULL || strlen(id) == 0 || strlen(path) == 0 || strlen(path) >= PATH_MAX) {
     stError("downloadCheckpoint parameters invalid");
     return -1;
 
@@ -651,7 +651,7 @@ int downloadCheckpoint(char* id, char* path) {
   return 0;
 }
 
-int deleteCheckpoint(char* id) {
+int32_t deleteCheckpoint(char* id) {
   if (id == NULL || strlen(id) == 0) {
     stError("deleteCheckpoint parameters invalid");
     return -1;
 
@@ -664,7 +664,7 @@ int deleteCheckpoint(char* id) {
   return 0;
 }
 
-int deleteCheckpointFile(char* id, char* name) {
+int32_t deleteCheckpointFile(char* id, char* name) {
   char object[128] = {0};
   snprintf(object, sizeof(object), "%s/%s", id, name);
   char* tmp = object;
@@ -17,7 +17,6 @@
 
 #define MAX_STREAM_EXEC_BATCH_NUM 32
 #define MAX_SMOOTH_BURST_RATIO    5  // 5 sec
-#define WAIT_FOR_DURATION         10
 
 // todo refactor:
 // read data from input queue
@@ -85,9 +85,10 @@ void doExecScanhistoryInFuture(void* param, void* tmrId) {
 
   SStreamTaskState* p = streamTaskGetStatus(pTask);
   if (p->state == TASK_STATUS__DROPPING || p->state == TASK_STATUS__STOP) {
-    streamMetaReleaseTask(pTask->pMeta, pTask);
     int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
     stDebug("s-task:%s status:%s not start scan-history again, ref:%d", pTask->id.idStr, p->name, ref);
+
+    streamMetaReleaseTask(pTask->pMeta, pTask);
     return;
   }
 
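
The reordering above makes sure the task's reference counter and status name are read before the reference that may free the task is dropped. A tiny standalone sketch of that ordering, using purely hypothetical types rather than the real SStreamTask/streamMetaReleaseTask API:

```c
/* Minimal sketch of the ordering fix above: read everything you need from the
 * object before dropping the reference that may free it. Names are hypothetical. */
#include <stdio.h>
#include <stdlib.h>

typedef struct {
  int  refCount;
  int  timerActive;
  char name[32];
} Task;

static void taskRelease(Task *t) {      /* frees the task once the last ref is gone */
  if (--t->refCount == 0) free(t);
}

static void timerCallback(Task *t) {
  int ref = --t->timerActive;                       /* 1. use the object ...      */
  printf("task %s stops timer, ref:%d\n", t->name, ref);
  taskRelease(t);                                   /* 2. ... then release it     */
}

int main(void) {
  Task *t = calloc(1, sizeof(Task));
  if (t == NULL) return 1;
  t->refCount = 1;
  t->timerActive = 1;
  snprintf(t->name, sizeof(t->name), "demo-task");
  timerCallback(t);                                 /* safe: no access after release */
  return 0;
}
```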
@@ -80,7 +80,7 @@ int32_t cfgLoadFromArray(SConfig *pCfg, SArray *pArgs) {
   int32_t size = taosArrayGetSize(pArgs);
   for (int32_t i = 0; i < size; ++i) {
     SConfigPair *pPair = taosArrayGet(pArgs, i);
-    if (cfgSetItem(pCfg, pPair->name, pPair->value, CFG_STYPE_ARG_LIST) != 0) {
+    if (cfgSetItem(pCfg, pPair->name, pPair->value, CFG_STYPE_ARG_LIST, true) != 0) {
       return -1;
     }
   }
 
@@ -88,7 +88,7 @@ int32_t cfgLoadFromArray(SConfig *pCfg, SArray *pArgs) {
   return 0;
 }
 
-static void cfgFreeItem(SConfigItem *pItem) {
+void cfgItemFreeVal(SConfigItem *pItem) {
   if (pItem->dtype == CFG_DTYPE_STRING || pItem->dtype == CFG_DTYPE_DIR || pItem->dtype == CFG_DTYPE_LOCALE ||
       pItem->dtype == CFG_DTYPE_CHARSET || pItem->dtype == CFG_DTYPE_TIMEZONE) {
     taosMemoryFreeClear(pItem->str);
@@ -100,23 +100,26 @@ static void cfgFreeItem(SConfigItem *pItem) {
 }
 
 void cfgCleanup(SConfig *pCfg) {
-  if (pCfg != NULL) {
+  if (pCfg == NULL) {
+    return;
+  }
 
   int32_t size = taosArrayGetSize(pCfg->array);
   for (int32_t i = 0; i < size; ++i) {
     SConfigItem *pItem = taosArrayGet(pCfg->array, i);
-    cfgFreeItem(pItem);
+    cfgItemFreeVal(pItem);
     taosMemoryFreeClear(pItem->name);
   }
 
   taosArrayDestroy(pCfg->array);
   taosThreadMutexDestroy(&pCfg->lock);
   taosMemoryFree(pCfg);
 }
-}
 
 int32_t cfgGetSize(SConfig *pCfg) { return taosArrayGetSize(pCfg->array); }
 
 static int32_t cfgCheckAndSetConf(SConfigItem *pItem, const char *conf) {
-  cfgFreeItem(pItem);
+  cfgItemFreeVal(pItem);
   ASSERT(pItem->str == NULL);
 
   pItem->str = taosStrdup(conf);
@@ -257,13 +260,21 @@ static int32_t cfgSetTimezone(SConfigItem *pItem, const char *value, ECfgSrcType
 
 static int32_t cfgSetTfsItem(SConfig *pCfg, const char *name, const char *value, const char *level, const char *primary,
                              ECfgSrcType stype) {
+  taosThreadMutexLock(&pCfg->lock);
+
   SConfigItem *pItem = cfgGetItem(pCfg, name);
-  if (pItem == NULL) return -1;
+  if (pItem == NULL) {
+    taosThreadMutexUnlock(&pCfg->lock);
+
+    return -1;
+  }
 
   if (pItem->array == NULL) {
     pItem->array = taosArrayInit(16, sizeof(SDiskCfg));
     if (pItem->array == NULL) {
       terrno = TSDB_CODE_OUT_OF_MEMORY;
+      taosThreadMutexUnlock(&pCfg->lock);
+
       return -1;
     }
   }
 
@@ -275,10 +286,14 @@ static int32_t cfgSetTfsItem(SConfig *pCfg, const char *name, const char *value,
   void *ret = taosArrayPush(pItem->array, &cfg);
   if (ret == NULL) {
     terrno = TSDB_CODE_OUT_OF_MEMORY;
+    taosThreadMutexUnlock(&pCfg->lock);
+
     return -1;
   }
 
   pItem->stype = stype;
+  taosThreadMutexUnlock(&pCfg->lock);
+
   return 0;
 }
 
@@ -310,17 +325,21 @@ static int32_t cfgUpdateDebugFlagItem(SConfig *pCfg, const char *name, bool rese
   return 0;
 }
 
-int32_t cfgSetItem(SConfig *pCfg, const char *name, const char *value, ECfgSrcType stype) {
+int32_t cfgSetItem(SConfig *pCfg, const char *name, const char *value, ECfgSrcType stype, bool lock) {
   // GRANT_CFG_SET;
   int32_t code = 0;
 
+  if (lock) {
+    taosThreadMutexLock(&pCfg->lock);
+  }
+
   SConfigItem *pItem = cfgGetItem(pCfg, name);
   if (pItem == NULL) {
     terrno = TSDB_CODE_CFG_NOT_FOUND;
+    taosThreadMutexUnlock(&pCfg->lock);
     return -1;
   }
 
-  taosThreadMutexLock(&pCfg->lock);
-
   switch (pItem->dtype) {
     case CFG_DTYPE_BOOL: {
       code = cfgSetBool(pItem, value, stype);
 
@@ -365,16 +384,19 @@ int32_t cfgSetItem(SConfig *pCfg, const char *name, const char *value, ECfgSrcTy
       break;
     }
 
+  if (lock) {
     taosThreadMutexUnlock(&pCfg->lock);
+  }
 
   return code;
 }
 
-SConfigItem *cfgGetItem(SConfig *pCfg, const char *name) {
+SConfigItem *cfgGetItem(SConfig *pCfg, const char *pName) {
   if (pCfg == NULL) return NULL;
   int32_t size = taosArrayGetSize(pCfg->array);
   for (int32_t i = 0; i < size; ++i) {
     SConfigItem *pItem = taosArrayGet(pCfg->array, i);
-    if (strcasecmp(pItem->name, name) == 0) {
+    if (strcasecmp(pItem->name, pName) == 0) {
       return pItem;
     }
   }
@@ -383,13 +405,28 @@ SConfigItem *cfgGetItem(SConfig *pCfg, const char *name) {
   return NULL;
 }
 
+void cfgLock(SConfig *pCfg) {
+  if (pCfg == NULL) {
+    return;
+  }
+
+  taosThreadMutexLock(&pCfg->lock);
+}
+
+void cfgUnLock(SConfig *pCfg) {
+  taosThreadMutexUnlock(&pCfg->lock);
+}
+
 int32_t cfgCheckRangeForDynUpdate(SConfig *pCfg, const char *name, const char *pVal, bool isServer) {
   ECfgDynType dynType = isServer ? CFG_DYN_SERVER : CFG_DYN_CLIENT;
 
+  cfgLock(pCfg);
+
   SConfigItem *pItem = cfgGetItem(pCfg, name);
   if (!pItem || (pItem->dynScope & dynType) == 0) {
     uError("failed to config:%s, not support update this config", name);
     terrno = TSDB_CODE_INVALID_CFG;
+    cfgUnLock(pCfg);
     return -1;
   }
 
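
Taken together, the config hunks introduce `cfgLock`/`cfgUnLock` helpers and a `lock` flag on `cfgSetItem`, so a caller that already holds the config mutex can ask the setter not to lock again. A standalone sketch of that pattern with a plain pthread mutex; the `Config`/`setItem` names are simplified stand-ins, not the real SConfig API:

```c
/* Sketch of the "optional internal locking" pattern used above: the setter takes
 * a lock flag so it can run standalone (lock = 1) or inside a section the caller
 * already protects with cfgLock()/cfgUnLock() (lock = 0). */
#include <pthread.h>
#include <stdio.h>

typedef struct {
  pthread_mutex_t lock;
  char            value[64];
} Config;                               /* simplified stand-in for SConfig */

static void cfgLock(Config *cfg)   { pthread_mutex_lock(&cfg->lock); }
static void cfgUnLock(Config *cfg) { pthread_mutex_unlock(&cfg->lock); }

static int setItem(Config *cfg, const char *value, int lock) {
  if (lock) cfgLock(cfg);               /* standalone call: take the mutex here */
  snprintf(cfg->value, sizeof(cfg->value), "%s", value);
  if (lock) cfgUnLock(cfg);             /* only release what this call acquired */
  return 0;
}

int main(void) {
  Config cfg;
  pthread_mutex_init(&cfg.lock, NULL);
  cfg.value[0] = '\0';

  setItem(&cfg, "standalone", 1);       /* setter manages the lock itself */

  cfgLock(&cfg);                        /* caller-managed critical section */
  setItem(&cfg, "caller-locked", 0);    /* must pass 0 here to avoid self-deadlock */
  cfgUnLock(&cfg);

  printf("final value: %s\n", cfg.value);
  pthread_mutex_destroy(&cfg.lock);
  return 0;
}
```

The same reasoning explains why the loader hunks further down now pass `true`: they call `cfgSetItem` outside any existing critical section.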
@@ -399,28 +436,37 @@ int32_t cfgCheckRangeForDynUpdate(SConfig *pCfg, const char *name, const char *p
       if (ival != 0 && ival != 1) {
         uError("cfg:%s, type:%s value:%d out of range[0, 1]", pItem->name, cfgDtypeStr(pItem->dtype), ival);
         terrno = TSDB_CODE_OUT_OF_RANGE;
+        cfgUnLock(pCfg);
         return -1;
       }
     } break;
     case CFG_DTYPE_INT32: {
       int32_t ival;
       int32_t code = (int32_t)taosStrHumanToInt32(pVal, &ival);
-      if (code != TSDB_CODE_SUCCESS) return code;
+      if (code != TSDB_CODE_SUCCESS) {
+        cfgUnLock(pCfg);
+        return code;
+      }
       if (ival < pItem->imin || ival > pItem->imax) {
         uError("cfg:%s, type:%s value:%d out of range[%" PRId64 ", %" PRId64 "]", pItem->name,
                cfgDtypeStr(pItem->dtype), ival, pItem->imin, pItem->imax);
         terrno = TSDB_CODE_OUT_OF_RANGE;
+        cfgUnLock(pCfg);
         return -1;
       }
     } break;
     case CFG_DTYPE_INT64: {
       int64_t ival;
       int32_t code = taosStrHumanToInt64(pVal, &ival);
-      if (code != TSDB_CODE_SUCCESS) return code;
+      if (code != TSDB_CODE_SUCCESS) {
+        cfgUnLock(pCfg);
+        return code;
+      }
       if (ival < pItem->imin || ival > pItem->imax) {
         uError("cfg:%s, type:%s value:%" PRId64 " out of range[%" PRId64 ", %" PRId64 "]", pItem->name,
                cfgDtypeStr(pItem->dtype), ival, pItem->imin, pItem->imax);
         terrno = TSDB_CODE_OUT_OF_RANGE;
+        cfgUnLock(pCfg);
         return -1;
       }
     } break;
 
@@ -428,17 +474,23 @@ int32_t cfgCheckRangeForDynUpdate(SConfig *pCfg, const char *name, const char *p
     case CFG_DTYPE_DOUBLE: {
       double  dval;
       int32_t code = parseCfgReal(pVal, &dval);
-      if (code != TSDB_CODE_SUCCESS) return code;
+      if (code != TSDB_CODE_SUCCESS) {
+        cfgUnLock(pCfg);
+        return code;
+      }
       if (dval < pItem->fmin || dval > pItem->fmax) {
         uError("cfg:%s, type:%s value:%f out of range[%f, %f]", pItem->name, cfgDtypeStr(pItem->dtype), dval,
                pItem->fmin, pItem->fmax);
         terrno = TSDB_CODE_OUT_OF_RANGE;
+        cfgUnLock(pCfg);
         return -1;
       }
     } break;
     default:
       break;
   }
 
+  cfgUnLock(pCfg);
   return 0;
 }
 
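
The range checks above release the config lock on every early return. A common alternative for this shape of function is a single exit label; the sketch below shows that variant purely as a design comparison under the same locking assumption, not as how the real `cfgCheckRangeForDynUpdate` is written:

```c
/* Sketch of an equivalent single-exit variant of the range check above, using a
 * cleanup label instead of unlocking on every early return. Illustrative only. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cfgMutex = PTHREAD_MUTEX_INITIALIZER;

static int checkRange(int value, int minVal, int maxVal) {
  int code = 0;
  pthread_mutex_lock(&cfgMutex);

  if (value < minVal || value > maxVal) {
    fprintf(stderr, "value:%d out of range[%d, %d]\n", value, minVal, maxVal);
    code = -1;
    goto _exit;                      /* all paths funnel through one unlock */
  }

_exit:
  pthread_mutex_unlock(&cfgMutex);
  return code;
}

int main(void) {
  printf("in range:  %d\n", checkRange(5, 0, 10));
  printf("out range: %d\n", checkRange(50, 0, 10));
  return 0;
}
```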
@@ -877,7 +929,7 @@ int32_t cfgLoadFromEnvVar(SConfig *pConfig) {
       if (vlen3 != 0) value3[vlen3] = 0;
     }
 
-    code = cfgSetItem(pConfig, name, value, CFG_STYPE_ENV_VAR);
+    code = cfgSetItem(pConfig, name, value, CFG_STYPE_ENV_VAR, true);
     if (code != 0 && terrno != TSDB_CODE_CFG_NOT_FOUND) break;
 
     if (strcasecmp(name, "dataDir") == 0) {
 
@@ -920,7 +972,7 @@ int32_t cfgLoadFromEnvCmd(SConfig *pConfig, const char **envCmd) {
       if (vlen3 != 0) value3[vlen3] = 0;
     }
 
-    code = cfgSetItem(pConfig, name, value, CFG_STYPE_ENV_CMD);
+    code = cfgSetItem(pConfig, name, value, CFG_STYPE_ENV_CMD, true);
     if (code != 0 && terrno != TSDB_CODE_CFG_NOT_FOUND) break;
 
     if (strcasecmp(name, "dataDir") == 0) {
 
@@ -985,7 +1037,7 @@ int32_t cfgLoadFromEnvFile(SConfig *pConfig, const char *envFile) {
       if (vlen3 != 0) value3[vlen3] = 0;
     }
 
-    code = cfgSetItem(pConfig, name, value, CFG_STYPE_ENV_FILE);
+    code = cfgSetItem(pConfig, name, value, CFG_STYPE_ENV_FILE, true);
     if (code != 0 && terrno != TSDB_CODE_CFG_NOT_FOUND) break;
 
     if (strcasecmp(name, "dataDir") == 0) {
 
@@ -1055,10 +1107,9 @@ int32_t cfgLoadFromCfgFile(SConfig *pConfig, const char *filepath) {
         count++;
       }
 
-      code = cfgSetItem(pConfig, name, newValue, CFG_STYPE_CFG_FILE);
+      code = cfgSetItem(pConfig, name, newValue, CFG_STYPE_CFG_FILE, true);
       if (code != 0 && terrno != TSDB_CODE_CFG_NOT_FOUND) break;
-    }
-    else{
+    } else {
       paGetToken(value + vlen + 1, &value2, &vlen2);
       if (vlen2 != 0) {
         value2[vlen2] = 0;
 
@@ -1066,7 +1117,7 @@ int32_t cfgLoadFromCfgFile(SConfig *pConfig, const char *filepath) {
         if (vlen3 != 0) value3[vlen3] = 0;
       }
 
-      code = cfgSetItem(pConfig, name, value, CFG_STYPE_CFG_FILE);
+      code = cfgSetItem(pConfig, name, value, CFG_STYPE_CFG_FILE, true);
       if (code != 0 && terrno != TSDB_CODE_CFG_NOT_FOUND) break;
     }
 
@@ -1241,7 +1292,7 @@ int32_t cfgLoadFromApollUrl(SConfig *pConfig, const char *url) {
       if (vlen3 != 0) value3[vlen3] = 0;
     }
 
-    code = cfgSetItem(pConfig, name, value, CFG_STYPE_APOLLO_URL);
+    code = cfgSetItem(pConfig, name, value, CFG_STYPE_APOLLO_URL, true);
     if (code != 0 && terrno != TSDB_CODE_CFG_NOT_FOUND) break;
 
     if (strcasecmp(name, "dataDir") == 0) {
@@ -185,8 +185,8 @@ class TDTestCase:
         # baseVersion = "3.0.1.8"
 
         tdLog.printNoPrefix(f"==========step1:prepare and check data in old version-{BASEVERSION}")
-        tdLog.info(f" LD_LIBRARY_PATH=/usr/lib taosBenchmark -t {tableNumbers} -n {recordNumbers1} -y ")
-        os.system(f"LD_LIBRARY_PATH=/usr/lib taosBenchmark -t {tableNumbers} -n {recordNumbers1} -y ")
+        tdLog.info(f" LD_LIBRARY_PATH=/usr/lib taosBenchmark -t {tableNumbers} -n {recordNumbers1} -v 1 -y ")
+        os.system(f"LD_LIBRARY_PATH=/usr/lib taosBenchmark -t {tableNumbers} -n {recordNumbers1} -v 1 -y ")
         os.system("LD_LIBRARY_PATH=/usr/lib taos -s 'flush database test '")
         os.system("LD_LIBRARY_PATH=/usr/lib taosBenchmark -f 0-others/com_alltypedata.json -y")
         os.system("LD_LIBRARY_PATH=/usr/lib taos -s 'flush database curdb '")
@@ -196,49 +196,81 @@ class TDTestCase:
         os.system("LD_LIBRARY_PATH=/usr/lib taos -s 'select min(ui) from curdb.meters '")
         os.system("LD_LIBRARY_PATH=/usr/lib taos -s 'select max(bi) from curdb.meters '")
 
-        # os.system(f"LD_LIBRARY_PATH=/usr/lib taos -s 'use test;create stream current_stream into current_stream_output_stb as select _wstart as `start`, _wend as wend, max(current) as max_current from meters where voltage <= 220 interval (5s);' ")
-        # os.system('LD_LIBRARY_PATH=/usr/lib taos -s "use test;create stream power_stream into power_stream_output_stb as select ts, concat_ws(\\".\\", location, tbname) as meter_location, current*voltage*cos(phase) as active_power, current*voltage*sin(phase) as reactive_power from meters partition by tbname;" ')
-        # os.system('LD_LIBRARY_PATH=/usr/lib taos -s "use test;show streams;" ')
+        os.system(f"LD_LIBRARY_PATH=/usr/lib taos -s 'use test;create stream current_stream into current_stream_output_stb as select _wstart as `start`, _wend as wend, max(current) as max_current from meters where voltage <= 220 interval (5s);' ")
+        os.system('LD_LIBRARY_PATH=/usr/lib taos -s "use test;create stream power_stream trigger at_once into power_stream_output_stb as select ts, concat_ws(\\".\\", location, tbname) as meter_location, current*voltage*cos(phase) as active_power, current*voltage*sin(phase) as reactive_power from meters partition by tbname;" ')
+        os.system('LD_LIBRARY_PATH=/usr/lib taos -s "use test;show streams;" ')
 
         self.alter_string_in_file("0-others/tmqBasic.json", "/etc/taos/", cPath)
         # os.system("LD_LIBRARY_PATH=/usr/lib taosBenchmark -f 0-others/tmqBasic.json -y ")
-        os.system('LD_LIBRARY_PATH=/usr/lib taos -s "create topic if not exists tmq_test_topic as select current,voltage,phase from test.meters where voltage <= 106 and current <= 5;" ')
+        # create db/stb/select topic
+        db_topic = "db_test_topic"
+        os.system(f'LD_LIBRARY_PATH=/usr/lib taos -s "create topic if not exists {db_topic} with meta as database test" ')
+
+        stable_topic = "stable_test_meters_topic"
+        os.system(f'LD_LIBRARY_PATH=/usr/lib taos -s "create topic if not exists {stable_topic} as stable test.meters where tbname like \\"d3\\";" ')
+
+        select_topic = "select_test_meters_topic"
+        os.system(f'LD_LIBRARY_PATH=/usr/lib taos -s "create topic if not exists {select_topic} as select current,voltage,phase from test.meters where voltage >= 170;" ')
+
         os.system('LD_LIBRARY_PATH=/usr/lib taos -s "use test;show topics;" ')
         os.system(f" /usr/bin/taosadapter --version " )
         consumer_dict = {
             "group.id": "g1",
+            "td.connect.websocket.scheme": "ws",
             "td.connect.user": "root",
             "td.connect.pass": "taosdata",
             "auto.offset.reset": "earliest",
+            "enable.auto.commit": "false",
         }
 
-        consumer = taosws.Consumer(conf={"group.id": "local", "td.connect.websocket.scheme": "ws"})
+        consumer = taosws.Consumer(consumer_dict)
         try:
-            consumer.subscribe(["tmq_test_topic"])
+            consumer.subscribe([select_topic])
         except TmqError:
            tdLog.exit(f"subscribe error")
+        first_consumer_rows = 0
         while True:
             message = consumer.poll(timeout=1.0)
             if message:
-                print("message")
-                id = message.vgroup()
-                topic = message.topic()
-                database = message.database()
 
                 for block in message:
-                    nrows = block.nrows()
-                    ncols = block.ncols()
-                    for row in block:
-                        print(row)
-                    values = block.fetchall()
-                    print(nrows, ncols)
+                    first_consumer_rows += block.nrows()
 
-                consumer.commit(message)
             else:
-                print("break")
+                tdLog.notice("message is null and break")
+                break
+            consumer.commit(message)
+            tdLog.debug(f"topic:{select_topic} ,first consumer rows is {first_consumer_rows} in old version")
             break
 
         consumer.close()
+        # consumer_dict2 = {
+        #     "group.id": "g2",
+        #     "td.connect.websocket.scheme": "ws",
+        #     "td.connect.user": "root",
+        #     "td.connect.pass": "taosdata",
+        #     "auto.offset.reset": "earliest",
+        #     "enable.auto.commit": "false",
+        # }
+        # consumer = taosws.Consumer(consumer_dict2)
+        # try:
+        #     consumer.subscribe([db_topic,stable_topic])
+        # except TmqError:
+        #     tdLog.exit(f"subscribe error")
+        # first_consumer_rows = 0
+        # while True:
+        #     message = consumer.poll(timeout=1.0)
+        #     if message:
+        #         for block in message:
+        #             first_consumer_rows += block.nrows()
+        #     else:
+        #         tdLog.notice("message is null and break")
+        #         break
+        #     consumer.commit(message)
+        #     tdLog.debug(f"topic:{select_topic} ,first consumer rows is {first_consumer_rows} in old version")
+        #     break
 
         tdLog.info(" LD_LIBRARY_PATH=/usr/lib taosBenchmark -f 0-others/compa4096.json -y ")
         os.system("LD_LIBRARY_PATH=/usr/lib taosBenchmark -f 0-others/compa4096.json -y")
         os.system("LD_LIBRARY_PATH=/usr/lib taos -s 'flush database db4096 '")
@@ -279,11 +311,10 @@ class TDTestCase:
         tdLog.printNoPrefix(f"==========step3:prepare and check data in new version-{nowServerVersion}")
         tdsql.query(f"select count(*) from {stb}")
         tdsql.checkData(0,0,tableNumbers*recordNumbers1)
-        # tdsql.query("show streams;")
-        # os.system(f"taosBenchmark -t {tableNumbers} -n {recordNumbers2} -y ")
-        # tdsql.query("show streams;")
-        # tdsql.query(f"select count(*) from {stb}")
-        # tdsql.checkData(0,0,tableNumbers*recordNumbers2)
+        tdsql.query("show streams;")
+        tdsql.checkRows(2)
 
         # checkout db4096
         tdsql.query("select count(*) from db4096.stb0")
 
@@ -334,7 +365,7 @@ class TDTestCase:
 
         # check stream
         tdsql.query("show streams;")
-        tdsql.checkRows(0)
+        tdsql.checkRows(2)
 
         #check TS-3131
         tdsql.query("select *,tbname from d0.almlog where mcid='m0103';")
@@ -348,39 +379,48 @@ class TDTestCase:
             print("The unordered list is the same as the ordered list.")
         else:
             tdLog.exit("The unordered list is not the same as the ordered list.")
-        tdsql.execute("insert into test.d80 values (now+1s, 11, 103, 0.21);")
-        tdsql.execute("insert into test.d9 values (now+5s, 4.3, 104, 0.4);")
 
 
         # check tmq
+        tdsql.execute("insert into test.d80 values (now+1s, 11, 190, 0.21);")
+        tdsql.execute("insert into test.d9 values (now+5s, 4.3, 104, 0.4);")
         conn = taos.connect()
 
         consumer = Consumer(
             {
-                "group.id": "tg75",
-                "client.id": "124",
+                "group.id": "g1",
                 "td.connect.user": "root",
                 "td.connect.pass": "taosdata",
                 "enable.auto.commit": "true",
                 "experimental.snapshot.enable": "true",
             }
         )
-        consumer.subscribe(["tmq_test_topic"])
+        consumer.subscribe([select_topic])
+        consumer_rows = 0
         while True:
-            res = consumer.poll(10)
-            if not res:
+            message = consumer.poll(timeout=1.0)
+            tdLog.info(f" null {message}")
+            if message:
+                for block in message:
+                    consumer_rows += block.nrows()
+                tdLog.info(f"consumer rows is {consumer_rows}")
+            else:
+                print("consumer has completed and break")
                 break
-            err = res.error()
-            if err is not None:
-                raise err
-            val = res.value()
-
-            for block in val:
-                print(block.fetchall())
+        consumer.close()
+        tdsql.query("select current,voltage,phase from test.meters where voltage >= 170;")
+        all_rows = tdsql.queryRows
+        if consumer_rows < all_rows - first_consumer_rows :
+            tdLog.exit(f"consumer rows is {consumer_rows}, less than {all_rows - first_consumer_rows}")
 
         tdsql.query("show topics;")
-        tdsql.checkRows(1)
+        tdsql.checkRows(3)
+        tdsql.execute(f"drop topic {select_topic};",queryTimes=10)
+        tdsql.execute(f"drop topic {db_topic};",queryTimes=10)
+        tdsql.execute(f"drop topic {stable_topic};",queryTimes=10)
 
+        os.system(f" LD_LIBRARY_PATH={bPath}/build/lib {bPath}/build/bin/taosBenchmark -t {tableNumbers} -n {recordNumbers2} -y ")
+        tdsql.query(f"select count(*) from {stb}")
+        tdsql.checkData(0,0,tableNumbers*recordNumbers2)
 
     def stop(self):
         tdSql.close()
@@ -62,7 +62,7 @@ class TDTestCase:
         while True:
             res = consumer.poll(1)
             if not res:
-                break
+                continue
             val = res.value()
             if val is None:
                 continue
 
@@ -173,7 +173,7 @@ class TDTestCase:
         while True:
             res = consumer.poll(1)
             if not res:
-                break
+                continue
             val = res.value()
             if val is None:
                 continue
 
@@ -282,7 +282,7 @@ class TDTestCase:
         while True:
             res = consumer.poll(1)
             if not res:
-                break
+                continue
             val = res.value()
             if val is None:
                 continue
 
@@ -391,7 +391,7 @@ class TDTestCase:
         while True:
             res = consumer.poll(1)
            if not res:
-                break
+                continue
             val = res.value()
             if val is None:
                 continue
@@ -1866,6 +1866,29 @@ int sml_td29691_Test() {
   printf("%s result0:%s\n", __FUNCTION__, taos_errstr(pRes));
   ASSERT(code == TSDB_CODE_PAR_DUPLICATED_COLUMN);
   taos_free_result(pRes);
 
+  //check column tag name duplication when update
+  const char *sql7[] = {
+      "vbin,t1=1,t2=2,f1=ewe f2=b\"hello\" 1632299372003",
+  };
+  pRes = taos_schemaless_insert(taos, (char **)sql7, sizeof(sql7) / sizeof(sql7[0]), TSDB_SML_LINE_PROTOCOL,
+                                TSDB_SML_TIMESTAMP_MILLI_SECONDS);
+  code = taos_errno(pRes);
+  printf("%s result0:%s\n", __FUNCTION__, taos_errstr(pRes));
+  ASSERT(code == TSDB_CODE_PAR_DUPLICATED_COLUMN);
+  taos_free_result(pRes);
+
+  //check column tag name duplication when update
+  const char *sql6[] = {
+      "vbin,t1=1 t2=2,f1=1,f2=b\"hello\" 1632299372004",
+  };
+  pRes = taos_schemaless_insert(taos, (char **)sql6, sizeof(sql6) / sizeof(sql6[0]), TSDB_SML_LINE_PROTOCOL,
+                                TSDB_SML_TIMESTAMP_MILLI_SECONDS);
+  code = taos_errno(pRes);
+  printf("%s result0:%s\n", __FUNCTION__, taos_errstr(pRes));
+  ASSERT(code == TSDB_CODE_PAR_DUPLICATED_COLUMN);
+  taos_free_result(pRes);
+
   taos_close(taos);
 
   return code;