Merge branch '3.0' into fix/TD-28376-3.0

kailixu 2024-02-24 22:23:20 +08:00
commit e8fc27075b
384 changed files with 37409 additions and 11680 deletions

View File

@ -306,7 +306,7 @@ def pre_test_build_win() {
cd %WIN_CONNECTOR_ROOT%
python.exe -m pip install --upgrade pip
python -m pip uninstall taospy -y
python -m pip install taospy==2.7.12
python -m pip install taospy==2.7.13
python -m pip uninstall taos-ws-py -y
python -m pip install taos-ws-py==0.3.1
xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taos.dll C:\\Windows\\System32

View File

@ -12,7 +12,7 @@
[![Build Status](https://travis-ci.org/taosdata/TDengine.svg?branch=master)](https://travis-ci.org/taosdata/TDengine)
[![Build status](https://ci.appveyor.com/api/projects/status/kf3pwh2or5afsgl9/branch/master?svg=true)](https://ci.appveyor.com/project/sangshuduo/tdengine-2n8ge/branch/master)
[![Coverage Status](https://coveralls.io/repos/github/taosdata/TDengine/badge.svg?branch=develop)](https://coveralls.io/github/taosdata/TDengine?branch=develop)
[![Coverage Status](https://coveralls.io/repos/github/taosdata/TDengine/badge.svg?branch=3.0)](https://coveralls.io/github/taosdata/TDengine?branch=3.0)
[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/4201/badge)](https://bestpractices.coreinfrastructure.org/projects/4201)
Simplified Chinese | [English](README.md) | [TDengine Cloud](https://cloud.taosdata.com/?utm_medium=cn&utm_source=github) | Many positions are open for hire, see [here](https://www.taosdata.com/cn/careers/)

View File

@ -12,7 +12,7 @@
[![Build Status](https://cloud.drone.io/api/badges/taosdata/TDengine/status.svg?ref=refs/heads/master)](https://cloud.drone.io/taosdata/TDengine)
[![Build status](https://ci.appveyor.com/api/projects/status/kf3pwh2or5afsgl9/branch/master?svg=true)](https://ci.appveyor.com/project/sangshuduo/tdengine-2n8ge/branch/master)
[![Coverage Status](https://coveralls.io/repos/github/taosdata/TDengine/badge.svg?branch=develop)](https://coveralls.io/github/taosdata/TDengine?branch=develop)
[![Coverage Status](https://coveralls.io/repos/github/taosdata/TDengine/badge.svg?branch=3.0)](https://coveralls.io/github/taosdata/TDengine?branch=3.0)
[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/4201/badge)](https://bestpractices.coreinfrastructure.org/projects/4201)
<br />
[![Twitter Follow](https://img.shields.io/twitter/follow/tdenginedb?label=TDengine&style=social)](https://twitter.com/tdenginedb)

View File

@ -1,6 +1,6 @@
# openssl
ExternalProject_Add(openssl
URL https://www.openssl.org/source/openssl-3.1.3.tar.gz
URL https://github.com/openssl/openssl/releases/download/openssl-3.1.3/openssl-3.1.3.tar.gz
URL_HASH SHA256=f0316a2ebd89e7f2352976445458689f80302093788c466692fb2a188b2eacf6
DOWNLOAD_NO_PROGRESS 1
DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"

BIN
deps/mips/dm_static/libdmodule.a vendored Normal file

Binary file not shown.

View File

@ -101,7 +101,7 @@ Query OK, 2 row(s) in set (0.004076s)
## Query Examples
If you want to query the data of "tags": {"location": "California.LosAngeles", "groupid": 1}, here is the query SQL:
If you want to query the data of "tags": &lcub;"location": "California.LosAngeles", "groupid": 1&rcub;, here is the query SQL:
```sql
SELECT * FROM `meters_current` WHERE location = "California.LosAngeles" AND groupid = 1;

View File

@ -22,7 +22,7 @@ import CAsync from "./_c_async.mdx";
SQL is used by TDengine as its query language. Application programs can send SQL statements to TDengine through REST API or client libraries. TDengine's CLI `taos` can also be used to execute ad hoc SQL queries. Here is the list of major query functionalities supported by TDengine:
- Query on single column or multiple columns
- Filter on tags or data columns: >, <, =, <\>, like
- Filter on tags or data columns: &gt;, &lt;, =, &lt;&gt;, like
- Grouping of results: `Group By`
- Sorting of results: `Order By`
- Limit the number of results: `Limit/Offset`
- Windowed aggregate queries for time windows (interval), session windows (session), and state windows (state_window)
- Arithmetic on columns of numeric types or aggregate results
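For instance, a single statement can combine several of these capabilities (a sketch assuming the `meters` demo supertable with a `voltage` column and a `location` tag):

```sql
-- filter by time, group by a tag, sort the groups, and cap the output
SELECT location, AVG(voltage) FROM meters
  WHERE ts > NOW - 1h
  GROUP BY location
  ORDER BY location
  LIMIT 5;
```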
@ -159,7 +159,7 @@ In the section describing [Insert](../insert-data/sql-writing), a database named
:::note
1. With either REST connection or native connection, the above sample code works well.
2. Please note that `use db` can't be used in case of REST connection because it's stateless. You can specify the database name by either the REST endpoint's parameter or <db_name>.<table_name> in the SQL command.
2. Please note that `use db` can't be used in case of REST connection because it's stateless. You can specify the database name by either the REST endpoint's parameter or &lt;db_name&gt;.&lt;table_name&gt; in the SQL command.
:::
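For example (a sketch assuming the demo database `power` and supertable `meters`):

```sql
-- with a REST connection, qualify the table name instead of running `use power`
SELECT ts, current FROM power.meters LIMIT 10;
```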

View File

@ -104,7 +104,7 @@ Replace `aggfn` with the name of your function.
### UDF Interface Definition in C
There are strict naming conventions for interface functions. The names of the start, finish, init, and destroy interfaces must be <udf-name\>_start, <udf-name\>_finish, <udf-name\>_init, and <udf-name\>_destroy, respectively. Replace `scalarfn`, `aggfn`, and `udf` with the name of your user-defined function.
There are strict naming conventions for interface functions. The names of the start, finish, init, and destroy interfaces must be &lt;udf-name&gt;_start, &lt;udf-name&gt;_finish, &lt;udf-name&gt;_init, and &lt;udf-name&gt;_destroy, respectively. Replace `scalarfn`, `aggfn`, and `udf` with the name of your user-defined function.
Interface functions return a value that indicates whether the operation was successful. If an operation fails, the interface function returns an error code. Otherwise, it returns TSDB_CODE_SUCCESS. The error codes are defined in `taoserror.h` and in the common API error codes in `taos.h`. For example, TSDB_CODE_UDF_INVALID_INPUT indicates invalid input. TSDB_CODE_OUT_OF_MEMORY indicates insufficient memory.
@ -194,7 +194,7 @@ typedef struct SUdfInterBuf {
```
The data structure is described as follows:
- The SUdfDataBlock block includes the number of rows (numOfRows) and the number of columns (numCols). udfCols[i] (0 <= i <= numCols-1) indicates that each column is of type SUdfColumn.
- The SUdfDataBlock block includes the number of rows (numOfRows) and the number of columns (numCols). udfCols[i] (0 &lt;= i &lt;= numCols-1) indicates that each column is of type SUdfColumn.
- SUdfColumn includes the definition of the data type of the column (colMeta) and the data in the column (colData).
- The member definitions of SUdfColumnMeta are the same as the data type definitions in `taos.h`.
- The data in SUdfColumnData can become longer. varLenCol indicates variable-length data, and fixLenCol indicates fixed-length data.

View File

@ -186,7 +186,7 @@ The base API is used to do things like create database connections and provide a
- The variables database and len are allocated by the caller, and the current database name and its length will be assigned to database and len.
- If the db name is not assigned to database correctly (including truncation), -1 is returned, and the user can call taos_errstr(NULL) to get the error message.
- If database==NULL or len<=0, an error is returned, and the space required to store the db (including the terminating '\0') is assigned to the variable required
- If database==NULL or len&lt;=0, an error is returned, and the space required to store the db (including the terminating '\0') is assigned to the variable required
- If len is less than the space required to store the db (including the terminating '\0'), an error is returned. The truncated db name assigned to database ends with '\0'.
- If len is greater than or equal to the space required to store the db (including the terminating '\0'), 0 is returned, and the db name ending with '\0' is assigned to database.

View File

@ -69,7 +69,7 @@ TDengine currently supports timestamp, number, character, Boolean type, and the
| SMALLINT | i16 |
| TINYINT | i8 |
| BOOL | bool |
| BINARY | Vec<u8\> |
| BINARY | Vec&lt;u8&gt; |
| NCHAR | String |
| JSON | serde_json::Value |

View File

@ -315,7 +315,7 @@ The `connect()` function returns a `taos.TaosConnection` instance. In client-sid
All arguments to the `connect()` function are optional keyword arguments. The following connection parameters can be specified.
- `url`: The URL of taosAdapter REST service. The default is <http://localhost:6041>.
- `url`: The URL of taosAdapter REST service. The default is `http://localhost:6041`.
- `user`: TDengine user name. The default is `root`.
- `password`: TDengine user password. The default is `taosdata`.
- `timeout`: HTTP request timeout. Enter a value in seconds. The default is `socket._GLOBAL_DEFAULT_TIMEOUT`. Usually, no configuration is needed.

View File

@ -8,7 +8,7 @@ description: This document describes the TDengine PHP client library.
PHP client library relies on TDengine client driver.
Project Repository: <https://github.com/Yurunsoft/php-tdengine>
Project Repository: [https://github.com/Yurunsoft/php-tdengine](https://github.com/Yurunsoft/php-tdengine)
After TDengine client or server is installed, `taos.h` is located at:

View File

@ -68,14 +68,14 @@ TDengine supports a variety of constants:
| # | **Syntax** | **Type** | **Description** |
| --- | :-----------------------------------------------: | --------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| 1 | [{+ \| -}]123 | BIGINT | Integer literals are of type BIGINT. Data that exceeds the length of the BIGINT type is truncated. |
| 1 | [+ \| -]123 | BIGINT | Integer literals are of type BIGINT. Data that exceeds the length of the BIGINT type is truncated. |
| 2 | 123.45 | DOUBLE | Floating-point literals are of type DOUBLE. Numeric values will be determined as integer or float type according to whether there is decimal point or whether scientific notation is used. |
| 3 | 1.2E3 | DOUBLE | Literals in scientific notation are of type DOUBLE. |
| 4 | 'abc' | BINARY | Content enclosed in single quotation marks is of type BINARY. The size of a BINARY is the size of the string in bytes. A literal single quote inside the string must be escaped with a backslash `\'`. |
| 5 | "abc" | BINARY | Content enclosed in double quotation marks is of type BINARY. The size of a BINARY is the size of the string in bytes. A literal double quote inside the string must be escaped with a backslash `\"`. |
| 6 | TIMESTAMP {'literal' \| "literal"} | TIMESTAMP | The TIMESTAMP keyword indicates that the following string literal is interpreted as a timestamp. The string must be in YYYY-MM-DD HH:mm:ss.MS format. The precision is inherited from the database configuration. |
| 7 | {TRUE \| FALSE} | BOOL | Boolean literals are of type BOOL. |
| 8 | {'' \| "" \| '\t' \| "\t" \| ' ' \| " " \| NULL } | -- | The preceding characters indicate null literals. These can be used with any data type. |
| 6 | TIMESTAMP ['literal' \| "literal"] | TIMESTAMP | The TIMESTAMP keyword indicates that the following string literal is interpreted as a timestamp. The string must be in YYYY-MM-DD HH:mm:ss.MS format. The precision is inherited from the database configuration. |
| 7 | [TRUE \| FALSE] | BOOL | Boolean literals are of type BOOL. |
| 8 | ['' \| "" \| '\t' \| "\t" \| ' ' \| " " \| NULL ] | -- | The preceding characters indicate null literals. These can be used with any data type. |
:::note
Numeric values will be determined as integer or float type according to whether there is decimal point or whether scientific notation is used, so attention must be paid to avoid overflow. For example, 9999999999999999999 will be considered as overflow because it exceeds the upper limit of long integer, but 9999999999999999999.0 will be considered as a legal float number.
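A quick way to see the distinction (a sketch; assumes your client accepts constant queries):

```sql
-- the bare integer 9999999999999999999 overflows BIGINT,
-- while the same digits with a decimal point parse as DOUBLE
SELECT 9999999999999999999.0 / 2;
```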

View File

@ -56,7 +56,7 @@ database_option: {
- WAL_FSYNC_PERIOD: specifies the interval (in milliseconds) at which data is written from the WAL to disk. This parameter takes effect only when the WAL parameter is set to 2. The default value is 3000. Enter a value between 0 and 180000. The value 0 indicates that incoming data is immediately written to disk.
- MAXROWS: specifies the maximum number of rows recorded in a block. The default value is 4096.
- MINROWS: specifies the minimum number of rows recorded in a block. The default value is 100.
- KEEP: specifies the time for which data is retained. Enter a value between 1 and 365000. The default value is 3650. The value of the KEEP parameter must be greater than or equal to three times the value of the DURATION parameter. TDengine automatically deletes data that is older than the value of the KEEP parameter. You can use m (minutes), h (hours), and d (days) as the unit, for example KEEP 100h or KEEP 10d. If you do not include a unit, d is used by default. TDengine Enterprise supports the [Tiered Storage](https://docs.tdengine.com/tdinternal/arch/#tiered-storage) function, so multiple KEEP values (comma separated, up to 3 values, satisfying keep 0 <= keep 1 <= keep 2, e.g. KEEP 100h,100d,3650d) are supported. TDengine OSS does not support tiered storage (if multiple KEEP values are configured, they do not take effect; only the maximum KEEP value is used as KEEP).
- KEEP: specifies the time for which data is retained. Enter a value between 1 and 365000. The default value is 3650. The value of the KEEP parameter must be greater than or equal to three times the value of the DURATION parameter. TDengine automatically deletes data that is older than the value of the KEEP parameter. You can use m (minutes), h (hours), and d (days) as the unit, for example KEEP 100h or KEEP 10d. If you do not include a unit, d is used by default. TDengine Enterprise supports the [Tiered Storage](https://docs.tdengine.com/tdinternal/arch/#tiered-storage) function, so multiple KEEP values (comma separated, up to 3 values, satisfying keep 0 &lt;= keep 1 &lt;= keep 2, e.g. KEEP 100h,100d,3650d) are supported. TDengine OSS does not support tiered storage (if multiple KEEP values are configured, they do not take effect; only the maximum KEEP value is used as KEEP).
- PAGES: specifies the number of pages in the metadata storage engine cache on each vnode. Enter a value greater than or equal to 64. The default value is 256. The space occupied by metadata storage on each vnode is equal to the product of the values of the PAGESIZE and PAGES parameters. The space occupied by default is 1 MB.
- PAGESIZE: specifies the size (in KB) of each page in the metadata storage engine cache on each vnode. The default value is 4. Enter a value between 1 and 16384.
- PRECISION: specifies the precision at which a database records timestamps. Enter ms for milliseconds, us for microseconds, or ns for nanoseconds. The default value is ms.
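A sketch of how several of these options combine (values are illustrative only):

```sql
-- 10-day data files; three-level KEEP (Enterprise tiered storage; KEEP >= 3 x DURATION); ms precision
CREATE DATABASE power_db DURATION 10d KEEP 30d,180d,365d PRECISION 'ms' PAGES 256 PAGESIZE 4;
```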

View File

@ -24,7 +24,7 @@ SELECT [hints] [DISTINCT] [TAGS] select_list
hints: /*+ [hint([hint_param_list])] [hint([hint_param_list])] */
hint:
BATCH_SCAN | NO_BATCH_SCAN | SORT_FOR_GROUP
BATCH_SCAN | NO_BATCH_SCAN | SORT_FOR_GROUP | PARA_TABLES_SORT
select_list:
select_expr [, select_expr] ...
@ -87,12 +87,13 @@ Hints are a means of user control over query optimization for individual stateme
The list of currently supported Hints is as follows:
| **Hint** | **Params** | **Comment** | **Scopt** |
| **Hint** | **Params** | **Comment** | **Scope** |
| :-----------: | -------------- | -------------------------- | -----------------------------------|
| BATCH_SCAN | None | Batch table scan | JOIN statement for stable |
| NO_BATCH_SCAN | None | Sequential table scan | JOIN statement for stable |
| SORT_FOR_GROUP| None | Use sort for partition; conflicts with PARTITION_FIRST | With normal column in partition by list |
| PARTITION_FIRST| None | Use partition before aggregate; conflicts with SORT_FOR_GROUP | With normal column in partition by list |
| PARA_TABLES_SORT| None | When sorting supertable rows by timestamp, no temporary disk space is used. When there are numerous tables, each with long rows, the algorithm associated with this hint may consume a substantial amount of memory, potentially leading to an out-of-memory (OOM) situation. | Sorting the supertable rows by timestamp |
For example:
@ -100,6 +101,7 @@ For example:
SELECT /*+ BATCH_SCAN() */ a.ts FROM stable1 a, stable2 b where a.tag0 = b.tag0 and a.ts = b.ts;
SELECT /*+ SORT_FOR_GROUP() */ count(*), c1 FROM stable1 PARTITION BY c1;
SELECT /*+ PARTITION_FIRST() */ count(*), c1 FROM stable1 PARTITION BY c1;
SELECT /*+ PARA_TABLES_SORT() */ * from stable1 order by ts;
```
## Lists

View File

@ -491,6 +491,8 @@ TO_CHAR(ts, format_str_literal)
**Description**: Convert a ts column to a string in the specified format
**Version**: Since ver-3.2.2.0
**Return value type**: VARCHAR
**Applicable column types**: TIMESTAMP
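For example (a sketch; the format string uses common TO_CHAR tokens):

```sql
-- render each row's timestamp as a human-readable string
SELECT TO_CHAR(ts, 'YYYY-MM-DD HH24:MI:SS') FROM meters LIMIT 3;
```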
@ -550,6 +552,8 @@ TO_TIMESTAMP(ts_str_literal, format_str_literal)
**Description**: Convert a formatted timestamp string to a timestamp
**Version**: Since ver-3.2.2.0
**Return value type**: TIMESTAMP
**Applicable column types**: VARCHAR
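For example (a sketch, mirroring the TO_CHAR example above):

```sql
-- parse a formatted string back into a TIMESTAMP value
SELECT TO_TIMESTAMP('2023-10-10 10:10:10', 'YYYY-MM-DD HH24:MI:SS');
```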
@ -877,11 +881,11 @@ HISTOGRAM(expr, bin_type, bin_description, normalized)
- "user_input": "[1, 3, 5, 7]":
User specified bin values.
- "linear_bin": "{"start": 0.0, "width": 5.0, "count": 5, "infinity": true}"
- "linear_bin": "&lcub;"start": 0.0, "width": 5.0, "count": 5, "infinity": true&rcub;"
"start" - bin starting point. "width" - bin offset. "count" - number of bins generated. "infinity" - whether to add (-inf, inf) as start/end point in generated set of bins.
The above "linear_bin" descriptor generates a set of bins: [-inf, 0.0, 5.0, 10.0, 15.0, 20.0, +inf].
- "log_bin": "{"start":1.0, "factor": 2.0, "count": 5, "infinity": true}"
- "log_bin": "&lcub;"start":1.0, "factor": 2.0, "count": 5, "infinity": true&rcub;"
"start" - bin starting point. "factor" - exponential factor of bin offset. "count" - number of bins generated. "infinity" - whether to add (-inf, inf) as start/end point in generated range of bins.
The above "log_bin" descriptor generates a set of bins: [-inf, 1.0, 2.0, 4.0, 8.0, 16.0, +inf].
- normalized: setting to 1/0 to turn on/off result normalization. Valid values are 0 or 1.
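Putting the pieces together (a sketch assuming the `meters` demo supertable):

```sql
-- voltage counts in linear bins [-inf, 0, 5, 10, 15, 20, +inf], without normalization
SELECT HISTOGRAM(voltage, 'linear_bin', '{"start": 0.0, "width": 5.0, "count": 5, "infinity": true}', 0) FROM meters;
```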
@ -977,7 +981,7 @@ ignore_null_values: {
- `INTERP` is used to get the value that matches the specified time slice from a column. If no such value exists an interpolation value will be returned based on `FILL` parameter.
- The input data of `INTERP` is the value of the specified column and a `where` clause can be used to filter the original data. If no `where` condition is specified then all original data is the input.
- `INTERP` must be used along with `RANGE`, `EVERY`, `FILL` keywords.
- The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1 <= timestamp2. timestamp1 is the starting point of the output time range. timestamp2 is the ending point of the output time range.
- The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1 &lt;= timestamp2. timestamp1 is the starting point of the output time range. timestamp2 is the ending point of the output time range.
- The number of rows in the result set of `INTERP` is determined by the parameter `EVERY(time_unit)`. Starting from timestamp1, one interpolation is performed for every time interval specified by the `time_unit` parameter. The parameter `time_unit` must be an integer, with no quotes, with a time unit of: a (millisecond), s (second), m (minute), h (hour), d (day), or w (week). For example, `EVERY(500a)` will interpolate every 500 milliseconds.
- Interpolation is performed based on `FILL` parameter. For more information about FILL clause, see [FILL Clause](../distinguished/#fill-clause).
- When only one timestamp value is specified in `RANGE` clause, `INTERP` is used to generate interpolation at this point in time. In this case, `EVERY` clause can be omitted. For example, SELECT INTERP(col) FROM tb RANGE('2023-01-01 00:00:00') FILL(linear).
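A complete sketch combining these clauses (using the doc's table `tb`):

```sql
-- one linearly interpolated value every 10 minutes across the hour
SELECT INTERP(current) FROM tb
  RANGE('2023-01-01 00:00:00', '2023-01-01 01:00:00')
  EVERY(10m)
  FILL(LINEAR);
```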

View File

@ -67,7 +67,7 @@ If a stream is created with PARTITION BY clause and SUBTABLE clause, the name of
CREATE STREAM avg_vol_s INTO avg_vol SUBTABLE(CONCAT('new-', tname)) AS SELECT _wstart, count(*), avg(voltage) FROM meters PARTITION BY tbname tname INTERVAL(1m);
```
IN PARTITION clause, 'tbname', representing each subtable name of source supertable, is given alias 'tname'. And 'tname' is used in SUBTABLE clause. In SUBTABLE clause, each auto created subtable will concat 'new-' and source subtable name as their name. Other expressions are also allowed in SUBTABLE clause, but the output type must be varchar.
In the PARTITION clause, 'tbname', representing each subtable name of the source supertable, is given the alias 'tname', and 'tname' is then used in the SUBTABLE clause: each auto-created subtable concatenates 'new-' and the source subtable name as its name (starting from 3.2.3.0, '_groupId' is appended to the end of the subtable name to avoid cases where the SUBTABLE expression cannot distinguish between different subtables).
If the output length exceeds TDengine's limit (192), the name is truncated. If the generated name is already occupied by another table, the creation of the new subtable and writes to it will fail.

View File

@ -35,9 +35,9 @@ TDengine supports the `UNION` and `UNION ALL` operations. UNION ALL collects all
| # | **Operator** | **Supported Data Types** | **Description** |
| --- | :---------------: | -------------------------------------------------------------------- | -------------------- |
| 1 | = | All types except BLOB, MEDIUMBLOB, and JSON | Equal to |
| 2 | <\>, != | All types except BLOB, MEDIUMBLOB, and JSON; the primary key (timestamp) is also not supported | Not equal to |
| 3 | \>, < | All types except BLOB, MEDIUMBLOB, and JSON | Greater than and less than |
| 4 | \>=, <= | All types except BLOB, MEDIUMBLOB, and JSON | Greater than or equal to and less than or equal to |
| 2 | &lt;&gt;, != | All types except BLOB, MEDIUMBLOB, and JSON; the primary key (timestamp) is also not supported | Not equal to |
| 3 | &gt;, &lt; | All types except BLOB, MEDIUMBLOB, and JSON | Greater than and less than |
| 4 | &gt;=, &lt;= | All types except BLOB, MEDIUMBLOB, and JSON | Greater than or equal to and less than or equal to |
| 5 | IS [NOT] NULL | All types | Indicates whether the value is null |
| 6 | [NOT] BETWEEN AND | All types except BLOB, MEDIUMBLOB, JSON and GEOMETRY | Closed interval comparison |
| 7 | IN | All types except BLOB, MEDIUMBLOB, and JSON; the primary key (timestamp) is also not supported | Equal to any value in the list |
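For instance, several of these operators in one filter (a sketch assuming the `meters` demo schema):

```sql
SELECT * FROM meters
  WHERE voltage >= 200
    AND current <> 10
    AND location IN ('California.LosAngeles')
    AND groupid BETWEEN 1 AND 3;
```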

View File

@ -7,14 +7,14 @@ description: This document describes the usage of escape characters in TDengine.
| Escape Character | **Actual Meaning** |
| :--------------: | ------------------------ |
| `\'` | Single quote ' |
| `\"` | Double quote " |
| \n | Line Break |
| \r | Carriage Return |
| \t | tab |
| `\\` | Back Slash \ |
| `\%` | % see below for details |
| `\_` | \_ see below for details |
| `\'` | Single quote `'` |
| `\"` | Double quote `"` |
| `\n` | Line Break |
| `\r` | Carriage Return |
| `\t` | tab |
| `\\` | Back Slash `\ ` |
| `\%` | `%` see below for details |
| `\_` | `_` see below for details |
## Restrictions
@ -22,5 +22,5 @@ description: This document describes the usage of escape characters in TDengine.
- Identifier without ``: an error is returned, because an identifier must consist of digits, ASCII letters, or underscores and cannot start with a digit
- Identifier quoted with ``: Original content is kept, no escaping
2. If there are escape characters in values
- The escape characters will be escaped as the above table. If the escape character doesn't match any supported one, the escape character "\" will be ignored.
- "%" and "\_" are used as wildcards in `like`. `\%` and `\_` should be used to represent literal "%" and "\_" in `like`,. If `\%` and `\_` are used out of `like` context, the evaluation result is "`\%`"and "`\_`", instead of "%" and "\_".
- The escape characters are interpreted as shown in the above table. If an escape character doesn't match any supported one, the escape character `\ ` is ignored (`\x` remains as-is).
- `%` and `_` are used as wildcards in `like`. `\%` and `\_` should be used to represent literal `%` and `_` in `like`. If `\%` and `\_` are used out of `like` context, the evaluation result is `\%` and `\_`, instead of `%` and `_`.
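For example (a sketch with a hypothetical table `tb` and VARCHAR column `c1`):

```sql
-- matches values ending in a literal '%', e.g. 'rate50%'; a bare '%' would be a wildcard
SELECT * FROM tb WHERE c1 LIKE '%\%';
```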

View File

@ -71,7 +71,7 @@ The following data types can be used in the schema for standard tables.
| 44 | SHOW STREAMS | Modified | This statement previously showed continuous queries. The continuous query feature has been replaced with the stream processing feature. This statement now shows streams that have been created.
| 45 | SHOW SUBSCRIPTIONS | Added | Shows all subscriptions in the current database.
| 46 | SHOW TABLES | Modified | Only shows table names.
| 47 | SHOW TABLE DISTRIBUTED | Added | Shows how table data is distributed. This replaces the `SELECT _block_dist() FROM { tb_name | stb_name }` command.
| 47 | SHOW TABLE DISTRIBUTED | Added | Shows how table data is distributed. This replaces the `SELECT _block_dist() FROM &lcub; tb_name | stb_name &rcub;` command.
| 48 | SHOW TOPICS | Added | Shows all subscribed topics in the current database.
| 49 | SHOW TRANSACTIONS | Added | Shows all running transactions in the system.
| 50 | SHOW DNODE VARIABLES | Added | Shows the configuration of the specified dnode.

View File

@ -15,7 +15,7 @@ Diagnostic steps:
2. On the server side, execute command `taos -n server -P <port> -l <pktlen>` to monitor the port range starting from the port specified by `-P` parameter with the role of "server".
3. On the client side, execute command `taos -n client -h <fqdn of server> -P <port> -l <pktlen>` to send a testing package to the specified server and port.
-l <pktlen\>: The size of the testing package, in bytes. The value range is [11, 64,000] and default value is 1,000.
-l &lt;pktlen&gt;: The size of the testing package, in bytes. The value range is [11, 64,000] and default value is 1,000.
Please note that the package length must be the same in the above two commands executed on the server side and client side respectively.
Output of the server side for the example is below:
@ -63,7 +63,7 @@ Once this parameter is set to 135 or 143, the log file grows very quickly especi
## Client Log
An independent log file, named as "taoslog+<seq num\>" is generated for each client program, i.e. a client process. The parameter `debugFlag` is used to control the log level. The default value is 131. For debugging and tracing, it needs to be set to either 135 or 143 respectively.
An independent log file, named as "taoslog+&lt;seq num&gt;" is generated for each client program, i.e. a client process. The parameter `debugFlag` is used to control the log level. The default value is 131. For debugging and tracing, it needs to be set to either 135 or 143 respectively.
The default value of `debugFlag` is also 131 and only logs at level of INFO/ERROR/WARNING are recorded. As stated above, for debugging and tracing, it needs to be changed to 135 or 143 respectively, so that logs at DEBUG or TRACE level can be recorded.

View File

@ -81,7 +81,7 @@ Parameter Description:
:::note
URL Encoding. Make sure that parameters are properly encoded. For example, when specifying a timezone you must properly encode special characters. ?tz=Etc/GMT+10 will not work because the <+> plus symbol is recognized as a space in the url. It's best practice to encode all special characters in a parameter. Instead use ?tz=Etc%2FGMT%2B10 for the parameter.
URL Encoding. Make sure that parameters are properly encoded. For example, when specifying a timezone you must properly encode special characters. ?tz=Etc/GMT+10 will not work because the plus symbol (+) is recognized as a space in the URL. It's best practice to encode all special characters in a parameter. Instead use ?tz=Etc%2FGMT%2B10 for the parameter.
:::

View File

@ -166,8 +166,8 @@ See [example/config/taosadapter.toml](https://github.com/taosdata/taosadapter/bl
- Compatible with InfluxDB v1 write interface
[https://docs.influxdata.com/influxdb/v2.0/reference/api/influxdb-1x/write/](https://docs.influxdata.com/influxdb/v2.0/reference/api/influxdb-1x/write/)
- Compatible with OpenTSDB JSON and telnet format writes
- <http://opentsdb.net/docs/build/html/api_http/put.html>
- <http://opentsdb.net/docs/build/html/api_telnet/put.html>
- [http://opentsdb.net/docs/build/html/api_http/put.html](http://opentsdb.net/docs/build/html/api_http/put.html)
- [http://opentsdb.net/docs/build/html/api_telnet/put.html](http://opentsdb.net/docs/build/html/api_telnet/put.html)
- Seamless connection to collectd
collectd is a system statistics collection daemon, please visit [https://collectd.org/](https://collectd.org/) for more information.
- Seamless connection with StatsD

View File

@ -94,67 +94,67 @@ taosBenchmark -f <json file>
## Command-line argument in detail
- **-f/--file <json file\>** :
- **-f/--file &lt;json file&gt;** :
specify the configuration file to use. This file includes all parameters. Users should not use this parameter with other command-line parameters. There is no default value.
- **-c/--config-dir <dir\>** :
- **-c/--config-dir &lt;dir&gt;** :
specify the directory where the TDengine cluster configuration file is located. The default path is `/etc/taos`.
- **-h/--host <host\>** :
- **-h/--host &lt;host&gt;** :
Specify the FQDN of the TDengine server to connect to. The default value is localhost.
- **-P/--port <port\>** :
- **-P/--port &lt;port&gt;** :
The port number of the TDengine server to connect to, the default value is 6030.
- **-I/--interface <insertMode\>** :
- **-I/--interface &lt;insertMode&gt;** :
Insert mode. Options are taosc, rest, stmt, sml, sml-rest, corresponding to normal write, restful interface writing, parameter binding interface writing, schemaless interface writing, RESTful schemaless interface writing (provided by taosAdapter). The default value is taosc.
- **-u/--user <user\>** :
- **-u/--user &lt;user&gt;** :
User name to connect to the TDengine server. Default is root.
- **-U/--supplement-insert** :
Supplementally insert data without creating the database and tables first. Optional; the default is off.
- **-p/--password <passwd\>** :
- **-p/--password &lt;passwd&gt;** :
The default password to connect to the TDengine server is `taosdata`.
- **-o/--output <file\>** :
- **-o/--output &lt;file&gt;** :
specify the path of the result output file, the default value is `./output.txt`.
- **-T/--thread <threadNum\>** :
- **-T/--thread &lt;threadNum&gt;** :
The number of threads to insert data. Default is 8.
- **-B/--interlace-rows <rowNum\>** :
- **-B/--interlace-rows &lt;rowNum&gt;** :
Enables interleaved insertion mode and specifies the number of rows of data to be inserted into each child table. Interleaved insertion mode means inserting the number of rows specified by this parameter into each sub-table and repeating the process until all sub-tables have been inserted. The default value is 0, i.e., data is inserted into one sub-table before the next sub-table is inserted.
- **-i/--insert-interval <timeInterval\>** :
- **-i/--insert-interval &lt;timeInterval&gt;** :
Specify the insert interval in `ms` for interleaved insert mode. The default value is 0. It only works if `-B/--interlace-rows` is greater than 0. After inserting interlaced rows for each child table, the data insertion thread will wait for the interval specified by this value before proceeding to the next round of writes.
- **-r/--rec-per-req <rowNum\>** :
- **-r/--rec-per-req &lt;rowNum&gt;** :
Writing the number of rows of records per request to TDengine, the default value is 30000.
- **-t/--tables <tableNum\>** :
- **-t/--tables &lt;tableNum&gt;** :
Specify the number of sub-tables. The default is 10000.
- **-S/--timestampstep <stepLength\>** :
- **-S/--timestampstep &lt;stepLength&gt;** :
Timestamp step for inserting data in each child table in ms, default is 1.
- **-n/--records <recordNum\>** :
- **-n/--records &lt;recordNum&gt;** :
The default value of the number of records inserted in each sub-table is 10000.
- **-d/--database <dbName\>** :
- **-d/--database &lt;dbName&gt;** :
The name of the database used, the default value is `test`.
- **-b/--data-type <colType\>** :
- **-b/--data-type &lt;colType&gt;** :
specify the type of the data columns of the super table. It defaults to three columns of type FLOAT, INT, and FLOAT if not used.
- **-l/--columns <colNum\>** :
- **-l/--columns &lt;colNum&gt;** :
specify the number of columns in the super table. If both this parameter and `-b/--data-type` is set, the final result number of columns is the greater of the two. If the number specified by this parameter is greater than the number of columns specified by `-b/--data-type`, the unspecified column type defaults to INT, for example: `-l 5 -b float,double`, then the final column is `FLOAT,DOUBLE,INT,INT,INT`. If the number of columns specified is less than or equal to the number of columns specified by `-b/--data-type`, then the result is the column and type specified by `-b/--data-type`, e.g.: `-l 3 -b float,double,float,bigint`. The last column is `FLOAT,DOUBLE, FLOAT,BIGINT`.
- **-L/--partial-col-num <colNum\> ** :
- **-L/--partial-col-num &lt;colNum&gt;** :
Specify how many of the first columns contain data; the remaining columns are NULL. The default is that all columns contain data.
- **-A/--tag-type <tagType\>** :
- **-A/--tag-type &lt;tagType&gt;** :
The tag column type of the super table. nchar and binary types can both set the length, for example:
```
@ -168,10 +168,10 @@ Note: In some shells, such as bash, "()" needs to be escaped, so the above comma
taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
```
- **-w/--binwidth <length\>**:
- **-w/--binwidth &lt;length&gt;**:
specify the default length for nchar and binary types. The default value is 64.
- **-m/--table-prefix <tablePrefix\>** :
- **-m/--table-prefix &lt;tablePrefix&gt;** :
The prefix of the sub-table name, the default value is "d".
- **-E/--escape-character** :
@ -192,25 +192,25 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
- **-y/--answer-yes** :
Switch parameter that requires the user to confirm at the prompt to continue. The default value is false.
- **-O/--disorder <Percentage\>** :
- **-O/--disorder &lt;Percentage&gt;** :
Specify the percentage probability of disordered data, with a value range of [0,50]. The default is 0, i.e., there is no disordered data.
- **-R/--disorder-range <timeRange\>** :
- **-R/--disorder-range &lt;timeRange&gt;** :
Specify the timestamp range for the disordered data. It leads the resulting disorder timestamp as the ordered timestamp minus a random value in this range. Valid only if the percentage of disordered data specified by `-O/--disorder` is greater than 0.
- **-F/--prepared_rand <Num\>** :
- **-F/--prepared_rand &lt;Num&gt;** :
Specify the number of unique values in the generated random data. A value of 1 means that all data are equal. The default value is 10000.
- **-a/--replica <replicaNum\>** :
- **-a/--replica &lt;replicaNum&gt;** :
Specify the number of replicas when creating the database. The default value is 1.
- **-k/--keep-trying <NUMBER\>** :
- **-k/--keep-trying &lt;NUMBER&gt;** :
Keep trying if failed to insert, default is no. Available with v3.0.9+.
- **-z/--trying-interval <NUMBER\>** :
- **-z/--trying-interval &lt;NUMBER&gt;** :
Specify the interval between insert retries. The valid value is a positive number. Only valid when keep trying is enabled. Available with v3.0.9+.
- **-v/--vgroups <NUMBER\>** :
- **-v/--vgroups &lt;NUMBER&gt;** :
Specify the number of vgroups when creating the database; only valid with daemon version 3.0+.
- **-V/--version** :
@ -226,7 +226,7 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
The parameters listed in this section apply to all function modes.
- **filetype** : The function to be tested, with optional values `insert`, `query` and `subscribe`. These correspond to the insert, query, and subscribe functions, respectively. Users can specify only one of these in each configuration file.
**cfgdir**: specify the TDengine client configuration file's directory. The default path is /etc/taos.
- **cfgdir**: specify the TDengine client configuration file's directory. The default path is `/etc/taos`.
- **host**: Specify the FQDN of the TDengine server to connect. The default value is `localhost`.

View File

@ -289,7 +289,7 @@ A specific type "nchar" is provided in TDengine to store non-ASCII characters su
The characters input on the client side are encoded using the default system encoding, which is UTF-8 on Linux/macOS, or GB18030 or GBK on some systems in Chinese, POSIX in docker, CP936 on Windows in Chinese. The encoding of the operating system in use must be set correctly so that the characters in nchar type can be converted to UCS4-LE.
The locale definition standard on Linux/macOS is: <Language\>\_<Region\>.<charset\>, for example, in "zh_CN.UTF-8", "zh" means Chinese, "CN" means China mainland, "UTF-8" means charset. The charset indicates how to display the characters. On Linux/macOS, the charset can be set by locale in the system. On Windows system another configuration parameter `charset` must be used to configure charset because the locale used on Windows is not POSIX standard. Of course, `charset` can also be used on Linux/macOS to specify the charset.
The locale definition standard on Linux/macOS is: &lt;Language&gt;\_&lt;Region&gt;.&lt;charset&gt;, for example, in "zh_CN.UTF-8", "zh" means Chinese, "CN" means China mainland, "UTF-8" means charset. The charset indicates how to display the characters. On Linux/macOS, the charset can be set by locale in the system. On Windows system another configuration parameter `charset` must be used to configure charset because the locale used on Windows is not POSIX standard. Of course, `charset` can also be used on Linux/macOS to specify the charset.
:::

View File

@ -84,18 +84,22 @@ Schemaless writes process row data according to the following principles.
1. You can use the following rules to generate the subtable names: first, combine the measurement name and the keys and values of the tags into the following string:
```json
"measurement,tag_key1=tag_value1,tag_key2=tag_value2"
```
```json
"measurement,tag_key1=tag_value1,tag_key2=tag_value2"
```
:::tip
Note that tag_key1, tag_key2 are not the original order of the tags entered by the user but the result of using the tag names in ascending order of the strings. Therefore, tag_key1 is not the first tag entered in the line protocol.
The string's MD5 hash value "md5_val" is calculated after the ranking is completed. The calculation result is then combined with the string to generate the table name: "t_md5_val". "t\_" is a fixed prefix that every table generated by this mapping relationship has.
:::
:::tip
Note that tag_key1, tag_key2 are not in the original order in which the user entered the tags, but are the tag names sorted in ascending string order. Therefore, tag_key1 is not the first tag entered in the line protocol.
The string's MD5 hash value "md5_val" is calculated after the ranking is completed. The calculation result is then combined with the string to generate the table name: "t_md5_val". "t\_" is a fixed prefix that every table generated by this mapping relationship has.
:::
If you do not want to use an automatically generated table name, there are two ways to specify sub table names, the first one has a higher priority.
You can configure smlAutoChildTableNameDelimiter in taos.cfg(except for `@ # space \r \t \n`), for example, `smlAutoChildTableNameDelimiter=tname`. You can insert `st,t0=cpul,t1=4 c1=3 1626006833639000000` and the table name will be cpu1-4.
You can configure smlChildTableName in taos.cfg to specify table names, for example, `smlChildTableName=tname`. You can insert `st,tname=cpul,t1=4 c1=3 1626006833639000000` and the cpu1 table will be automatically created. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
If you do not want to use an automatically generated table name, there are two ways to specify subtable names (the first one has a higher priority).
1. You can configure smlAutoChildTableNameDelimiter in taos.cfg (any character except `@ # space \r \t \n`).
   For example, `smlAutoChildTableNameDelimiter=-`. If you insert `st,t0=cpu1,t1=4 c1=3 1626006833639000000`, the created table name will be cpu1-4.
2. You can configure smlChildTableName in taos.cfg to specify table names.
   For example, `smlChildTableName=tname`. If you insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000`, the table cpu1 will be created automatically. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
2. If the super table obtained by parsing the line protocol does not exist, this super table is created.
**Important:** Manually creating supertables for schemaless writing is not supported. Schemaless writing creates appropriate supertables automatically.

View File

@ -36,7 +36,7 @@ LoadPlugin network
</Plugin>
```
where <taosAdapter's host\> fills in the server's domain name or IP address running taosAdapter. <port for collectd direct\> fills in the port that taosAdapter uses to receive collectd data (default is 6045).
where &lt;taosAdapter's host&gt; fills in the server's domain name or IP address running taosAdapter. &lt;port for collectd direct&gt; fills in the port that taosAdapter uses to receive collectd data (default is 6045).
An example is as follows.
@ -62,7 +62,7 @@ LoadPlugin write_tsdb
</Plugin>
```
Where <taosAdapter's host\> is the domain name or IP address of the server running taosAdapter. <port for collectd write_tsdb plugin\> Fill in the data that taosAdapter uses to receive the collectd write_tsdb plugin (default is 6047).
Where &lt;taosAdapter's host&gt; is the domain name or IP address of the server running taosAdapter. &lt;port for collectd write_tsdb plugin&gt; Fill in the data that taosAdapter uses to receive the collectd write_tsdb plugin (default is 6047).
```text
LoadPlugin write_tsdb

View File

@ -26,7 +26,7 @@ The default database name written by the taosAdapter is `icinga2`. You can also
### Configure icinga2
- Enable opentsdb-writer for icinga2 (refer to the link https://icinga.com/docs/icinga-2/latest/doc/14-features/#opentsdb-writer)
- Modify the configuration file `/etc/icinga2/features-enabled/opentsdb.conf` by filling in <taosAdapter's host\> as the domain name or IP address of the server running taosAdapter and <port for icinga2\> as the corresponding port on which taosAdapter supports receiving icinga2 data (default is 6048)
- Modify the configuration file `/etc/icinga2/features-enabled/opentsdb.conf` by filling in &lt;taosAdapter's host&gt; as the domain name or IP address of the server running taosAdapter and &lt;port for icinga2&gt; as the corresponding port on which taosAdapter supports receiving icinga2 data (default is 6048)
```
object OpenTsdbWriter "opentsdb" {

View File

@ -9,8 +9,8 @@ Point the `remote_read url` and `remote_write url` to the domain name or IP addr
### Configure Basic authentication
- username: <TDengine's username>
- password: <TDengine's password>
- username: TDengine's username
- password: TDengine's password
### Example configuration of remote_write and remote_read related sections in prometheus.yml file

View File

@ -31,7 +31,7 @@ The default database name written by taosAdapter is `statsd`. To specify a diffe
### Configuring StatsD
To use StatsD, you need to download its [source code](https://github.com/statsd/statsd). Please refer to the example file `exampleConfig.js` in the root directory of the source download to modify the configuration file. In <taosAdapter's host\>, please fill in the domain name or IP address of the server running taosAdapter, and <port for StatsD\>, please fill in the port where taosAdapter receives StatsD data (default is 6044).
To use StatsD, you need to download its [source code](https://github.com/statsd/statsd). Please refer to the example file `exampleConfig.js` in the root directory of the source download to modify the configuration file. In &lt;taosAdapter's host&gt;, please fill in the domain name or IP address of the server running taosAdapter, and &lt;port for StatsD&gt;, please fill in the port where taosAdapter receives StatsD data (default is 6044).
```
backends section add "./backends/repeater"

View File

@ -10,7 +10,7 @@ In the Telegraf configuration file (default location `/etc/telegraf/telegraf.con
...
```
Where <taosAdapter's host\> please fill in the server's domain name or IP address running the taosAdapter service. <REST service port\> please fill in the port of the REST service (default is 6041). <TDengine's username\> and <TDengine's password\> please fill in the actual configuration of the currently running TDengine. And <database name\> please fill in the database name where you want to store Telegraf data in TDengine.
Where &lt;taosAdapter's host&gt; please fill in the server's domain name or IP address running the taosAdapter service. &lt;REST service port&gt; please fill in the port of the REST service (default is 6041). &lt;TDengine's username&gt; and &lt;TDengine's password&gt; please fill in the actual configuration of the currently running TDengine. And &lt;database name&gt; please fill in the database name where you want to store Telegraf data in TDengine.
An example is as follows.

View File

@ -23,7 +23,7 @@ Record these values:
## Installing Grafana
TDengine currently supports Grafana versions 7.5 and above. Users can go to the Grafana official website to download the installation package and execute the installation according to the current operating system. The download address is as follows: <https://grafana.com/grafana/download>.
TDengine currently supports Grafana versions 7.5 and above. Users can go to the Grafana official website to download the installation package and execute the installation according to the current operating system. The download address is as follows: [https://grafana.com/grafana/download](https://grafana.com/grafana/download).
## Configuring Grafana
@ -59,7 +59,7 @@ bash -c "$(curl -fsSL \
-p taosdata
```
Restart Grafana service and open Grafana in web-browser, usually <http://localhost:3000>.
Restart Grafana service and open Grafana in web-browser, usually `http://localhost:3000`.
Save the script and type `./install.sh --help` for the full usage of the script.
@ -181,7 +181,7 @@ You can setup a zero-configuration stack for TDengine + Grafana by [docker-compo
3. Start TDengine and Grafana services: `docker-compose up -d`.
Open Grafana <http://localhost:3000>, and you can add dashboard with TDengine now.
Open Grafana (http://localhost:3000), and you can add dashboard with TDengine now.
</TabItem>
</Tabs>
@ -202,7 +202,7 @@ As shown above, select the `TDengine` data source in the `Query` and enter the c
:::note
Since the REST connection because is stateless. Grafana plugin can use <db_name>.<table_name> in the SQL command to specify the database name.
Since the REST connection is stateless, the Grafana plugin can use &lt;db_name&gt;.&lt;table_name&gt; in the SQL command to specify the database name.
:::

View File

@ -345,7 +345,7 @@ The following configuration items apply to TDengine Sink Connector and TDengine
### TDengine Sink Connector specific configuration
1. `connection.database`: The name of the target database. If the specified database does not exist, it is created automatically, with nanosecond time precision. The default value is null; when it is null, the target database is named according to the rules described for the `connection.database.prefix` parameter
2. `connection.database.prefix`: When `connection.database` is null, the prefix of the target database. Can contain placeholder '${topic}'. For example, kafka_${topic}, for topic 'orders' will be written to database 'kafka_orders'. Default null. When null, the name of the target database is the same as the name of the topic.
2. `connection.database.prefix`: When `connection.database` is null, the prefix of the target database. Can contain placeholder '$&lcub;topic&rcub;'. For example, kafka_$&lcub;topic&rcub;, for topic 'orders' will be written to database 'kafka_orders'. Default null. When null, the name of the target database is the same as the name of the topic.
3. `batch.size`: The number of records written per batch. When the data received by the sink connector at one time is larger than this value, it is written in multiple batches.
4. `max.retries`: The maximum number of retries when an error occurs. Defaults to 1.
5. `retry.backoff.ms`: The time interval for retry when sending an error. The unit is milliseconds. The default is 3000.
@ -370,12 +370,12 @@ The following configuration items apply to TDengine Sink Connector and TDengine
## Other notes
1. To use Kafka Connect, refer to <https://kafka.apache.org/documentation/#connect>.
1. To use Kafka Connect, refer to [https://kafka.apache.org/documentation/#connect](https://kafka.apache.org/documentation/#connect).
## Feedback
<https://github.com/taosdata/kafka-connect-tdengine/issues>
[https://github.com/taosdata/kafka-connect-tdengine/issues](https://github.com/taosdata/kafka-connect-tdengine/issues)
## Reference
1. For more information, see <https://kafka.apache.org/documentation/>
1. For more information, see [https://kafka.apache.org/documentation/](https://kafka.apache.org/documentation/).

View File

@ -15,43 +15,80 @@ import Node from "./_sub_node.mdx";
import CSharp from "./_sub_cs.mdx";
import CDemo from "./_sub_c.mdx";
To help applications obtain data written to TDengine in real time, or process data in the order of event arrival, TDengine provides data subscription and consumption interfaces similar to those of message queue products. In many scenarios, this allows time-series data processing systems built on TDengine to do without an integrated message queue product such as Kafka, simplifying system design and reducing operation and maintenance costs.
As with Kafka, you need to define a *topic*, but a TDengine *topic* is based on query conditions over an existing supertable, subtable, or regular table, i.e. a `SELECT` statement. You can use SQL to filter by tags, table names, columns, expressions, and so on, and apply scalar functions and UDF computations to the data (excluding aggregations). Compared with other message queue software, this is the biggest advantage of TDengine's data subscription feature: it offers greater flexibility, the granularity of the data can be adjusted by the application at any time, and data filtering and preprocessing are handled by TDengine rather than by the application, effectively reducing the volume of data transmitted and the application's complexity.
To help applications obtain data written to TDengine in real time, or process data in the order of event arrival, TDengine provides a Kafka-like data subscription feature. In many scenarios, this allows time-series data processing systems built on TDengine to do without an integrated message queue product such as Kafka, simplifying system design and reducing operation and maintenance costs.
After subscribing to a *topic*, a consumer can receive the latest data in real time. Multiple consumers can form a consumer group; the consumers in one group share consumption progress, enabling multi-threaded, distributed consumption and higher throughput. Consumers in different consumer groups do not share consumption progress even when consuming the same topic. A consumer can subscribe to multiple topics. When subscribing to a supertable, the data may be distributed across multiple vnodes (i.e. multiple shards), so multiple consumers in one consumer group improve consumption efficiency. TDengine's message queue provides an ACK mechanism to ensure at-least-once consumption in complex environments such as crashes and restarts.
# Introduction
## Topics
As with Kafka, you need to define a topic. A TDengine topic comes in three kinds: a database, a supertable, or a `SELECT` statement; see [CREATE TOPIC](../../taos-sql/tmq) for the detailed syntax. Compared with other message queue software, this is the biggest advantage of TDengine's data subscription feature: it offers greater flexibility, the granularity of the data can be adjusted by the application at any time, and data filtering and preprocessing are handled by TDengine rather than by the application, effectively reducing the volume of data transmitted and the application's complexity.
To implement this, TDengine automatically creates indexes on WAL (Write-Ahead Log) files to support fast random access, and provides a flexible, configurable file rotation and retention mechanism: users can specify how long and how large WAL files are retained as needed (see the create database statement). In this way the WAL becomes a persistent storage engine that preserves the order of event arrival (but since the TSDB has a far higher compression ratio than the WAL, we do not recommend retaining it too long; generally no more than a few days). For queries created as topics, TDengine connects to the WAL rather than the TSDB as its storage engine. During consumption, TDengine reads data directly from the WAL according to the current consumption progress, applies filtering, transformation, and other operations using the unified query engine, and pushes the data to consumers.
As shown in the figure below, the tables involved in each topic may be distributed across multiple vnodes (the counterpart of Kafka partitions). The data on each vnode is stored in WAL (Write-Ahead Log) files, where data is written sequentially (because WAL files store not only data but also metadata, write messages, and so on, the data version numbers are not consecutive).
The following notes on data subscription require some understanding of the TDengine architecture and of the APIs of the individual language connectors. (You can come back to them when you actually need them.)
- A consumer group consumes all data under the same topic; different consumer groups are independent of each other;
- A consumer group consumes all vgroups of the same topic. A group may consist of multiple consumers, but each vgroup is consumed by only one consumer; if the number of consumers exceeds the number of vgroups, the extra consumers do not consume data;
- On the server side, only one offset is kept per vgroup; the offset of each vgroup increases monotonically but is not necessarily consecutive. The offsets of different vgroups are unrelated;
- Each poll returns one result block from the server; the block belongs to one vgroup and may contain data of multiple WAL versions. The offset of the first record in the block can be obtained through the offset interface;
- If a consumer group has never committed an offset, when its member consumers restart and pull data again, they start consuming from the value set by the auto.offset.reset parameter; within a consumer's lifetime, the client locally records the offset of the most recent pull and does not pull duplicate data;
- If a consumer terminates abnormally (without calling tmq_close), a rebalance of its consumer group is triggered after about 12 seconds and the consumer's server-side state becomes LOST; after about 1 day, the consumer is deleted automatically. On a normal exit, the consumer is deleted immediately. When a new consumer is added, a rebalance is triggered after about 2 seconds and the consumer's server-side state becomes ready;
- A consumer group rebalance reassigns vgroups to all member consumers in the ready state; a consumer can only perform assignment/seek/commit/poll operations on the vgroups it is responsible for;
- A consumer can use position to obtain the current consumption offset, and seek to a specified offset to consume again;
- seek points the position at the specified offset without performing a commit; once the seek succeeds, poll can pull data from the specified offset onward;
- Before a seek, the assignment interface must be called to obtain the consumer's vgroup IDs and offset ranges. The seek operation checks that the vgroup ID and offset are valid and reports an error if not;
- position returns the current consumption position, i.e. the position to be fetched next, not the position consumed so far;
- commit submits the consumption position. Without arguments it submits the current consumption position (the position to be fetched next, not the position consumed so far); with arguments it submits the position given in the arguments, i.e. the position to resume from after the next restart;
- seek sets the consumer's consumption position; wherever you seek, position will return that value, which is always the position to be fetched next;
- seek does not affect commit, and commit does not affect seek; they are independent, distinct concepts;
- The begin interface returns the offset of the first record in the WAL, and the end interface returns the offset of the last record in the WAL plus 1;
- The offset interface returns the offset of the first record in the result block containing the record; seeking to that offset consumes all data in that block. See the fourth point above;
- Because of the WAL expiration and deletion mechanism, even if a seek succeeds, the offset may already be invalid when polling; if the polled offset is smaller than the minimum WAL version, consumption starts from the minimum WAL version;
- Data subscription consumes data from the WAL; if WAL files are deleted by the WAL retention policy, the data in those deleted files can no longer be consumed. Set `WAL_RETENTION_PERIOD` or `WAL_RETENTION_SIZE` appropriately when creating the database according to business needs, and make sure applications consume data in time so that no data loss occurs. In this respect, data subscription behaves similarly to widely used message queue products such as Kafka;
![img_5.png](img_5.png)
This document does not further introduce message queue concepts themselves; please look them up yourself if needed.
TDengine automatically creates indexes on WAL files to support fast random access, and provides a flexible, configurable file rotation and retention mechanism: users can specify how long and how large WAL files are retained as needed (see the [CREATE DATABASE](../../taos-sql/database) statement; since consumption is implemented through the WAL, the WAL retention period should be determined by the write and consumption rates). In this way the WAL becomes a persistent storage engine that preserves the order of event arrival.
Note:
For topics in the form of a `SELECT` statement, during consumption TDengine reads data directly from the WAL according to the current consumption progress, applies filtering, transformation, and other operations using the unified query engine, and pushes the data to consumers.
## Producers
Anything that writes data into the tables associated with a topic is a producer. The data a producer generates is written into subtables or regular tables, i.e. into the WAL of the vnode where the table resides.
## Consumers
### Consumer Groups
After subscribing to a topic, a consumer can consume all the data in it (the tables holding this data may be distributed across multiple vnodes, i.e. all the vnodes of the db). When subscribing, a consumer group must be specified; if the group contains only one consumer, that consumer consumes the data on these vnodes sequentially.
To increase consumption speed and enable multi-threaded, distributed consumption, multiple consumers can be added to a consumer group; they evenly divide the vnodes holding the data (for example, with data on 4 vnodes: 2 consumers each consume 2 vnodes; with 3 consumers, 2 of them consume 1 vnode each and 1 consumes 2 vnodes; with 5 consumers, 4 are each assigned 1 vnode and 1 consumes nothing), as shown in the figure below:
![img_6.png](img_6.png)
When a consumer is added to a consumer group, the Mnode reassigns consumers through the rebalance mechanism; this operation is transparent to users.
A consumer can subscribe to multiple topics. TDengine's data subscription ensures at-least-once consumption in complex environments such as crashes and restarts.
### Consumption Progress
Consumption progress is kept per vnode, per consumer group, per topic. While consuming, a consumer can commit its consumption progress, which is the WAL version number on the vnode (corresponding to the offset in Kafka). Progress can be committed manually, or committed automatically at intervals configured by the auto.commit.interval.ms parameter.
When consuming for the first time, the subscription parameter auto.offset.reset determines whether consumption starts from the latest data (latest) or the oldest data (earliest).
On a given vnode, consumption progress is unique per topic and consumer group. Therefore, if the consumer of a topic and consumer group on a vnode exits after committing its progress, and a new consumer in the same topic and consumer group is later created to consume that vnode, the new consumer inherits the previous progress and continues from there.
If the previous consumer did not commit its progress, the new consumer determines its starting position according to the value of the auto.offset.reset subscription parameter.
Consumers in different consumer groups do not share consumption progress even when consuming the same topic.
![img_7.png](img_7.png)
As a database product, TDengine stores not only data in WAL files but also other write messages, metadata, and so on, so consumption progress is not consecutive.
## Notes
Starting from version 3.2.0.0, data subscription supports vnode migration and splitting.
Because data subscription depends on WAL files, and the WAL is not carried over during vnode migration or splitting, any WAL data not yet consumed before the migration or split can no longer be consumed afterward. Please make sure all data has been consumed before migrating or splitting a vnode; otherwise data will be lost during consumption.
## Main Data Structures and APIs
Because data subscription depends on WAL files, and the WAL is not carried over during vnode migration or splitting, any WAL data not yet consumed before the migration or split can no longer be consumed afterward. Please make sure all data has been consumed before performing a vnode migration or split; otherwise data will be lost during consumption.
The TMQ subscription APIs and data structures for each language are listed below (note: the consumer structure is not thread-safe; while a consumer is being used in one thread, do not close it in another thread):
# Syntax
For the detailed syntax, see [Data Subscription](../../taos-sql/tmq).
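For orientation, the three topic forms look like this (a sketch based on the syntax shown later on this page; names are illustrative):

```sql
CREATE TOPIC topic_query AS SELECT ts, c1, c2 FROM tmqdb.stb WHERE c1 > 1; -- query (column) topic
CREATE TOPIC topic_stb WITH META AS STABLE tmqdb.stb;                      -- supertable topic
CREATE TOPIC topic_db WITH META AS DATABASE tmqdb;                         -- database topic
```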
# Consumption Parameters
Consumption parameters are mainly specified when creating a consumer. The basic configuration items are shown in the table below:
| Parameter | Type | Description | Remarks |
| :----------------------------: | :-----: | -------------------------------------------------------- | ------------------------------------------- |
| `td.connect.ip` | string | Server IP address | |
| `td.connect.user` | string | User name | |
| `td.connect.pass` | string | Password | |
| `td.connect.port` | integer | Server port number | |
| `group.id` | string | Consumer group ID; consumers in the same group share consumption progress | <br />**Required**. Maximum length: 192.<br />Each topic can have at most 100 consumer groups |
| `client.id` | string | Client ID | Maximum length: 192. |
| `auto.offset.reset` | enum | Initial position of the consumer group subscription | <br />`earliest`: default (version < 3.2.0.0), subscribe from the beginning; <br/>`latest`: default (version >= 3.2.0.0), subscribe only from the latest data; <br/>`none`: cannot subscribe without a committed offset |
| `enable.auto.commit` | boolean | Whether to enable automatic offset commits; true: commit automatically, no commit needed in the client application; false: the client application must commit manually | Default: true |
| `auto.commit.interval.ms` | integer | Interval at which consumed offsets are committed automatically, in milliseconds | Default: 5000 |
| `msg.with.table.name` | boolean | Whether to allow parsing the table name from the message; not applicable to column subscription (for column subscription, tbname can be written as a column into the subquery statement) (deprecated since version 3.2.0.0, always true) | Default: off |
| `enable.replay` | boolean | Whether to enable data replay | Default: off |
# Main Data Structures and API
The TMQ subscription APIs and data structures for each language are listed below (for detailed interface descriptions, see the connector chapters; note: the consumer structure is not thread-safe; while a consumer is being used in one thread, do not close it in another thread):
<Tabs defaultValue="java" groupId="lang">
<TabItem value="c" label="C">
@ -273,6 +310,7 @@ void Close()
</TabItem>
</Tabs>
# Data Subscription Example
## Writing Data
First create a database, a supertable, and several subtables; then you can write data, for example:
@ -282,82 +320,20 @@ DROP DATABASE IF EXISTS tmqdb;
CREATE DATABASE tmqdb WAL_RETENTION_PERIOD 3600;
CREATE TABLE tmqdb.stb (ts TIMESTAMP, c1 INT, c2 FLOAT, c3 VARCHAR(16)) TAGS(t1 INT, t3 VARCHAR(16));
CREATE TABLE tmqdb.ctb0 USING tmqdb.stb TAGS(0, "subtable0");
CREATE TABLE tmqdb.ctb1 USING tmqdb.stb TAGS(1, "subtable1");
CREATE TABLE tmqdb.ctb1 USING tmqdb.stb TAGS(1, "subtable1");
INSERT INTO tmqdb.ctb0 VALUES(now, 0, 0, 'a0')(now+1s, 0, 0, 'a00');
INSERT INTO tmqdb.ctb1 VALUES(now, 1, 1, 'a1')(now+1s, 11, 11, 'a11');
```
## Create a topic
## Create a *topic*
TDengine uses SQL to create a topic:
Use SQL to create a topic:
```sql
CREATE TOPIC topic_name AS SELECT ts, c1, c2, c3 FROM tmqdb.stb WHERE c1 > 1;
```
- The number of topics that can be created is capped by the parameter tmqMaxTopicNum, 20 by default.
TMQ supports several subscription types:
### Column Subscription
Syntax:
```sql
CREATE TOPIC topic_name as subquery
```
Subscribe via a `SELECT` statement (including `SELECT *` or specific columns such as `SELECT ts, c1`; filter conditions and scalar functions are allowed, but aggregate functions and time-window aggregation are not). Note that:
- Once this type of TOPIC is created, the structure of the subscribed data is fixed.
- Columns or tags that are subscribed or used in computation cannot be deleted (`ALTER table DROP`) or modified (`ALTER table MODIFY`).
- If the table schema changes, newly added columns do not appear in the results.
### Supertable Subscription
Syntax:
```sql
CREATE TOPIC topic_name [with meta] AS STABLE stb_name [where_condition]
```
Compared with subscribing via `SELECT * from stbName`:
- The user's table schema changes are not restricted.
- The returned data is unstructured: its structure changes as the supertable schema changes.
- The optional with meta parameter returns the statements creating the supertable, subtables, and so on; it is mainly used by taosX for supertable migration.
- The optional where_condition parameter filters which subtables are subscribed. The WHERE condition cannot reference ordinary columns, only tags or tbname; functions may be used to filter tags, but aggregate functions are not allowed, because subtable tag values cannot be aggregated. Constant expressions also work, e.g. 2 > 1 (subscribe to all subtables) or false (subscribe to none). An illustrative statement follows this list.
- The returned data does not include tags.
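For illustration only (the topic name is an assumption; `tmqdb.stb` and its first tag `t1 INT` come from the write example above), a supertable subscription with meta events and a tag filter might look like:

```sql
CREATE TOPIC topic_stb_meta WITH META AS STABLE tmqdb.stb WHERE t1 > 0;
```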
### Database Subscription
Syntax:
```sql
CREATE TOPIC topic_name [with meta] AS DATABASE db_name;
```
This statement creates a subscription covering all table data in the database.
- The optional with meta parameter returns the statements creating all supertables and subtables in the database; it is mainly used by taosX for database migration.
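Similarly, a hedged sketch of a database-level subscription (the topic name is illustrative):

```sql
CREATE TOPIC topic_db_all WITH META AS DATABASE tmqdb;
```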
## Create a Consumer *consumer*
Consumers are created with a set of configuration options; the basic items are listed in the table below:
| Parameter | Type | Description | Remarks |
| :----------------------------: | :-----: | -------------------------------------------------------- | ------------------------------------------- |
| `td.connect.ip` | string | IP address of the server | |
| `td.connect.user` | string | User name | |
| `td.connect.pass` | string | Password | |
| `td.connect.port` | integer | Port of the server | |
| `group.id` | string | Consumer group ID; the same consumer group shares consumption progress | <br />**Required**. Maximum length: 192.<br />Each topic can have at most 100 consumer groups |
| `client.id` | string | Client ID | Maximum length: 192. |
| `auto.offset.reset` | enum | Initial position of the consumer group subscription | <br />`earliest`: default (version < 3.2.0.0); subscribe from the beginning; <br/>`latest`: default (version >= 3.2.0.0); subscribe from the latest data only; <br/>`none`: cannot subscribe without a committed offset |
| `enable.auto.commit` | boolean | Whether to commit consumption offsets automatically; true: automatic commit, the client application need not commit; false: the client application must commit itself | Default: true |
| `auto.commit.interval.ms` | integer | Interval at which consumption offsets are committed automatically, in milliseconds | Default: 5000 |
| `msg.with.table.name` | boolean | Whether to allow parsing the table name from the message; not applicable to column subscriptions (for a column subscription, tbname can be written into the subquery statement as a column) (deprecated since version 3.2.0.0, always treated as true) | Off by default |
| `enable.replay` | boolean | Whether to enable data replay | Off by default |
The configuration methods for different programming languages are as follows:
<Tabs defaultValue="java" groupId="lang">
@ -520,31 +496,13 @@ var consumer = new ConsumerBuilder<Dictionary<string, object>>(cfg).Build();
</TabItem>
</Tabs>
The configuration above includes the consumer group ID; if multiple consumers specify the same consumer group ID, they automatically form one consumer group and share consumption progress.
Notes on the data replay feature:
- Subscription adds a replay feature, which replays data according to its write time.
  For example, if three records are written at the following times,
```sql
2023/09/22 00:00:00.000
2023/09/22 00:00:05.000
2023/09/22 00:00:08.000
```
  then during replay the second record is returned 5 s after the first is obtained, and the third 3 s after the second.
- Only column subscriptions support data replay.
- Replay requires an independent timeline:
  - For subtable or ordinary-table subscriptions, the data lives on a single vnode, which guarantees one timeline.
  - For supertable subscriptions, the DB must have only one vnode, otherwise an error is reported, because data subscribed from multiple vnodes does not lie on one timeline.
- Supertable and database subscriptions do not support replay.
- The enable.replay parameter is added: true enables subscription replay, false disables it; it is off by default.
- Replay does not support progress saving, so when the replay parameter enable.replay = true, auto commit is automatically disabled.
- Because replay itself takes processing time, replay precision carries an error of a few tens of milliseconds.
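A minimal sketch of enabling replay from the C client (the group ID is illustrative and connection options are omitted; recall that auto commit is disabled implicitly when replay is on):

```c
#include "taos.h"

// Create a consumer with replay enabled; only column-subscription topics qualify.
static tmq_t *build_replay_consumer(char *errstr, int32_t errstrLen) {
  tmq_conf_t *conf = tmq_conf_new();
  tmq_conf_set(conf, "group.id", "replay_group");  // illustrative group ID
  tmq_conf_set(conf, "enable.replay", "true");     // replay by original write timing
  tmq_t *tmq = tmq_consumer_new(conf, errstr, errstrLen);
  tmq_conf_destroy(conf);
  return tmq;
}
```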
## Subscribe to *topics*
A single consumer can subscribe to multiple topics at the same time.
<Tabs defaultValue="java" groupId="lang">
<TabItem value="c" label="C">
@ -837,36 +795,7 @@ consumer.Close();
</Tabs>
## Delete a *topic*
If the subscription data is no longer needed, the topic can be deleted; note that only a TOPIC that is not currently being subscribed can be deleted.
```sql
/* delete the topic */
DROP TOPIC topic_name;
```
## Checking Status
1. *topics*: list the topics that have been created
```sql
SHOW TOPICS;
```
2. consumers: query the status of consumers and the topics they subscribe to
```sql
SHOW CONSUMERS;
```
3. subscriptions: query the assignment relationship between consumers and vgroups
```sql
SHOW SUBSCRIPTIONS;
```
## Example Code
## Complete Example Code
The complete example code for each language follows.
@ -908,3 +837,22 @@ SHOW SUBSCRIPTIONS;
</TabItem>
</Tabs>
# Advanced Subscription Features
## Data Replay
- Subscription supports a replay feature, which replays data according to its write time.
  For example, if three records are written at the following times,
```sql
2023/09/22 00:00:00.000
2023/09/22 00:00:05.000
2023/09/22 00:00:08.000
```
  then during replay the second record is returned 5 s after the first is obtained, and the third 3 s after the second.
- Only query subscriptions support data replay.
- Replay requires an independent timeline:
  - For subtable or ordinary-table subscriptions, the data lives on a single vnode, which guarantees one timeline.
  - For supertable subscriptions, the DB must have only one vnode, otherwise an error is reported, because data subscribed from multiple vnodes does not lie on one timeline.
- Supertable and database subscriptions do not support replay.
- The enable.replay parameter: true enables subscription replay, false disables it; it is off by default.
- Replay does not support progress saving, so when the replay parameter enable.replay = true, auto commit is automatically disabled.
- Because replay itself takes processing time, replay precision carries an error of a few tens of milliseconds.
Binary file not shown
After
Width:  |  Height:  |  Size: 18 KiB
Binary file not shown
After
Width:  |  Height:  |  Size: 48 KiB
Binary file not shown
After
Width:  |  Height:  |  Size: 15 KiB
View File
@ -24,7 +24,7 @@ SELECT [hints] [DISTINCT] [TAGS] select_list
hints: /*+ [hint([hint_param_list])] [hint([hint_param_list])] */
hint:
BATCH_SCAN | NO_BATCH_SCAN | SORT_FOR_GROUP
BATCH_SCAN | NO_BATCH_SCAN | SORT_FOR_GROUP | PARA_TABLES_SORT
select_list:
select_expr [, select_expr] ...
@ -93,13 +93,14 @@ Hints 是用户控制单个语句查询优化的一种手段,当 Hint 不适
| NO_BATCH_SCAN | None | Read the tables sequentially | supertable JOIN statements |
| SORT_FOR_GROUP| None | Use sort-based grouping; conflicts with PARTITION_FIRST | when the partition by list contains ordinary columns |
| PARTITION_FIRST| None | Use PARTITION to compute groups before aggregation; conflicts with SORT_FOR_GROUP | when the partition by list contains ordinary columns |
| PARA_TABLES_SORT| None | When sorting supertable data by timestamp, use only memory rather than temporary disk space; with many subtables and long rows this uses a lot of memory and may cause OOM | when sorting supertable data by timestamp |
Examples:
```sql
SELECT /*+ BATCH_SCAN() */ a.ts FROM stable1 a, stable2 b where a.tag0 = b.tag0 and a.ts = b.ts;
SELECT /*+ SORT_FOR_GROUP() */ count(*), c1 FROM stable1 PARTITION BY c1;
SELECT /*+ PARTITION_FIRST() */ count(*), c1 FROM stable1 PARTITION BY c1;
SELECT /*+ PARA_TABLES_SORT() */ * from stable1 order by ts;
```
## 列表
View File
@ -491,6 +491,8 @@ TO_CHAR(ts, format_str_literal)
**Description**: Convert a timestamp to a string in the specified format.
**Version**: ver-3.2.2.0
**Return type**: VARCHAR
**Applicable fields**: TIMESTAMP
@ -550,6 +552,8 @@ TO_TIMESTAMP(ts_str_literal, format_str_literal)
**Description**: Convert a string to a timestamp according to the specified format.
**Version**: ver-3.2.2.0
**Return type**: TIMESTAMP
**Applicable fields**: VARCHAR
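As a hedged illustration of the two functions (the literal values are assumptions; the format tokens follow the conventions documented for these functions):

```sql
SELECT TO_CHAR(NOW(), 'YYYY-MM-DD HH24:MI:SS');
SELECT TO_TIMESTAMP('2024-02-24 22:23:20', 'YYYY-MM-DD HH24:MI:SS');
```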
View File
@ -6,32 +6,68 @@ description: TDengine 消息队列提供的数据订阅功能
Starting with TDengine 3.0.0.0, the message queue has been substantially optimized and enhanced to simplify user solutions.
## Create a Subscription Topic
## Create a topic
The maximum number of topics TDengine can create is controlled by the parameter tmqMaxTopicNum, 20 by default.
TDengine uses SQL to create a topic; there are three types of topic:
### Query topic
Syntax:
```sql
CREATE TOPIC [IF NOT EXISTS] topic_name AS subquery;
CREATE TOPIC [IF NOT EXISTS] topic_name as subquery
```
Subscribe via a `SELECT` statement (including `SELECT *` or specific queries such as `SELECT ts, c1`; filter conditions and scalar functions are allowed, but aggregate functions and time-window aggregation are not). Note that:
A TOPIC supports filtering, scalar functions, and UDF scalar functions, but not JOIN, GROUP BY, window clauses, aggregate functions, or UDF aggregate functions. The column-subscription rules are:
- Once this type of TOPIC is created, the structure of the subscribed data is fixed.
- Columns or tags that are subscribed or used in computation cannot be deleted (`ALTER table DROP`) or modified (`ALTER table MODIFY`).
- If the table schema changes, newly added columns do not appear in the results.
- For select \*, the subscription expands to all columns at creation time (data columns for subtables and ordinary tables; data columns plus tag columns for supertables)
### Supertable topic
1. Once a TOPIC is created, the fields of the returned results are fixed
2. Columns that are subscribed or used in computation cannot be deleted or modified
3. Columns can be added, but newly added columns do not appear in the subscription result fields
4. For select \*, the subscription expands to all columns at creation time (data columns for subtables and ordinary tables; data columns plus tag columns for supertables)
## Delete a Subscription Topic
Syntax:
```sql
CREATE TOPIC [IF NOT EXISTS] topic_name [with meta] AS STABLE stb_name [where_condition]
```
`SELECT * from stbName` 订阅的区别是:
- 不会限制用户的表结构变更。
- 返回的是非结构化的数据:返回数据的结构会随之超级表的表结构变化而变化。
- with meta 参数可选选择时将返回创建超级表子表等语句主要用于taosx做超级表迁移
- where_condition 参数可选选择时将用来过滤符合条件的子表订阅这些子表。where 条件里不能有普通列只能是tag或tbnamewhere条件里可以用函数用来过滤tag但是不能是聚合函数因为子表tag值无法做聚合。也可以是常量表达式比如 2 > 1订阅全部子表或者 false订阅0个子表
- 返回数据不包含标签。
### Database topic
Syntax:
```sql
CREATE TOPIC [IF NOT EXISTS] topic_name [with meta] AS DATABASE db_name;
```
This statement creates a subscription covering all table data in the database.
- The optional with meta parameter returns the statements creating all supertables and subtables in the database; it is mainly used by taosX for database migration.
Note: supertable and database subscriptions are advanced subscription modes and are error-prone; if you really need them, please consult a professional.
## Delete a topic
If the subscription data is no longer needed, the topic can be deleted; note that only a TOPIC that is not currently being subscribed can be deleted.
```sql
/* delete the topic */
DROP TOPIC [IF EXISTS] topic_name;
```
If a consumer exists on this subscription topic at that point, the consumer receives an error.
## View Subscription Topics
## SHOW TOPICS
## View topics
```sql
SHOW TOPICS;
@ -58,3 +94,11 @@ SHOW CONSUMERS;
```
Shows information about all active consumers in the current database.
## View Subscription Information
```sql
SHOW SUBSCRIPTIONS;
```
Shows the assignment relationship between consumers and vgroups, together with consumption information.
View File
@ -34,7 +34,7 @@ subquery: SELECT select_list
stb_name is the name of the supertable that stores the computation results. If it does not exist, it is created automatically; if it exists, the column schema is checked. See Writing to an Existing Supertable.
The TAGS clause defines the rules for creating TAGs in stream computation, so that a custom TAG value can be generated for the subtable corresponding to each partition. See Custom TAG.
```sql
create_definition:
col_name column_definition
@ -77,7 +77,7 @@ SELECT _wstart, count(*), avg(voltage) FROM meters PARTITION BY tbname INTERVAL(
CREATE STREAM avg_vol_s INTO avg_vol SUBTABLE(CONCAT('new-', tname)) AS SELECT _wstart, count(*), avg(voltage) FROM meters PARTITION BY tbname tname INTERVAL(1m);
```
In the PARTITION clause, an alias tname is defined for tbname; aliases in the PARTITION clause can be used in expression computation in the SUBTABLE clause. In the example above, the stream's newly created subtables take the original table name prefixed with 'new-' as their names.
In the PARTITION clause, an alias tname is defined for tbname; aliases in the PARTITION clause can be used in expression computation in the SUBTABLE clause. In the example above, the stream's newly created subtables take the original table name prefixed with 'new-' as their names (since 3.2.3.0, to avoid the case where the expression in SUBTABLE cannot distinguish subtables, i.e. multiple identical timelines are mistakenly written into one subtable, _groupId is appended to the specified subtable name).
Note that if a subtable name exceeds the TDengine length limit, it is truncated. If the generated subtable name already exists in another supertable, then, because TDengine subtable names are unique, creating the corresponding new subtable and writing its data will fail.
@ -212,8 +212,8 @@ CREATE STREAM streams2 trigger at_once INTO st1 TAGS(cc varchar(100)) as select
In the PARTITION clause, an alias cc is defined for concat("tag-", tbname), corresponding to the custom TAG name of supertable st1. In the example above, the TAG of the stream's newly created subtables takes the original table name prefixed with 'new-' as its value.
The TAG information is checked as follows:
1. Check whether the tag schema matches; mismatched tags are converted automatically by data type (currently an error is reported only when the data length exceeds 4096 bytes; all other cases can be converted).
2. Check whether the number of tags is the same; if it differs, the correspondence between the supertable tags and the subquery tags must be specified explicitly, otherwise an error is reported; if it is the same, the correspondence may be specified or omitted, in which case tags are matched by position.
## Cleaning Up Intermediate State
View File
@ -8,16 +8,15 @@ description: TDengine 中使用转义字符的详细规则
| Character sequence | **Represented character** |
| :------: | -------------- |
| `\'` | single quote ' |
| `\"` | double quote " |
| \n | newline |
| \r | carriage return |
| \t | tab |
| `\\` | backslash \ |
| `\%` | % (rules below) |
| `\_` | \_ (rules below) |
| `\'` | single quote `'` |
| `\"` | double quote `"` |
| `\n` | newline |
| `\r` | carriage return |
| `\t` | tab |
| `\\` | backslash `\ ` |
| `\%` | `%` (rules below) |
| `\_` | `_` (rules below) |
:::
## Rules for Using Escape Characters
@ -25,5 +24,5 @@ description: TDengine 中使用转义字符的详细规则
1. Ordinary identifiers: an error is reported for the identifier directly, because identifiers must consist of digits, letters, and underscores and must not begin with a digit.
2. Backquoted `` identifiers: kept as-is, not escaped
2. Escape characters appearing in data
1. The escape characters defined above are escaped (see below for % and \_); if there is no matching escape character, the escape symbol \ is ignored.
2. For % and \_: because these two characters are wildcards in like, use `\%` and `\_` in like pattern matching to represent the literal characters % and \_; if `\%` or `\_` is used outside a like pattern-matching context, they evaluate to the strings `\%` and `\_`, not to % and \_
1. The escape characters defined above are escaped (see below for `%` and `_`); if there is no matching escape character, the escape symbol `\ ` is ignored (`\x` is kept as-is)
2. For `%` and `_`: because these two characters are wildcards in `like`, use `\%` and `\_` in `like` pattern matching to represent the literal characters `%` and `_`; if `\%` or `\_` is used outside a `like` pattern-matching context, they evaluate to the strings `\%` and `\_`, not to `%` and `_`
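A small sketch of the like rule above (table and column names are hypothetical):

```sql
-- matches values that contain a literal '%', e.g. 'cpu%util';
-- the outer % signs are wildcards, \% stands for the character itself
SELECT * FROM t1 WHERE c3 LIKE '%\%%';
```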
View File
@ -87,19 +87,20 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
1. The subtable name is generated by the following rule: first, combine the measurement name with the tag keys and values into the following string
```json
"measurement,tag_key1=tag_value1,tag_key2=tag_value2"
```
```json
"measurement,tag_key1=tag_value1,tag_key2=tag_value2"
```
:::tip
Note that tag_key1 and tag_key2 here are not in the original order the user entered the tags, but the result of sorting the tag names in ascending string order. Therefore, tag_key1 is not the first tag entered in the line protocol.
After sorting, the MD5 hash value "md5_val" of the string is computed. The result is then combined with the string to generate the table name "t_md5_val". The prefix "t_" is fixed; every table automatically generated through this mapping carries this prefix.
:::tip
If you do not want the auto-generated table name, there are two ways to specify the subtable name; the first has higher priority:
Specify it by configuring the smlAutoChildTableNameDelimiter parameter in taos.cfg (any character except `@ # space CR LF tab`).
For example: with smlAutoChildTableNameDelimiter=- configured, inserting st,t0=cpu1,t1=4 c1=3 1626006833639000000 creates a table named cpu1-4.
Specify it by configuring the smlChildTableName parameter in taos.cfg.
For example: with smlChildTableName=tname configured, inserting st,tname=cpu1,t1=4 c1=3 1626006833639000000 creates a table named cpu1. Note that if multiple rows share the same tname but have different tag_sets, the tag_set specified when the table was automatically created from the first row is used and the other rows' tag_sets are ignored
:::tip
Note that tag_key1 and tag_key2 here are not in the original order the user entered the tags, but the result of sorting the tag names in ascending string order. Therefore, tag_key1 is not the first tag entered in the line protocol.
After sorting, the MD5 hash value "md5_val" of the string is computed. The result is then combined with the string to generate the table name "t_md5_val". The prefix "t_" is fixed; every table automatically generated through this mapping carries this prefix.
:::tip
If you do not want the auto-generated table name, there are two ways to specify the subtable name (the first has higher priority).
1. Specify it by configuring the smlAutoChildTableNameDelimiter parameter in taos.cfg (any character except `@ # space CR LF tab`).
1. For example: with smlAutoChildTableNameDelimiter=- configured, inserting st,t0=cpu1,t1=4 c1=3 1626006833639000000 creates a table named cpu1-4.
2. Specify it by configuring the smlChildTableName parameter in taos.cfg.
1. For example: with smlChildTableName=tname configured, inserting st,tname=cpu1,t1=4 c1=3 1626006833639000000 creates a table named cpu1. Note that if multiple rows share the same tname but have different tag_sets, the tag_set specified when the table was automatically created from the first row is used and the other rows' tag_sets are ignored.
2. If the supertable obtained by parsing the line protocol does not exist, it is created (manually creating supertables is not recommended, as data insertion may otherwise behave abnormally).
3. If the subtable obtained by parsing the line protocol does not exist, Schemaless creates the subtable using the name determined in step 1 or 2.
View File
@ -52,6 +52,9 @@ extern "C" {
#define TSDB_INS_TABLE_VIEWS "ins_views"
#define TSDB_INS_TABLE_COMPACTS "ins_compacts"
#define TSDB_INS_TABLE_COMPACT_DETAILS "ins_compact_details"
#define TSDB_INS_TABLE_GRANTS_FULL "ins_grants_full"
#define TSDB_INS_TABLE_GRANTS_LOGS "ins_grants_logs"
#define TSDB_INS_TABLE_MACHINES "ins_machines"
#define TSDB_PERFORMANCE_SCHEMA_DB "performance_schema"
#define TSDB_PERFS_TABLE_SMAS "perf_smas"
@ -62,6 +65,11 @@ extern "C" {
#define TSDB_PERFS_TABLE_TRANS "perf_trans"
#define TSDB_PERFS_TABLE_APPS "perf_apps"
#define TSDB_AUDIT_DB "audit"
#define TSDB_AUDIT_STB_OPERATION "operations"
#define TSDB_AUDIT_CTB_OPERATION "t_operations_"
#define TSDB_AUDIT_CTB_OPERATION_LEN 13
typedef struct SSysDbTableSchema {
const char* name;
const int32_t type;
View File
@ -54,6 +54,8 @@ typedef struct SSessionKey {
uint64_t groupId;
} SSessionKey;
typedef int64_t COUNT_TYPE;
typedef struct SVersionRange {
int64_t minVer;
int64_t maxVer;
@ -171,6 +173,7 @@ typedef enum EStreamType {
STREAM_CHECKPOINT,
STREAM_CREATE_CHILD_TABLE,
STREAM_TRANS_STATE,
STREAM_MID_RETRIEVE,
} EStreamType;
#pragma pack(push, 1)
@ -206,6 +209,7 @@ typedef struct SDataBlockInfo {
int16_t hasVarCol;
int16_t dataLoad; // denote if the data is loaded or not
uint8_t scanFlag;
bool blankFill;
// TODO: optimize and remove following
int64_t version; // used for stream, and need serialization
View File
@ -32,6 +32,9 @@ typedef struct SBlockOrderInfo {
SColumnInfoData* pColData;
} SBlockOrderInfo;
#define BLOCK_VERSION_1 1
#define BLOCK_VERSION_2 2
#define NBIT (3u)
#define BitPos(_n) ((_n) & ((1 << NBIT) - 1))
#define BMCharPos(bm_, r_) ((bm_)[(r_) >> NBIT])
View File
@ -211,6 +211,7 @@ extern int32_t tsUptimeInterval;
extern bool tsDisableStream;
extern int64_t tsStreamBufferSize;
extern int tsStreamAggCnt;
extern bool tsFilterScalarMode;
extern int32_t tsMaxStreamBackendCache;
extern int32_t tsPQSortMemThreshold;
@ -232,7 +233,7 @@ struct SConfig *taosGetCfg();
void taosSetAllDebugFlag(int32_t flag);
void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal);
void taosLocalCfgForbiddenToChange(char *name, bool *forbidden);
int8_t taosGranted();
int8_t taosGranted(int8_t type);
#ifdef __cplusplus
}
View File
@ -22,15 +22,17 @@ extern "C" {
#include "os.h"
#include "taoserror.h"
#ifdef GRANTS_CFG
#include "tgrantCfg.h"
#endif
#include "tdef.h"
#ifndef GRANTS_COL_MAX_LEN
#define GRANTS_COL_MAX_LEN 196
#endif
#define GRANT_HEART_BEAT_MIN 2
#define GRANT_ACTIVE_CODE "activeCode"
#define GRANT_FLAG_ALL (0x01)
#define GRANT_FLAG_AUDIT (0x02)
#define GRANT_FLAG_VIEW (0x04)
typedef enum {
TSDB_GRANT_ALL,
@ -48,63 +50,55 @@ typedef enum {
TSDB_GRANT_CPU_CORES,
TSDB_GRANT_STABLE,
TSDB_GRANT_TABLE,
TSDB_GRANT_SUBSCRIPTION,
TSDB_GRANT_AUDIT,
TSDB_GRANT_CSV,
TSDB_GRANT_VIEW,
TSDB_GRANT_MULTI_TIER,
TSDB_GRANT_BACKUP_RESTORE,
} EGrantType;
int32_t grantCheck(EGrantType grant);
int32_t grantAlterActiveCode(int32_t did, const char* old, const char* newer, char* out, int8_t type);
int32_t grantCheckExpire(EGrantType grant);
char* tGetMachineId();
#ifdef TD_UNIQ_GRANT
int32_t grantCheckLE(EGrantType grant);
#endif
#ifndef GRANTS_CFG
// #ifndef GRANTS_CFG
#ifdef TD_ENTERPRISE
#define GRANTS_SCHEMA \
static const SSysDbTableSchema grantsSchema[] = { \
{.name = "version", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "expire_time", .bytes = 19 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "expired", .bytes = 5 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "storage", .bytes = 21 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "timeseries", .bytes = 21 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "databases", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "users", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "accounts", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "dnodes", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "connections", .bytes = 11 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "streams", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "cpu_cores", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "speed", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "querytime", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "opc_da", .bytes = GRANTS_COL_MAX_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "opc_ua", .bytes = GRANTS_COL_MAX_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "pi", .bytes = GRANTS_COL_MAX_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "kafka", .bytes = GRANTS_COL_MAX_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "influxdb", .bytes = GRANTS_COL_MAX_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "mqtt", .bytes = GRANTS_COL_MAX_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
#define GRANTS_SCHEMA \
static const SSysDbTableSchema grantsSchema[] = { \
{.name = "version", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "expire_time", .bytes = 19 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "service_time", .bytes = 19 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "expired", .bytes = 5 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "state", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "timeseries", .bytes = 21 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "dnodes", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "cpu_cores", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
}
#else
#define GRANTS_SCHEMA \
static const SSysDbTableSchema grantsSchema[] = { \
{.name = "version", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "expire_time", .bytes = 19 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "expired", .bytes = 5 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "storage", .bytes = 21 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "timeseries", .bytes = 21 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "databases", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "users", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "accounts", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "dnodes", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "connections", .bytes = 11 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "streams", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "cpu_cores", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "speed", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "querytime", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
#define GRANTS_SCHEMA \
static const SSysDbTableSchema grantsSchema[] = { \
{.name = "version", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "expire_time", .bytes = 19 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "service_time", .bytes = 19 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "expired", .bytes = 5 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "state", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "timeseries", .bytes = 21 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "dnodes", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "cpu_cores", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
}
#endif
#define GRANT_CFG_ADD
#define GRANT_CFG_SET
#define GRANT_CFG_GET
#define GRANT_CFG_CHECK
#define GRANT_CFG_SKIP
#define GRANT_CFG_DECLARE
#define GRANT_CFG_EXTERN
#endif
// #define GRANT_CFG_ADD
// #define GRANT_CFG_SET
// #define GRANT_CFG_GET
// #define GRANT_CFG_CHECK
// #define GRANT_CFG_SKIP
// #define GRANT_CFG_DECLARE
// #define GRANT_CFG_EXTERN
// #endif
#ifdef __cplusplus
}
View File
@ -47,10 +47,11 @@ typedef struct SCorEpSet {
int32_t taosGetFqdnPortFromEp(const char* ep, SEp* pEp);
void addEpIntoEpSet(SEpSet* pEpSet, const char* fqdn, uint16_t port);
bool isEpsetEqual(const SEpSet* s1, const SEpSet* s2);
void epsetAssign(SEpSet* dst, const SEpSet* pSrc);
void updateEpSet_s(SCorEpSet* pEpSet, SEpSet* pNewEpSet);
SEpSet getEpSet_s(SCorEpSet* pEpSet);
bool isEpsetEqual(const SEpSet* s1, const SEpSet* s2);
void epsetAssign(SEpSet* dst, const SEpSet* pSrc);
void updateEpSet_s(SCorEpSet* pEpSet, SEpSet* pNewEpSet);
SEpSet getEpSet_s(SCorEpSet* pEpSet);
void epsetSort(SEpSet* pEpSet);
#ifdef __cplusplus
}
View File
@ -147,6 +147,9 @@ typedef enum _mgmt_table {
TSDB_MGMT_TABLE_VIEWS,
TSDB_MGMT_TABLE_COMPACT,
TSDB_MGMT_TABLE_COMPACT_DETAIL,
TSDB_MGMT_TABLE_GRANTS_FULL,
TSDB_MGMT_TABLE_GRANTS_LOGS,
TSDB_MGMT_TABLE_MACHINES,
TSDB_MGMT_TABLE_MAX,
} EShowType;
@ -204,9 +207,6 @@ typedef enum _mgmt_table {
#define TD_CHILD_TABLE TSDB_CHILD_TABLE
#define TD_NORMAL_TABLE TSDB_NORMAL_TABLE
#define TD_REQ_FROM_APP 0
#define TD_REQ_FROM_TAOX 1
typedef enum ENodeType {
// Syntax nodes are used in parser and planner module, and some are also used in executor module, such as COLUMN,
// VALUE, OPERATOR, FUNCTION and so on.
@ -244,6 +244,7 @@ typedef enum ENodeType {
QUERY_NODE_EVENT_WINDOW,
QUERY_NODE_HINT,
QUERY_NODE_VIEW,
QUERY_NODE_COUNT_WINDOW,
// Statement nodes are used in parser and planner module.
QUERY_NODE_SET_OPERATOR = 100,
@ -298,7 +299,8 @@ typedef enum ENodeType {
QUERY_NODE_SYNCDB_STMT,
QUERY_NODE_GRANT_STMT,
QUERY_NODE_REVOKE_STMT,
// placeholder for [152, 180]
QUERY_NODE_ALTER_CLUSTER_STMT,
// placeholder for [153, 180]
QUERY_NODE_SHOW_CREATE_VIEW_STMT = 181,
QUERY_NODE_SHOW_CREATE_DATABASE_STMT,
QUERY_NODE_SHOW_CREATE_TABLE_STMT,
@ -359,6 +361,9 @@ typedef enum ENodeType {
QUERY_NODE_SHOW_VIEWS_STMT,
QUERY_NODE_SHOW_COMPACTS_STMT,
QUERY_NODE_SHOW_COMPACT_DETAILS_STMT,
QUERY_NODE_SHOW_GRANTS_FULL_STMT,
QUERY_NODE_SHOW_GRANTS_LOGS_STMT,
QUERY_NODE_SHOW_CLUSTER_MACHINES_STMT,
// logic plan node
QUERY_NODE_LOGIC_PLAN_SCAN = 1000,
@ -424,7 +429,10 @@ typedef enum ENodeType {
QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT,
QUERY_NODE_PHYSICAL_PLAN_HASH_JOIN,
QUERY_NODE_PHYSICAL_PLAN_GROUP_CACHE,
QUERY_NODE_PHYSICAL_PLAN_DYN_QUERY_CTRL
QUERY_NODE_PHYSICAL_PLAN_DYN_QUERY_CTRL,
QUERY_NODE_PHYSICAL_PLAN_MERGE_COUNT,
QUERY_NODE_PHYSICAL_PLAN_STREAM_COUNT,
QUERY_NODE_PHYSICAL_PLAN_STREAM_MID_INTERVAL,
} ENodeType;
typedef struct {
@ -748,7 +756,7 @@ static FORCE_INLINE int32_t tDecodeSSchemaWrapperEx(SDecoder* pDecoder, SSchemaW
typedef struct {
char name[TSDB_TABLE_FNAME_LEN];
int8_t igExists;
int8_t source; // 1-taosX or 0-taosClient
int8_t source; // TD_REQ_FROM_TAOX-taosX or TD_REQ_FROM_APP-taosClient
int8_t reserved[6];
tb_uid_t suid;
int64_t delay1;
@ -791,7 +799,7 @@ void tFreeSMCreateStbRsp(SMCreateStbRsp* pRsp);
typedef struct {
char name[TSDB_TABLE_FNAME_LEN];
int8_t igNotExists;
int8_t source; // 1-taosX or 0-taosClient
int8_t source; // TD_REQ_FROM_TAOX-taosX or TD_REQ_FROM_APP-taosClient
int8_t reserved[6];
tb_uid_t suid;
int32_t sqlLen;
@ -1392,7 +1400,7 @@ void tFreeSCompactDbReq(SCompactDbReq* pReq);
typedef struct {
int32_t compactId;
int8_t bAccepted;
int8_t bAccepted;
} SCompactDbRsp;
int32_t tSerializeSCompactDbRsp(void* buf, int32_t bufLen, SCompactDbRsp* pRsp);
@ -1406,7 +1414,7 @@ typedef struct {
int32_t tSerializeSKillCompactReq(void* buf, int32_t bufLen, SKillCompactReq* pReq);
int32_t tDeserializeSKillCompactReq(void* buf, int32_t bufLen, SKillCompactReq* pReq);
void tFreeSKillCompactReq(SKillCompactReq *pReq);
void tFreeSKillCompactReq(SKillCompactReq* pReq);
typedef struct {
char name[TSDB_FUNC_NAME_LEN];
@ -1557,9 +1565,11 @@ typedef struct {
int64_t updateTime;
float numOfCores;
int32_t numOfSupportVnodes;
int32_t numOfDiskCfg;
int64_t memTotal;
int64_t memAvail;
char dnodeEp[TSDB_EP_LEN];
char machineId[TSDB_MACHINE_ID_LEN + 1];
SMnodeLoad mload;
SQnodeLoad qload;
SClusterCfg clusterCfg;
@ -1740,9 +1750,9 @@ int32_t tSerializeSCompactVnodeReq(void* buf, int32_t bufLen, SCompactVnodeReq*
int32_t tDeserializeSCompactVnodeReq(void* buf, int32_t bufLen, SCompactVnodeReq* pReq);
typedef struct {
int32_t compactId;
int32_t vgId;
int32_t dnodeId;
int32_t compactId;
int32_t vgId;
int32_t dnodeId;
} SVKillCompactReq;
int32_t tSerializeSVKillCompactReq(void* buf, int32_t bufLen, SVKillCompactReq* pReq);
@ -1943,9 +1953,9 @@ typedef struct {
char db[TSDB_DB_FNAME_LEN];
char tb[TSDB_TABLE_NAME_LEN];
char user[TSDB_USER_LEN];
char filterTb[TSDB_TABLE_NAME_LEN]; // for ins_columns
char filterTb[TSDB_TABLE_NAME_LEN]; // for ins_columns
int64_t showId;
int64_t compactId; // for compact
int64_t compactId; // for compact
} SRetrieveTableReq;
typedef struct SSysTableSchema {
@ -1975,6 +1985,14 @@ typedef struct {
char data[];
} SRetrieveTableRsp;
typedef struct {
int64_t version;
int64_t numOfRows;
int8_t compressed;
int8_t precision;
char data[];
} SRetrieveTableRspForTmq;
typedef struct {
int64_t handle;
int64_t useconds;
@ -2023,6 +2041,17 @@ int32_t tSerializeSExplainRsp(void* buf, int32_t bufLen, SExplainRsp* pRsp);
int32_t tDeserializeSExplainRsp(void* buf, int32_t bufLen, SExplainRsp* pRsp);
void tFreeSExplainRsp(SExplainRsp* pRsp);
typedef struct {
char config[TSDB_DNODE_CONFIG_LEN];
char value[TSDB_CLUSTER_VALUE_LEN];
int32_t sqlLen;
char* sql;
} SMCfgClusterReq;
int32_t tSerializeSMCfgClusterReq(void* buf, int32_t bufLen, SMCfgClusterReq* pReq);
int32_t tDeserializeSMCfgClusterReq(void* buf, int32_t bufLen, SMCfgClusterReq* pReq);
void tFreeSMCfgClusterReq(SMCfgClusterReq* pReq);
typedef struct {
char fqdn[TSDB_FQDN_LEN]; // end point, hostname:port
int32_t port;
@ -2400,6 +2429,11 @@ typedef struct SColLocation {
int8_t type;
} SColLocation;
typedef struct SVgroupVer {
int32_t vgId;
int64_t ver;
} SVgroupVer;
typedef struct {
char name[TSDB_STREAM_FNAME_LEN];
char sourceDB[TSDB_DB_FNAME_LEN];
@ -2423,6 +2457,7 @@ typedef struct {
int64_t deleteMark;
int8_t igUpdate;
int64_t lastTs;
SArray* pVgroupVerList;
} SCMCreateStreamReq;
typedef struct {
@ -2622,6 +2657,7 @@ typedef struct SVCreateStbReq {
SRSmaParam rsmaParam;
int32_t alterOriDataLen;
void* alterOriData;
int8_t source;
} SVCreateStbReq;
int tEncodeSVCreateStbReq(SEncoder* pCoder, const SVCreateStbReq* pReq);
@ -2691,6 +2727,7 @@ typedef struct {
SVCreateTbReq* pReqs;
SArray* pArray;
};
int8_t source; // TD_REQ_FROM_TAOX-taosX or TD_REQ_FROM_APP-taosClient
} SVCreateTbBatchReq;
int tEncodeSVCreateTbBatchReq(SEncoder* pCoder, const SVCreateTbBatchReq* pReq);
@ -2783,6 +2820,7 @@ typedef struct {
int32_t newCommentLen;
char* newComment;
int64_t ctimeMs; // fill by vnode
int8_t source; // TD_REQ_FROM_TAOX-taosX or TD_REQ_FROM_APP-taosClient
} SVAlterTbReq;
int32_t tEncodeSVAlterTbReq(SEncoder* pEncoder, const SVAlterTbReq* pReq);
@ -3146,18 +3184,11 @@ typedef struct {
typedef struct {
SMsgHead head;
int64_t leftForVer;
int64_t resetRelHalt; // reset related stream task halt status
int64_t streamId;
int32_t taskId;
} SVDropStreamTaskReq;
typedef struct {
SMsgHead head;
int64_t streamId;
int32_t taskId;
int64_t dataVer;
} SVStreamTaskVerUpdateReq;
typedef struct {
int8_t reserved;
} SVDropStreamTaskRsp;
@ -3323,7 +3354,7 @@ typedef struct {
SMsgHead head;
int64_t streamId;
int32_t taskId;
} SVPauseStreamTaskReq, SVResetStreamTaskReq, SVDropHTaskReq;
} SVPauseStreamTaskReq, SVResetStreamTaskReq;
typedef struct {
int8_t reserved;
@ -3603,6 +3634,7 @@ typedef struct {
int64_t timeout;
STqOffsetVal reqOffset;
int8_t enableReplay;
int8_t sourceExcluded;
} SMqPollReq;
int32_t tSerializeSMqPollReq(void* buf, int32_t bufLen, SMqPollReq* pReq);
@ -3732,6 +3764,7 @@ typedef struct {
int32_t vgId;
STqOffsetVal offset;
int64_t rows;
int64_t ever;
} OffsetRows;
typedef struct {
@ -3746,7 +3779,12 @@ typedef struct {
} SMqHbReq;
typedef struct {
int8_t reserved;
char topic[TSDB_TOPIC_FNAME_LEN];
int8_t noPrivilege;
} STopicPrivilege;
typedef struct {
SArray* topicPrivileges; // SArray<STopicPrivilege>
} SMqHbRsp;
typedef struct {
@ -3765,18 +3803,6 @@ typedef struct {
SVCreateTbReq cTbReq;
} SVSubmitBlk;
typedef struct {
int32_t flags;
int32_t nBlocks;
union {
SArray* pArray;
SVSubmitBlk* pBlocks;
};
} SVSubmitReq;
int32_t tEncodeSVSubmitReq(SEncoder* pCoder, const SVSubmitReq* pReq);
int32_t tDecodeSVSubmitReq(SDecoder* pCoder, SVSubmitReq* pReq);
typedef struct {
SMsgHead header;
uint64_t sId;
@ -3885,11 +3911,20 @@ int32_t tSerializeSMqHbReq(void* buf, int32_t bufLen, SMqHbReq* pReq);
int32_t tDeserializeSMqHbReq(void* buf, int32_t bufLen, SMqHbReq* pReq);
int32_t tDeatroySMqHbReq(SMqHbReq* pReq);
int32_t tSerializeSMqHbRsp(void* buf, int32_t bufLen, SMqHbRsp* pRsp);
int32_t tDeserializeSMqHbRsp(void* buf, int32_t bufLen, SMqHbRsp* pRsp);
int32_t tDeatroySMqHbRsp(SMqHbRsp* pRsp);
int32_t tSerializeSMqSeekReq(void* buf, int32_t bufLen, SMqSeekReq* pReq);
int32_t tDeserializeSMqSeekReq(void* buf, int32_t bufLen, SMqSeekReq* pReq);
#define TD_REQ_FROM_APP 0x0
#define SUBMIT_REQ_AUTO_CREATE_TABLE 0x1
#define SUBMIT_REQ_COLUMN_DATA_FORMAT 0x2
#define SUBMIT_REQ_FROM_FILE 0x4
#define TD_REQ_FROM_TAOX 0x8
#define TD_REQ_FROM_TAOX_OLD 0x1 // for compatibility
typedef struct {
int32_t flags;
View File
@ -217,6 +217,8 @@
TD_DEF_MSG_TYPE(TDMT_MND_VIEW_META, "view-meta", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_KILL_COMPACT, "kill-compact", SKillCompactReq, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_COMPACT_TIMER, "compact-tmr", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_STREAM_REQ_CHKPT, "stream-req-checkpoint", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_CONFIG_CLUSTER, "config-cluster", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_MAX_MSG, "mnd-max", NULL, NULL)
TD_CLOSE_MSG_SEG(TDMT_END_MND_MSG)
@ -301,7 +303,6 @@
TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_PAUSE, "stream-task-pause", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_RESUME, "stream-task-resume", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_STOP, "stream-task-stop", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_STREAM_HTASK_DROP, "stream-htask-drop", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_STREAM_MAX_MSG, "stream-max", NULL, NULL)
TD_CLOSE_MSG_SEG(TDMT_END_STREAM_MSG)
@ -337,13 +338,13 @@
TD_DEF_MSG_TYPE(TDMT_SYNC_LOCAL_CMD, "sync-local-cmd", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SYNC_PREP_SNAPSHOT, "sync-prep-snapshot", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SYNC_PREP_SNAPSHOT_REPLY, "sync-prep-snapshot-reply", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SYNC_MAX_MSG, "sync-max", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SYNC_UNUSED_CODE, "sync-unused", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SYNC_FORCE_FOLLOWER, "sync-force-become-follower", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SYNC_MAX_MSG, "sync-max", NULL, NULL)
TD_CLOSE_MSG_SEG(TDMT_END_SYNC_MSG)
TD_NEW_MSG_SEG(TDMT_VND_STREAM_MSG) //7 << 8
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_SCAN_HISTORY, "vnode-stream-scan-history", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_SCAN_HISTORY_FINISH, "vnode-stream-scan-history-finish", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_CHECK_POINT_SOURCE, "vnode-stream-checkpoint-source", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_TASK_UPDATE, "vnode-stream-update", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_TASK_RESET, "vnode-stream-reset", NULL, NULL)
View File
@ -74,295 +74,300 @@
#define TK_NK_IPTOKEN 55
#define TK_FORCE 56
#define TK_UNSAFE 57
#define TK_LOCAL 58
#define TK_QNODE 59
#define TK_BNODE 60
#define TK_SNODE 61
#define TK_MNODE 62
#define TK_VNODE 63
#define TK_DATABASE 64
#define TK_USE 65
#define TK_FLUSH 66
#define TK_TRIM 67
#define TK_COMPACT 68
#define TK_IF 69
#define TK_NOT 70
#define TK_EXISTS 71
#define TK_BUFFER 72
#define TK_CACHEMODEL 73
#define TK_CACHESIZE 74
#define TK_COMP 75
#define TK_DURATION 76
#define TK_NK_VARIABLE 77
#define TK_MAXROWS 78
#define TK_MINROWS 79
#define TK_KEEP 80
#define TK_PAGES 81
#define TK_PAGESIZE 82
#define TK_TSDB_PAGESIZE 83
#define TK_PRECISION 84
#define TK_REPLICA 85
#define TK_VGROUPS 86
#define TK_SINGLE_STABLE 87
#define TK_RETENTIONS 88
#define TK_SCHEMALESS 89
#define TK_WAL_LEVEL 90
#define TK_WAL_FSYNC_PERIOD 91
#define TK_WAL_RETENTION_PERIOD 92
#define TK_WAL_RETENTION_SIZE 93
#define TK_WAL_ROLL_PERIOD 94
#define TK_WAL_SEGMENT_SIZE 95
#define TK_STT_TRIGGER 96
#define TK_TABLE_PREFIX 97
#define TK_TABLE_SUFFIX 98
#define TK_KEEP_TIME_OFFSET 99
#define TK_NK_COLON 100
#define TK_BWLIMIT 101
#define TK_START 102
#define TK_TIMESTAMP 103
#define TK_END 104
#define TK_TABLE 105
#define TK_NK_LP 106
#define TK_NK_RP 107
#define TK_STABLE 108
#define TK_COLUMN 109
#define TK_MODIFY 110
#define TK_RENAME 111
#define TK_TAG 112
#define TK_SET 113
#define TK_NK_EQ 114
#define TK_USING 115
#define TK_TAGS 116
#define TK_BOOL 117
#define TK_TINYINT 118
#define TK_SMALLINT 119
#define TK_INT 120
#define TK_INTEGER 121
#define TK_BIGINT 122
#define TK_FLOAT 123
#define TK_DOUBLE 124
#define TK_BINARY 125
#define TK_NCHAR 126
#define TK_UNSIGNED 127
#define TK_JSON 128
#define TK_VARCHAR 129
#define TK_MEDIUMBLOB 130
#define TK_BLOB 131
#define TK_VARBINARY 132
#define TK_GEOMETRY 133
#define TK_DECIMAL 134
#define TK_COMMENT 135
#define TK_MAX_DELAY 136
#define TK_WATERMARK 137
#define TK_ROLLUP 138
#define TK_TTL 139
#define TK_SMA 140
#define TK_DELETE_MARK 141
#define TK_FIRST 142
#define TK_LAST 143
#define TK_SHOW 144
#define TK_PRIVILEGES 145
#define TK_DATABASES 146
#define TK_TABLES 147
#define TK_STABLES 148
#define TK_MNODES 149
#define TK_QNODES 150
#define TK_FUNCTIONS 151
#define TK_INDEXES 152
#define TK_ACCOUNTS 153
#define TK_APPS 154
#define TK_CONNECTIONS 155
#define TK_LICENCES 156
#define TK_GRANTS 157
#define TK_QUERIES 158
#define TK_SCORES 159
#define TK_TOPICS 160
#define TK_VARIABLES 161
#define TK_CLUSTER 162
#define TK_BNODES 163
#define TK_SNODES 164
#define TK_TRANSACTIONS 165
#define TK_DISTRIBUTED 166
#define TK_CONSUMERS 167
#define TK_SUBSCRIPTIONS 168
#define TK_VNODES 169
#define TK_ALIVE 170
#define TK_VIEWS 171
#define TK_VIEW 172
#define TK_COMPACTS 173
#define TK_NORMAL 174
#define TK_CHILD 175
#define TK_LIKE 176
#define TK_TBNAME 177
#define TK_QTAGS 178
#define TK_AS 179
#define TK_SYSTEM 180
#define TK_INDEX 181
#define TK_FUNCTION 182
#define TK_INTERVAL 183
#define TK_COUNT 184
#define TK_LAST_ROW 185
#define TK_META 186
#define TK_ONLY 187
#define TK_TOPIC 188
#define TK_CONSUMER 189
#define TK_GROUP 190
#define TK_DESC 191
#define TK_DESCRIBE 192
#define TK_RESET 193
#define TK_QUERY 194
#define TK_CACHE 195
#define TK_EXPLAIN 196
#define TK_ANALYZE 197
#define TK_VERBOSE 198
#define TK_NK_BOOL 199
#define TK_RATIO 200
#define TK_NK_FLOAT 201
#define TK_OUTPUTTYPE 202
#define TK_AGGREGATE 203
#define TK_BUFSIZE 204
#define TK_LANGUAGE 205
#define TK_REPLACE 206
#define TK_STREAM 207
#define TK_INTO 208
#define TK_PAUSE 209
#define TK_RESUME 210
#define TK_TRIGGER 211
#define TK_AT_ONCE 212
#define TK_WINDOW_CLOSE 213
#define TK_IGNORE 214
#define TK_EXPIRED 215
#define TK_FILL_HISTORY 216
#define TK_UPDATE 217
#define TK_SUBTABLE 218
#define TK_UNTREATED 219
#define TK_KILL 220
#define TK_CONNECTION 221
#define TK_TRANSACTION 222
#define TK_BALANCE 223
#define TK_VGROUP 224
#define TK_LEADER 225
#define TK_MERGE 226
#define TK_REDISTRIBUTE 227
#define TK_SPLIT 228
#define TK_DELETE 229
#define TK_INSERT 230
#define TK_NULL 231
#define TK_NK_QUESTION 232
#define TK_NK_ALIAS 233
#define TK_NK_ARROW 234
#define TK_ROWTS 235
#define TK_QSTART 236
#define TK_QEND 237
#define TK_QDURATION 238
#define TK_WSTART 239
#define TK_WEND 240
#define TK_WDURATION 241
#define TK_IROWTS 242
#define TK_ISFILLED 243
#define TK_CAST 244
#define TK_NOW 245
#define TK_TODAY 246
#define TK_TIMEZONE 247
#define TK_CLIENT_VERSION 248
#define TK_SERVER_VERSION 249
#define TK_SERVER_STATUS 250
#define TK_CURRENT_USER 251
#define TK_CASE 252
#define TK_WHEN 253
#define TK_THEN 254
#define TK_ELSE 255
#define TK_BETWEEN 256
#define TK_IS 257
#define TK_NK_LT 258
#define TK_NK_GT 259
#define TK_NK_LE 260
#define TK_NK_GE 261
#define TK_NK_NE 262
#define TK_MATCH 263
#define TK_NMATCH 264
#define TK_CONTAINS 265
#define TK_IN 266
#define TK_JOIN 267
#define TK_INNER 268
#define TK_SELECT 269
#define TK_NK_HINT 270
#define TK_DISTINCT 271
#define TK_WHERE 272
#define TK_PARTITION 273
#define TK_BY 274
#define TK_SESSION 275
#define TK_STATE_WINDOW 276
#define TK_EVENT_WINDOW 277
#define TK_SLIDING 278
#define TK_FILL 279
#define TK_VALUE 280
#define TK_VALUE_F 281
#define TK_NONE 282
#define TK_PREV 283
#define TK_NULL_F 284
#define TK_LINEAR 285
#define TK_NEXT 286
#define TK_HAVING 287
#define TK_RANGE 288
#define TK_EVERY 289
#define TK_ORDER 290
#define TK_SLIMIT 291
#define TK_SOFFSET 292
#define TK_LIMIT 293
#define TK_OFFSET 294
#define TK_ASC 295
#define TK_NULLS 296
#define TK_ABORT 297
#define TK_AFTER 298
#define TK_ATTACH 299
#define TK_BEFORE 300
#define TK_BEGIN 301
#define TK_BITAND 302
#define TK_BITNOT 303
#define TK_BITOR 304
#define TK_BLOCKS 305
#define TK_CHANGE 306
#define TK_COMMA 307
#define TK_CONCAT 308
#define TK_CONFLICT 309
#define TK_COPY 310
#define TK_DEFERRED 311
#define TK_DELIMITERS 312
#define TK_DETACH 313
#define TK_DIVIDE 314
#define TK_DOT 315
#define TK_EACH 316
#define TK_FAIL 317
#define TK_FILE 318
#define TK_FOR 319
#define TK_GLOB 320
#define TK_ID 321
#define TK_IMMEDIATE 322
#define TK_IMPORT 323
#define TK_INITIALLY 324
#define TK_INSTEAD 325
#define TK_ISNULL 326
#define TK_KEY 327
#define TK_MODULES 328
#define TK_NK_BITNOT 329
#define TK_NK_SEMI 330
#define TK_NOTNULL 331
#define TK_OF 332
#define TK_PLUS 333
#define TK_PRIVILEGE 334
#define TK_RAISE 335
#define TK_RESTRICT 336
#define TK_ROW 337
#define TK_SEMI 338
#define TK_STAR 339
#define TK_STATEMENT 340
#define TK_STRICT 341
#define TK_STRING 342
#define TK_TIMES 343
#define TK_VALUES 344
#define TK_VARIABLE 345
#define TK_WAL 346
#define TK_CLUSTER 58
#define TK_LOCAL 59
#define TK_QNODE 60
#define TK_BNODE 61
#define TK_SNODE 62
#define TK_MNODE 63
#define TK_VNODE 64
#define TK_DATABASE 65
#define TK_USE 66
#define TK_FLUSH 67
#define TK_TRIM 68
#define TK_COMPACT 69
#define TK_IF 70
#define TK_NOT 71
#define TK_EXISTS 72
#define TK_BUFFER 73
#define TK_CACHEMODEL 74
#define TK_CACHESIZE 75
#define TK_COMP 76
#define TK_DURATION 77
#define TK_NK_VARIABLE 78
#define TK_MAXROWS 79
#define TK_MINROWS 80
#define TK_KEEP 81
#define TK_PAGES 82
#define TK_PAGESIZE 83
#define TK_TSDB_PAGESIZE 84
#define TK_PRECISION 85
#define TK_REPLICA 86
#define TK_VGROUPS 87
#define TK_SINGLE_STABLE 88
#define TK_RETENTIONS 89
#define TK_SCHEMALESS 90
#define TK_WAL_LEVEL 91
#define TK_WAL_FSYNC_PERIOD 92
#define TK_WAL_RETENTION_PERIOD 93
#define TK_WAL_RETENTION_SIZE 94
#define TK_WAL_ROLL_PERIOD 95
#define TK_WAL_SEGMENT_SIZE 96
#define TK_STT_TRIGGER 97
#define TK_TABLE_PREFIX 98
#define TK_TABLE_SUFFIX 99
#define TK_KEEP_TIME_OFFSET 100
#define TK_NK_COLON 101
#define TK_BWLIMIT 102
#define TK_START 103
#define TK_TIMESTAMP 104
#define TK_END 105
#define TK_TABLE 106
#define TK_NK_LP 107
#define TK_NK_RP 108
#define TK_STABLE 109
#define TK_COLUMN 110
#define TK_MODIFY 111
#define TK_RENAME 112
#define TK_TAG 113
#define TK_SET 114
#define TK_NK_EQ 115
#define TK_USING 116
#define TK_TAGS 117
#define TK_BOOL 118
#define TK_TINYINT 119
#define TK_SMALLINT 120
#define TK_INT 121
#define TK_INTEGER 122
#define TK_BIGINT 123
#define TK_FLOAT 124
#define TK_DOUBLE 125
#define TK_BINARY 126
#define TK_NCHAR 127
#define TK_UNSIGNED 128
#define TK_JSON 129
#define TK_VARCHAR 130
#define TK_MEDIUMBLOB 131
#define TK_BLOB 132
#define TK_VARBINARY 133
#define TK_GEOMETRY 134
#define TK_DECIMAL 135
#define TK_COMMENT 136
#define TK_MAX_DELAY 137
#define TK_WATERMARK 138
#define TK_ROLLUP 139
#define TK_TTL 140
#define TK_SMA 141
#define TK_DELETE_MARK 142
#define TK_FIRST 143
#define TK_LAST 144
#define TK_SHOW 145
#define TK_PRIVILEGES 146
#define TK_DATABASES 147
#define TK_TABLES 148
#define TK_STABLES 149
#define TK_MNODES 150
#define TK_QNODES 151
#define TK_FUNCTIONS 152
#define TK_INDEXES 153
#define TK_ACCOUNTS 154
#define TK_APPS 155
#define TK_CONNECTIONS 156
#define TK_LICENCES 157
#define TK_GRANTS 158
#define TK_FULL 159
#define TK_LOGS 160
#define TK_MACHINES 161
#define TK_QUERIES 162
#define TK_SCORES 163
#define TK_TOPICS 164
#define TK_VARIABLES 165
#define TK_BNODES 166
#define TK_SNODES 167
#define TK_TRANSACTIONS 168
#define TK_DISTRIBUTED 169
#define TK_CONSUMERS 170
#define TK_SUBSCRIPTIONS 171
#define TK_VNODES 172
#define TK_ALIVE 173
#define TK_VIEWS 174
#define TK_VIEW 175
#define TK_COMPACTS 176
#define TK_NORMAL 177
#define TK_CHILD 178
#define TK_LIKE 179
#define TK_TBNAME 180
#define TK_QTAGS 181
#define TK_AS 182
#define TK_SYSTEM 183
#define TK_INDEX 184
#define TK_FUNCTION 185
#define TK_INTERVAL 186
#define TK_COUNT 187
#define TK_LAST_ROW 188
#define TK_META 189
#define TK_ONLY 190
#define TK_TOPIC 191
#define TK_CONSUMER 192
#define TK_GROUP 193
#define TK_DESC 194
#define TK_DESCRIBE 195
#define TK_RESET 196
#define TK_QUERY 197
#define TK_CACHE 198
#define TK_EXPLAIN 199
#define TK_ANALYZE 200
#define TK_VERBOSE 201
#define TK_NK_BOOL 202
#define TK_RATIO 203
#define TK_NK_FLOAT 204
#define TK_OUTPUTTYPE 205
#define TK_AGGREGATE 206
#define TK_BUFSIZE 207
#define TK_LANGUAGE 208
#define TK_REPLACE 209
#define TK_STREAM 210
#define TK_INTO 211
#define TK_PAUSE 212
#define TK_RESUME 213
#define TK_TRIGGER 214
#define TK_AT_ONCE 215
#define TK_WINDOW_CLOSE 216
#define TK_IGNORE 217
#define TK_EXPIRED 218
#define TK_FILL_HISTORY 219
#define TK_UPDATE 220
#define TK_SUBTABLE 221
#define TK_UNTREATED 222
#define TK_KILL 223
#define TK_CONNECTION 224
#define TK_TRANSACTION 225
#define TK_BALANCE 226
#define TK_VGROUP 227
#define TK_LEADER 228
#define TK_MERGE 229
#define TK_REDISTRIBUTE 230
#define TK_SPLIT 231
#define TK_DELETE 232
#define TK_INSERT 233
#define TK_NULL 234
#define TK_NK_QUESTION 235
#define TK_NK_ALIAS 236
#define TK_NK_ARROW 237
#define TK_ROWTS 238
#define TK_QSTART 239
#define TK_QEND 240
#define TK_QDURATION 241
#define TK_WSTART 242
#define TK_WEND 243
#define TK_WDURATION 244
#define TK_IROWTS 245
#define TK_ISFILLED 246
#define TK_CAST 247
#define TK_NOW 248
#define TK_TODAY 249
#define TK_TIMEZONE 250
#define TK_CLIENT_VERSION 251
#define TK_SERVER_VERSION 252
#define TK_SERVER_STATUS 253
#define TK_CURRENT_USER 254
#define TK_CASE 255
#define TK_WHEN 256
#define TK_THEN 257
#define TK_ELSE 258
#define TK_BETWEEN 259
#define TK_IS 260
#define TK_NK_LT 261
#define TK_NK_GT 262
#define TK_NK_LE 263
#define TK_NK_GE 264
#define TK_NK_NE 265
#define TK_MATCH 266
#define TK_NMATCH 267
#define TK_CONTAINS 268
#define TK_IN 269
#define TK_JOIN 270
#define TK_INNER 271
#define TK_SELECT 272
#define TK_NK_HINT 273
#define TK_DISTINCT 274
#define TK_WHERE 275
#define TK_PARTITION 276
#define TK_BY 277
#define TK_SESSION 278
#define TK_STATE_WINDOW 279
#define TK_EVENT_WINDOW 280
#define TK_COUNT_WINDOW 281
#define TK_SLIDING 282
#define TK_FILL 283
#define TK_VALUE 284
#define TK_VALUE_F 285
#define TK_NONE 286
#define TK_PREV 287
#define TK_NULL_F 288
#define TK_LINEAR 289
#define TK_NEXT 290
#define TK_HAVING 291
#define TK_RANGE 292
#define TK_EVERY 293
#define TK_ORDER 294
#define TK_SLIMIT 295
#define TK_SOFFSET 296
#define TK_LIMIT 297
#define TK_OFFSET 298
#define TK_ASC 299
#define TK_NULLS 300
#define TK_ABORT 301
#define TK_AFTER 302
#define TK_ATTACH 303
#define TK_BEFORE 304
#define TK_BEGIN 305
#define TK_BITAND 306
#define TK_BITNOT 307
#define TK_BITOR 308
#define TK_BLOCKS 309
#define TK_CHANGE 310
#define TK_COMMA 311
#define TK_CONCAT 312
#define TK_CONFLICT 313
#define TK_COPY 314
#define TK_DEFERRED 315
#define TK_DELIMITERS 316
#define TK_DETACH 317
#define TK_DIVIDE 318
#define TK_DOT 319
#define TK_EACH 320
#define TK_FAIL 321
#define TK_FILE 322
#define TK_FOR 323
#define TK_GLOB 324
#define TK_ID 325
#define TK_IMMEDIATE 326
#define TK_IMPORT 327
#define TK_INITIALLY 328
#define TK_INSTEAD 329
#define TK_ISNULL 330
#define TK_KEY 331
#define TK_MODULES 332
#define TK_NK_BITNOT 333
#define TK_NK_SEMI 334
#define TK_NOTNULL 335
#define TK_OF 336
#define TK_PLUS 337
#define TK_PRIVILEGE 338
#define TK_RAISE 339
#define TK_RESTRICT 340
#define TK_ROW 341
#define TK_SEMI 342
#define TK_STAR 343
#define TK_STATEMENT 344
#define TK_STRICT 345
#define TK_STRING 346
#define TK_TIMES 347
#define TK_VALUES 348
#define TK_VARIABLE 349
#define TK_WAL 350
#define TK_NK_SPACE 600
#define TK_NK_COMMENT 601
@ -374,6 +379,7 @@
#define TK_NO_BATCH_SCAN 607
#define TK_SORT_FOR_GROUP 608
#define TK_PARTITION_FIRST 609
#define TK_PARA_TABLES_SORT 610
#define TK_NK_NIL 65535
View File
@ -18,16 +18,15 @@
// message process
int32_t tqStreamTaskStartAsync(SStreamMeta* pMeta, SMsgCb* cb, bool restart);
int32_t tqStreamOneTaskStartAsync(SStreamMeta* pMeta, SMsgCb* cb, int64_t streamId, int32_t taskId);
int32_t tqStreamStartOneTaskAsync(SStreamMeta* pMeta, SMsgCb* cb, int64_t streamId, int32_t taskId);
int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pMsg, bool restored);
int32_t tqStreamTaskProcessDispatchReq(SStreamMeta* pMeta, SRpcMsg* pMsg);
int32_t tqStreamTaskProcessDispatchRsp(SStreamMeta* pMeta, SRpcMsg* pMsg);
int32_t tqStreamTaskProcessRetrieveReq(SStreamMeta* pMeta, SRpcMsg* pMsg);
int32_t tqStreamTaskProcessScanHistoryFinishReq(SStreamMeta* pMeta, SRpcMsg* pMsg);
int32_t tqStreamTaskProcessScanHistoryFinishRsp(SStreamMeta* pMeta, SRpcMsg* pMsg);
int32_t tqStreamTaskProcessCheckReq(SStreamMeta* pMeta, SRpcMsg* pMsg);
int32_t tqStreamTaskProcessCheckRsp(SStreamMeta* pMeta, SRpcMsg* pMsg, bool isLeader);
int32_t tqStreamTaskProcessCheckpointReadyMsg(SStreamMeta* pMeta, SRpcMsg* pMsg);
int32_t tqStreamProcessStreamHbRsp(SStreamMeta* pMeta, SRpcMsg* pMsg);
int32_t tqStreamTaskProcessDeployReq(SStreamMeta* pMeta, SMsgCb* cb, int64_t sversion, char* msg, int32_t msgLen,
bool isLeader, bool restored);
int32_t tqStreamTaskProcessDropReq(SStreamMeta* pMeta, char* msg, int32_t msgLen);
View File
@ -358,7 +358,7 @@ int32_t catalogGetUdfInfo(SCatalog* pCtg, SRequestConnInfo* pConn, const char* f
int32_t catalogChkAuth(SCatalog* pCtg, SRequestConnInfo* pConn, SUserAuthInfo *pAuth, SUserAuthRes* pRes);
int32_t catalogChkAuthFromCache(SCatalog* pCtg, SUserAuthInfo *pAuth, SUserAuthRes* pRes, bool* exists);
int32_t catalogChkAuthFromCache(SCatalog* pCtg, SUserAuthInfo *pAuth, SUserAuthRes* pRes, bool* exists);
int32_t catalogUpdateUserAuthInfo(SCatalog* pCtg, SGetUserAuthRsp* pAuth);
View File
@ -197,6 +197,8 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT
void qStreamSetOpen(qTaskInfo_t tinfo);
void qStreamSetSourceExcluded(qTaskInfo_t tinfo, int8_t sourceExcluded);
void qStreamExtractOffset(qTaskInfo_t tinfo, STqOffsetVal* pOffset);
SMqMetaRsp* qStreamExtractMetaMsg(qTaskInfo_t tinfo);
@ -210,11 +212,9 @@ void* qExtractReaderFromStreamScanner(void* scanner);
int32_t qExtractStreamScanner(qTaskInfo_t tinfo, void** scanner);
int32_t qSetStreamOperatorOptionForScanHistory(qTaskInfo_t tinfo);
int32_t qResetStreamOperatorOptionForScanHistory(qTaskInfo_t tinfo);
int32_t qStreamSourceScanParamForHistoryScanStep1(qTaskInfo_t tinfo, SVersionRange *pVerRange, STimeWindow* pWindow);
int32_t qStreamSourceScanParamForHistoryScanStep2(qTaskInfo_t tinfo, SVersionRange *pVerRange, STimeWindow* pWindow);
int32_t qStreamRecoverFinish(qTaskInfo_t tinfo);
int32_t qRestoreStreamOperatorOption(qTaskInfo_t tinfo);
bool qStreamScanhistoryFinished(qTaskInfo_t tinfo);
int32_t qStreamInfoResetTimewindowFilter(qTaskInfo_t tinfo);
void resetTaskInfo(qTaskInfo_t tinfo);
View File
@ -211,6 +211,7 @@ typedef struct SStoreTqReader {
bool (*tqNextBlockImpl)(); // todo remove it
SSDataBlock* (*tqGetResultBlock)();
int64_t (*tqGetResultBlockTime)();
int32_t (*tqGetStreamExecProgress)();
void (*tqReaderSetColIdList)();
int32_t (*tqReaderSetQueryTableList)();
@ -266,16 +267,11 @@ typedef struct SStoreMeta {
// support filter and non-filter cases. [vnodeGetCtbIdList & vnodeGetCtbIdListByFilter]
int32_t (*getChildTableList)(void* pVnode, int64_t suid, SArray* list);
int32_t (*storeGetTableList)(void* pVnode, int8_t type, SArray* pList);
void* storeGetVersionRange;
void* storeGetLastTimestamp;
int32_t (*getTableSchema)(void* pVnode, int64_t uid, STSchema** pSchema, int64_t* suid); // tsdbGetTableSchema
int32_t (*getTableSchema)(void* pVnode, int64_t uid, STSchema** pSchema, int64_t* suid);
int32_t (*getNumOfChildTables)(void* pVnode, int64_t uid, int64_t* numOfTables, int32_t* numOfCols);
void (*getBasicInfo)(void* pVnode, const char** dbname, int32_t* vgId, int64_t* numOfTables,
int64_t* numOfNormalTables);
int64_t (*getNumOfRowsInMem)(void* pVnode);
SMCtbCursor* (*openCtbCursor)(void* pVnode, tb_uid_t uid, int lock);
int32_t (*resumeCtbCursor)(SMCtbCursor* pCtbCur, int8_t first);
void (*pauseCtbCursor)(SMCtbCursor* pCtbCur);
@ -356,14 +352,19 @@ typedef struct SStateStore {
int32_t (*streamStateSessionPut)(SStreamState* pState, const SSessionKey* key, void* value, int32_t vLen);
int32_t (*streamStateSessionGet)(SStreamState* pState, SSessionKey* key, void** pVal, int32_t* pVLen);
int32_t (*streamStateSessionDel)(SStreamState* pState, const SSessionKey* key);
int32_t (*streamStateSessionReset)(SStreamState* pState, void* pVal);
int32_t (*streamStateSessionClear)(SStreamState* pState);
int32_t (*streamStateSessionGetKVByCur)(SStreamStateCur* pCur, SSessionKey* pKey, void** pVal, int32_t* pVLen);
int32_t (*streamStateStateAddIfNotExist)(SStreamState* pState, SSessionKey* key, char* pKeyData, int32_t keyDataLen,
state_key_cmpr_fn fn, void** pVal, int32_t* pVLen);
int32_t (*streamStateSessionGetKeyByRange)(SStreamState* pState, const SSessionKey* range, SSessionKey* curKey);
int32_t (*streamStateCountGetKeyByRange)(SStreamState* pState, const SSessionKey* range, SSessionKey* curKey);
int32_t (*streamStateSessionAllocWinBuffByNextPosition)(SStreamState* pState, SStreamStateCur* pCur,
const SSessionKey* pKey, void** pVal, int32_t* pVLen);
int32_t (*streamStateCountWinAddIfNotExist)(SStreamState* pState, SSessionKey* pKey, COUNT_TYPE winCount, void** ppVal, int32_t* pVLen);
int32_t (*streamStateCountWinAdd)(SStreamState* pState, SSessionKey* pKey, void** pVal, int32_t* pVLen);
SUpdateInfo* (*updateInfoInit)(int64_t interval, int32_t precision, int64_t watermark, bool igUp);
TSKEY (*updateInfoFillBlockData)(SUpdateInfo* pInfo, SSDataBlock* pBlock, int32_t primaryTsCol);
bool (*updateInfoIsUpdated)(SUpdateInfo* pInfo, uint64_t tableId, TSKEY ts);
@ -381,6 +382,7 @@ typedef struct SStateStore {
int32_t (*updateInfoDeserialize)(void* buf, int32_t bufLen, SUpdateInfo* pInfo);
SStreamStateCur* (*streamStateSessionSeekKeyNext)(SStreamState* pState, const SSessionKey* key);
SStreamStateCur* (*streamStateCountSeekKeyPrev)(SStreamState* pState, const SSessionKey* pKey, COUNT_TYPE count);
SStreamStateCur* (*streamStateSessionSeekKeyCurrentPrev)(SStreamState* pState, const SSessionKey* key);
SStreamStateCur* (*streamStateSessionSeekKeyCurrentNext)(SStreamState* pState, const SSessionKey* key);
View File
@ -114,6 +114,7 @@ typedef struct SInputColumnInfoData {
int32_t totalRows; // total rows in current columnar data
int32_t startRowIndex; // handle started row index
int64_t numOfRows; // the number of rows needs to be handled
bool blankFill; // fill blank data to block for empty table
int32_t numOfInputCols; // PTS is not included
bool colDataSMAIsSet; // if agg is set or not
SColumnInfoData *pPTS; // primary timestamp column
View File
@ -126,6 +126,7 @@ typedef enum EFunctionType {
FUNCTION_TYPE_TAGS,
FUNCTION_TYPE_TBUID,
FUNCTION_TYPE_VGID,
FUNCTION_TYPE_VGVER,
// internal function
FUNCTION_TYPE_SELECT_VALUE = 3750,
@ -243,7 +244,7 @@ bool fmIsSkipScanCheckFunc(int32_t funcId);
void getLastCacheDataType(SDataType* pType);
SFunctionNode* createFunction(const char* pName, SNodeList* pParameterList);
int32_t fmGetDistMethod(const SFunctionNode* pFunc, SFunctionNode** pPartialFunc, SFunctionNode** pMergeFunc);
int32_t fmGetDistMethod(const SFunctionNode* pFunc, SFunctionNode** pPartialFunc, SFunctionNode** pMidFunc, SFunctionNode** pMergeFunc);
typedef enum EFuncDataRequired {
FUNC_DATA_REQUIRED_DATA_LOAD = 1,
View File
@ -420,6 +420,12 @@ typedef struct SDropCGroupStmt {
bool ignoreNotExists;
} SDropCGroupStmt;
typedef struct SAlterClusterStmt {
ENodeType type;
char config[TSDB_DNODE_CONFIG_LEN];
char value[TSDB_CLUSTER_VALUE_LEN];
} SAlterClusterStmt;
typedef struct SAlterLocalStmt {
ENodeType type;
char config[TSDB_DNODE_CONFIG_LEN];
View File
@ -121,6 +121,7 @@ typedef struct SScanLogicNode {
bool filesetDelimited; // returned blocks delimited by fileset
bool isCountByTag; // true if selectstmt hasCountFunc & part by tag/tbname
SArray* pFuncTypes; // for last, last_row
bool paraTablesSort; // for table merge scan
} SScanLogicNode;
typedef struct SJoinLogicNode {
@ -244,7 +245,8 @@ typedef enum EWindowType {
WINDOW_TYPE_INTERVAL = 1,
WINDOW_TYPE_SESSION,
WINDOW_TYPE_STATE,
WINDOW_TYPE_EVENT
WINDOW_TYPE_EVENT,
WINDOW_TYPE_COUNT
} EWindowType;
typedef enum EWindowAlgorithm {
@ -257,6 +259,7 @@ typedef enum EWindowAlgorithm {
SESSION_ALGO_STREAM_FINAL,
SESSION_ALGO_STREAM_SINGLE,
SESSION_ALGO_MERGE,
INTERVAL_ALGO_STREAM_MID,
} EWindowAlgorithm;
typedef struct SWindowLogicNode {
@ -281,6 +284,8 @@ typedef struct SWindowLogicNode {
int8_t igCheckUpdate;
EWindowAlgorithm windowAlgo;
bool isPartTb;
int64_t windowCount;
int64_t windowSliding;
} SWindowLogicNode;
typedef struct SFillLogicNode {
@ -439,6 +444,7 @@ typedef struct STableScanPhysiNode {
int8_t igCheckUpdate;
bool filesetDelimited;
bool needCountEmptyTable;
bool paraTablesSort;
} STableScanPhysiNode;
typedef STableScanPhysiNode STableSeqScanPhysiNode;
@ -587,6 +593,7 @@ typedef SIntervalPhysiNode SMergeAlignedIntervalPhysiNode;
typedef SIntervalPhysiNode SStreamIntervalPhysiNode;
typedef SIntervalPhysiNode SStreamFinalIntervalPhysiNode;
typedef SIntervalPhysiNode SStreamSemiIntervalPhysiNode;
typedef SIntervalPhysiNode SStreamMidIntervalPhysiNode;
typedef struct SFillPhysiNode {
SPhysiNode node;
@ -629,6 +636,14 @@ typedef struct SEventWinodwPhysiNode {
typedef SEventWinodwPhysiNode SStreamEventWinodwPhysiNode;
typedef struct SCountWinodwPhysiNode {
SWindowPhysiNode window;
int64_t windowCount;
int64_t windowSliding;
} SCountWinodwPhysiNode;
typedef SCountWinodwPhysiNode SStreamCountWinodwPhysiNode;
typedef struct SSortPhysiNode {
SPhysiNode node;
SNodeList* pExprs; // these are expression list of order_by_clause and parameter expression of aggregate function
@ -713,8 +728,10 @@ typedef struct SSubplan {
SNode* pTagCond;
SNode* pTagIndexCond;
bool showRewrite;
int32_t rowsThreshold;
bool isView;
bool isAudit;
bool dynamicRowThreshold;
int32_t rowsThreshold;
} SSubplan;
typedef enum EExplainMode { EXPLAIN_MODE_DISABLE = 1, EXPLAIN_MODE_STATIC, EXPLAIN_MODE_ANALYZE } EExplainMode;
View File
@ -128,6 +128,7 @@ typedef enum EHintOption {
HINT_BATCH_SCAN,
HINT_SORT_FOR_GROUP,
HINT_PARTITION_FIRST,
HINT_PARA_TABLES_SORT
} EHintOption;
typedef struct SHintNode {
@ -278,6 +279,13 @@ typedef struct SEventWindowNode {
SNode* pEndCond;
} SEventWindowNode;
typedef struct SCountWindowNode {
ENodeType type; // QUERY_NODE_EVENT_WINDOW
SNode* pCol; // timestamp primary key
int64_t windowCount;
int64_t windowSliding;
} SCountWindowNode;
typedef enum EFillMode {
FILL_MODE_NONE = 1,
FILL_MODE_VALUE,
View File
@ -86,8 +86,10 @@ typedef struct SParseContext {
bool enableSysInfo;
bool async;
bool hasInvisibleCol;
const char* svrVer;
bool isView;
bool isAudit;
bool nodeOffline;
const char* svrVer;
SArray* pTableMetaPos; // sql table pos => catalog data pos
SArray* pTableVgroupPos; // sql table pos => catalog data pos
int64_t allocatorId;
@ -106,7 +108,7 @@ int32_t qAnalyseSqlSemantic(SParseContext* pCxt, const struct SCatalogReq* pCata
const struct SMetaData* pMetaData, SQuery* pQuery);
int32_t qContinueParseSql(SParseContext* pCxt, struct SCatalogReq* pCatalogReq, const struct SMetaData* pMetaData,
SQuery* pQuery);
int32_t qContinueParsePostQuery(SParseContext* pCxt, SQuery* pQuery, void** pResRow);
int32_t qContinueParsePostQuery(SParseContext* pCxt, SQuery* pQuery, SSDataBlock* pBlock);
void qDestroyParseContext(SParseContext* pCxt);
@ -150,7 +152,7 @@ int32_t smlBindData(SQuery* handle, bool dataFormat, SArray* tags, SArray* colsS
STableMeta* pTableMeta, char* tableName, const char* sTableName, int32_t sTableNameLen, int32_t ttl,
char* msgBuf, int32_t msgBufLen);
int32_t smlBuildOutput(SQuery* handle, SHashObj* pVgHash);
int rawBlockBindData(SQuery *query, STableMeta* pTableMeta, void* data, SVCreateTbReq* pCreateTb, TAOS_FIELD *fields, int numFields, bool needChangeLength);
int rawBlockBindData(SQuery *query, STableMeta* pTableMeta, void* data, SVCreateTbReq** pCreateTb, TAOS_FIELD *fields, int numFields, bool needChangeLength);
int32_t rewriteToVnodeModifyOpStmt(SQuery* pQuery, SArray* pBufArray);
SArray* serializeVgroupsCreateTableBatch(SHashObj* pVgroupHashmap);


@ -32,6 +32,8 @@ typedef struct SPlanContext {
bool streamQuery;
bool rSmaQuery;
bool showRewrite;
bool isView;
bool isAudit;
int8_t triggerType;
int64_t watermark;
int64_t deleteMark;


@ -66,7 +66,11 @@ typedef enum {
#define QUERY_RSP_POLICY_QUICK 1
#define QUERY_MSG_MASK_SHOW_REWRITE() (1 << 0)
#define TEST_SHOW_REWRITE_MASK(m) (((m)&QUERY_MSG_MASK_SHOW_REWRITE()) != 0)
#define QUERY_MSG_MASK_AUDIT() (1 << 1)
#define QUERY_MSG_MASK_VIEW() (1 << 2)
#define TEST_SHOW_REWRITE_MASK(m) (((m) & QUERY_MSG_MASK_SHOW_REWRITE()) != 0)
#define TEST_AUDIT_MASK(m) (((m) & QUERY_MSG_MASK_AUDIT()) != 0)
#define TEST_VIEW_MASK(m) (((m) & QUERY_MSG_MASK_VIEW()) != 0)
typedef struct STableComInfo {
uint8_t numOfTags; // the number of tags in schema
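The mask macros above pack the show-rewrite, audit, and view flags into one int32_t message mask. A minimal sketch of setting and testing them, using only the definitions shown:

int32_t msgMask = 0;
msgMask |= QUERY_MSG_MASK_SHOW_REWRITE(); // statement was rewritten from a SHOW command
msgMask |= QUERY_MSG_MASK_VIEW();         // statement targets a view
if (TEST_VIEW_MASK(msgMask) && !TEST_AUDIT_MASK(msgMask)) {
  // take the view code path without audit bookkeeping
}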
@ -338,6 +342,11 @@ extern int32_t (*queryProcessMsgRsp[TDMT_MAX])(void* output, char* msg, int32_t
#define IS_SYS_DBNAME(_dbname) (IS_INFORMATION_SCHEMA_DB(_dbname) || IS_PERFORMANCE_SCHEMA_DB(_dbname))
#define IS_AUDIT_DBNAME(_dbname) ((*(_dbname) == 'a') && (0 == strcmp(_dbname, TSDB_AUDIT_DB)))
#define IS_AUDIT_STB_NAME(_stbname) ((*(_stbname) == 'o') && (0 == strcmp(_stbname, TSDB_AUDIT_STB_OPERATION)))
#define IS_AUDIT_CTB_NAME(_ctbname) \
((*(_ctbname) == 't') && (0 == strncmp(_ctbname, TSDB_AUDIT_CTB_OPERATION, TSDB_AUDIT_CTB_OPERATION_LEN)))
#define qFatal(...) \
do { \
if (qDebugFlag & DEBUG_FATAL) { \


@ -96,9 +96,7 @@ int32_t winDurFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOu
int32_t qStartTsFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t qEndTsFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t qTbnameFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t qTbUidFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t qVgIdFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t qPseudoTagFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
/* Aggregation functions */
int32_t countScalarFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);


@ -13,6 +13,9 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _STREAM_STATE_H_
#define _STREAM_STATE_H_
#include "tdatablock.h"
#include "rocksdb/c.h"
@ -20,9 +23,6 @@
#include "tsimplehash.h"
#include "tstreamFileState.h"
#ifndef _STREAM_STATE_H_
#define _STREAM_STATE_H_
#ifdef __cplusplus
extern "C" {
#endif
@ -55,13 +55,16 @@ int32_t streamStateSessionAddIfNotExist(SStreamState* pState, SSessionKey* key,
int32_t streamStateSessionPut(SStreamState* pState, const SSessionKey* key, void* value, int32_t vLen);
int32_t streamStateSessionGet(SStreamState* pState, SSessionKey* key, void** pVal, int32_t* pVLen);
int32_t streamStateSessionDel(SStreamState* pState, const SSessionKey* key);
int32_t streamStateSessionReset(SStreamState* pState, void* pVal);
int32_t streamStateSessionClear(SStreamState* pState);
int32_t streamStateSessionGetKVByCur(SStreamStateCur* pCur, SSessionKey* pKey, void** pVal, int32_t* pVLen);
int32_t streamStateSessionGetKeyByRange(SStreamState* pState, const SSessionKey* range, SSessionKey* curKey);
int32_t streamStateCountGetKeyByRange(SStreamState* pState, const SSessionKey* range, SSessionKey* curKey);
int32_t streamStateSessionAllocWinBuffByNextPosition(SStreamState* pState, SStreamStateCur* pCur,
const SSessionKey* pKey, void** pVal, int32_t* pVLen);
SStreamStateCur* streamStateSessionSeekKeyNext(SStreamState* pState, const SSessionKey* key);
SStreamStateCur* streamStateCountSeekKeyPrev(SStreamState* pState, const SSessionKey* pKey, COUNT_TYPE count);
SStreamStateCur* streamStateSessionSeekKeyCurrentPrev(SStreamState* pState, const SSessionKey* key);
SStreamStateCur* streamStateSessionSeekKeyCurrentNext(SStreamState* pState, const SSessionKey* key);
@ -79,6 +82,10 @@ int32_t streamStateReleaseBuf(SStreamState* pState, void* pVal, bool used);
int32_t streamStateClearBuff(SStreamState* pState, void* pVal);
void streamStateFreeVal(void* val);
// count window
int32_t streamStateCountWinAddIfNotExist(SStreamState* pState, SSessionKey* pKey, COUNT_TYPE winCount, void** ppVal, int32_t* pVLen);
int32_t streamStateCountWinAdd(SStreamState* pState, SSessionKey* pKey, void** pVal, int32_t* pVLen);
SStreamStateCur* streamStateGetAndCheckCur(SStreamState* pState, SWinKey* key);
SStreamStateCur* streamStateSeekKeyNext(SStreamState* pState, const SWinKey* key);
SStreamStateCur* streamStateFillSeekKeyNext(SStreamState* pState, const SWinKey* key);
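The two count-window functions above extend the stream state store. A hedged sketch of the fetch-or-create call, assuming pState comes from the owning operator and that the SSessionKey layout and the returned buffer are managed by the state layer:

SSessionKey key = {0}; // window key for the current group (layout assumed)
void   *pVal = NULL;
int32_t vLen = 0;
// fetch or create the state buffer for a count window of up to 10 rows
int32_t code = streamStateCountWinAddIfNotExist(pState, &key, 10, &pVal, &vLen);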
@ -128,10 +135,6 @@ int sessionRangeKeyCmpr(const SSessionKey* pWin1, const SSessionKey* pWin2);
int sessionWinKeyCmpr(const SSessionKey* pWin1, const SSessionKey* pWin2);
int stateSessionKeyCmpr(const void* pKey1, int kLen1, const void* pKey2, int kLen2);
int stateKeyCmpr(const void* pKey1, int kLen1, const void* pKey2, int kLen2);
#if 0
char* streamStateSessionDump(SStreamState* pState);
char* streamStateIntervalDump(SStreamState* pState);
#endif
#ifdef __cplusplus
}


@ -13,6 +13,9 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _STREAM_H_
#define _STREAM_H_
#include "os.h"
#include "streamState.h"
#include "tdatablock.h"
@ -26,9 +29,6 @@
extern "C" {
#endif
#ifndef _STREAM_H_
#define _STREAM_H_
#define ONE_MiB_F (1048576.0)
#define ONE_KiB_F (1024.0)
#define SIZE_IN_MiB(_v) ((_v) / ONE_MiB_F)
@ -50,21 +50,21 @@ extern "C" {
(_t)->hTaskInfo.id.streamId = 0; \
} while (0)
#define STREAM_EXEC_T_EXTRACT_WAL_DATA (-1)
#define STREAM_EXEC_T_START_ALL_TASKS (-2)
#define STREAM_EXEC_T_START_ONE_TASK (-3)
#define STREAM_EXEC_T_RESTART_ALL_TASKS (-4)
#define STREAM_EXEC_T_STOP_ALL_TASKS (-5)
#define STREAM_EXEC_T_RESUME_TASK (-6)
#define STREAM_EXEC_T_UPDATE_TASK_EPSET (-7)
#define STREAM_EXEC_T_EXTRACT_WAL_DATA (-1)
#define STREAM_EXEC_T_START_ALL_TASKS (-2)
#define STREAM_EXEC_T_START_ONE_TASK (-3)
#define STREAM_EXEC_T_RESTART_ALL_TASKS (-4)
#define STREAM_EXEC_T_STOP_ALL_TASKS (-5)
#define STREAM_EXEC_T_RESUME_TASK (-6)
#define STREAM_EXEC_T_UPDATE_TASK_EPSET (-7)
typedef struct SStreamTask SStreamTask;
typedef struct SStreamQueue SStreamQueue;
typedef struct SStreamTaskSM SStreamTaskSM;
#define SSTREAM_TASK_VER 3
#define SSTREAM_TASK_INCOMPATIBLE_VER 1
#define SSTREAM_TASK_NEED_CONVERT_VER 2
#define SSTREAM_TASK_VER 3
#define SSTREAM_TASK_INCOMPATIBLE_VER 1
#define SSTREAM_TASK_NEED_CONVERT_VER 2
#define SSTREAM_TASK_SUBTABLE_CHANGED_VER 3
enum {
@ -313,7 +313,7 @@ typedef struct SCheckpointInfo {
int64_t failedId; // record the latest failed checkpoint id
int64_t checkpointingId;
int32_t downstreamAlignNum;
int32_t checkpointNotReadyTasks;
int32_t numOfNotReady;
bool dispatchCheckpointTrigger;
int64_t msgVer;
int32_t transId;
@ -324,12 +324,13 @@ typedef struct SStreamStatus {
int8_t taskStatus;
int8_t downstreamReady; // downstream tasks are all ready now, if this flag is set
int8_t schedStatus;
int32_t schedIdleTime; // idle time before invoke again
int64_t lastExecTs; // last exec time stamp
int8_t statusBackup;
bool appendTranstateBlock; // has append the transfer state data block already
int32_t timerActive; // timer is active
int32_t schedIdleTime; // idle time before invoke again
int32_t timerActive; // timer is active
int64_t lastExecTs; // last exec time stamp
int32_t inScanHistorySentinel;
bool appendTranstateBlock; // has append the transfer state data block already
bool supplementaryWalscan; // complete the supplementary wal scan or not
} SStreamStatus;
typedef struct SDataRange {
@ -404,8 +405,8 @@ typedef struct SHistoryTaskInfo {
int32_t tickCount;
int32_t retryTimes;
int32_t waitInterval;
int64_t haltVer; // offset in wal when halt the stream task
bool operatorOpen; // false by default
int64_t haltVer; // offset in wal when halt the stream task
bool operatorOpen; // false by default
} SHistoryTaskInfo;
typedef struct STaskOutputInfo {
@ -462,22 +463,22 @@ struct SStreamTask {
struct SStreamMeta* pMeta;
SSHashObj* pNameMap;
void* pBackend;
int64_t backendRefId;
char reserve[256];
int8_t subtableWithoutMd5;
char reserve[255];
};
typedef int32_t (*startComplete_fn_t)(struct SStreamMeta*);
typedef struct STaskStartInfo {
int64_t startTs;
int64_t readyTs;
int32_t tasksWillRestart;
int32_t taskStarting; // restart flag, sentinel to guard the restart procedure.
SHashObj* pReadyTaskSet; // tasks that are all ready for running stream processing
SHashObj* pFailedTaskSet; // tasks that are done the check downstream process, may be successful or failed
int64_t elapsedTime;
int32_t restartCount; // restart task counter
startComplete_fn_t completeFn; // complete callback function
int64_t startTs;
int64_t readyTs;
int32_t tasksWillRestart;
int32_t taskStarting; // restart flag, sentinel to guard the restart procedure.
SHashObj* pReadyTaskSet; // tasks that are all ready for running stream processing
SHashObj* pFailedTaskSet; // tasks that are done the check downstream process, may be successful or failed
int64_t elapsedTime;
int32_t restartCount; // restart task counter
startComplete_fn_t completeFn; // complete callback function
} STaskStartInfo;
typedef struct STaskUpdateInfo {
@ -504,7 +505,7 @@ typedef struct SStreamMeta {
int32_t vgId;
int64_t stage;
int32_t role;
bool sendMsgBeforeClosing; // send hb to mnode before close all tasks when switch to follower.
bool sendMsgBeforeClosing; // send hb to mnode before close all tasks when switch to follower.
STaskStartInfo startInfo;
TdThreadRwlock lock;
SScanWalInfo scanInfo;
@ -531,8 +532,8 @@ typedef struct SStreamMeta {
int32_t tEncodeStreamEpInfo(SEncoder* pEncoder, const SStreamChildEpInfo* pInfo);
int32_t tDecodeStreamEpInfo(SDecoder* pDecoder, SStreamChildEpInfo* pInfo);
SStreamTask* tNewStreamTask(int64_t streamId, int8_t taskLevel, bool fillHistory, int64_t triggerParam,
SArray* pTaskList, bool hasFillhistory);
SStreamTask* tNewStreamTask(int64_t streamId, int8_t taskLevel, SEpSet* pEpset, bool fillHistory, int64_t triggerParam,
SArray* pTaskList, bool hasFillhistory, int8_t subtableWithoutMd5);
int32_t tEncodeStreamTask(SEncoder* pEncoder, const SStreamTask* pTask);
int32_t tDecodeStreamTask(SDecoder* pDecoder, SStreamTask* pTask);
void tFreeStreamTask(SStreamTask* pTask);
@ -629,17 +630,7 @@ typedef struct {
int8_t igUntreated;
} SStreamScanHistoryReq;
typedef struct {
int64_t streamId;
int32_t upstreamTaskId;
int32_t downstreamTaskId;
int32_t upstreamNodeId;
int32_t childId;
} SStreamScanHistoryFinishReq;
int32_t tEncodeStreamScanHistoryFinishReq(SEncoder* pEncoder, const SStreamScanHistoryFinishReq* pReq);
int32_t tDecodeStreamScanHistoryFinishReq(SDecoder* pDecoder, SStreamScanHistoryFinishReq* pReq);
// mndTrigger: denote if this checkpoint is triggered by mnode or as requested from tasks when transfer-state finished
typedef struct {
int64_t streamId;
int64_t checkpointId;
@ -648,6 +639,7 @@ typedef struct {
SEpSet mgmtEps;
int32_t mnodeId;
int32_t transId;
int8_t mndTrigger;
int64_t expireTime;
} SStreamCheckpointSourceReq;
@ -665,7 +657,6 @@ int32_t tEncodeStreamCheckpointSourceReq(SEncoder* pEncoder, const SStreamCheckp
int32_t tDecodeStreamCheckpointSourceReq(SDecoder* pDecoder, SStreamCheckpointSourceReq* pReq);
int32_t tEncodeStreamCheckpointSourceRsp(SEncoder* pEncoder, const SStreamCheckpointSourceRsp* pRsp);
int32_t tDecodeStreamCheckpointSourceRsp(SDecoder* pDecoder, SStreamCheckpointSourceRsp* pRsp);
typedef struct {
SMsgHead msgHead;
@ -687,18 +678,18 @@ typedef struct STaskStatusEntry {
int32_t statusLastDuration; // to record the last duration of current status
int64_t stage;
int32_t nodeId;
int64_t verStart; // start version in WAL, only valid for source task
int64_t verEnd; // end version in WAL, only valid for source task
int64_t processedVer; // only valid for source task
int64_t checkpointId; // current active checkpoint id
int32_t chkpointTransId; // checkpoint trans id
int8_t checkpointFailed; // denote if the checkpoint is failed or not
bool inputQChanging; // inputQ is changing or not
int64_t verStart; // start version in WAL, only valid for source task
int64_t verEnd; // end version in WAL, only valid for source task
int64_t processedVer; // only valid for source task
int64_t checkpointId; // current active checkpoint id
int32_t chkpointTransId; // checkpoint trans id
int8_t checkpointFailed; // denote if the checkpoint is failed or not
bool inputQChanging; // inputQ is changing or not
int64_t inputQUnchangeCounter;
double inputQUsed; // in MiB
double inputQUsed; // in MiB
double inputRate;
double sinkQuota; // existed quota size for sink task
double sinkDataSize; // sink to dst data size
double sinkQuota; // existed quota size for sink task
double sinkDataSize; // sink to dst data size
} STaskStatusEntry;
typedef struct SStreamHbMsg {
@ -712,17 +703,6 @@ int32_t tEncodeStreamHbMsg(SEncoder* pEncoder, const SStreamHbMsg* pRsp);
int32_t tDecodeStreamHbMsg(SDecoder* pDecoder, SStreamHbMsg* pRsp);
void streamMetaClearHbMsg(SStreamHbMsg* pMsg);
typedef struct {
int64_t streamId;
int32_t upstreamTaskId;
int32_t upstreamNodeId;
int32_t downstreamId;
int32_t downstreamNode;
} SStreamCompleteHistoryMsg;
int32_t tEncodeCompleteHistoryDataMsg(SEncoder* pEncoder, const SStreamCompleteHistoryMsg* pReq);
int32_t tDecodeCompleteHistoryDataMsg(SDecoder* pDecoder, SStreamCompleteHistoryMsg* pReq);
typedef struct SNodeUpdateInfo {
int32_t nodeId;
SEpSet prevEp;
@ -740,23 +720,10 @@ int32_t tEncodeStreamTaskUpdateMsg(SEncoder* pEncoder, const SStreamTaskNodeUpda
int32_t tDecodeStreamTaskUpdateMsg(SDecoder* pDecoder, SStreamTaskNodeUpdateMsg* pMsg);
typedef struct SStreamTaskState {
ETaskStatus state;
char* name;
ETaskStatus state;
char* name;
} SStreamTaskState;
typedef struct {
int64_t streamId;
int32_t downstreamTaskId;
int32_t taskId;
} SStreamRecoverDownstreamReq;
typedef struct {
int64_t streamId;
int32_t downstreamTaskId;
int32_t taskId;
SArray* checkpointVer; // SArray<SStreamCheckpointInfo>
} SStreamRecoverDownstreamRsp;
int32_t tEncodeStreamTaskCheckReq(SEncoder* pEncoder, const SStreamTaskCheckReq* pReq);
int32_t tDecodeStreamTaskCheckReq(SDecoder* pDecoder, SStreamTaskCheckReq* pReq);
@ -765,17 +732,27 @@ int32_t tDecodeStreamTaskCheckRsp(SDecoder* pDecoder, SStreamTaskCheckRsp* pRsp)
int32_t tEncodeStreamDispatchReq(SEncoder* pEncoder, const SStreamDispatchReq* pReq);
int32_t tDecodeStreamDispatchReq(SDecoder* pDecoder, SStreamDispatchReq* pReq);
void tDeleteStreamDispatchReq(SStreamDispatchReq* pReq);
int32_t tEncodeStreamRetrieveReq(SEncoder* pEncoder, const SStreamRetrieveReq* pReq);
int32_t tDecodeStreamRetrieveReq(SDecoder* pDecoder, SStreamRetrieveReq* pReq);
void tDeleteStreamRetrieveReq(SStreamRetrieveReq* pReq);
void tDeleteStreamDispatchReq(SStreamDispatchReq* pReq);
typedef struct SStreamTaskCheckpointReq {
int64_t streamId;
int32_t taskId;
int32_t nodeId;
} SStreamTaskCheckpointReq;
int32_t tEncodeStreamTaskCheckpointReq(SEncoder* pEncoder, const SStreamTaskCheckpointReq* pReq);
int32_t tDecodeStreamTaskCheckpointReq(SDecoder* pDecoder, SStreamTaskCheckpointReq* pReq);
int32_t streamSetupScheduleTrigger(SStreamTask* pTask);
int32_t streamProcessDispatchMsg(SStreamTask* pTask, SStreamDispatchReq* pReq, SRpcMsg* pMsg);
int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, int32_t code);
int32_t streamProcessRetrieveReq(SStreamTask* pTask, SStreamRetrieveReq* pReq, SRpcMsg* pMsg);
int32_t streamProcessRetrieveReq(SStreamTask* pTask, SStreamRetrieveReq* pReq);
SStreamChildEpInfo* streamTaskGetUpstreamTaskEpInfo(SStreamTask* pTask, int32_t taskId);
void streamTaskInputFail(SStreamTask* pTask);
@ -787,38 +764,35 @@ bool streamTaskShouldPause(const SStreamTask* pStatus);
bool streamTaskIsIdle(const SStreamTask* pTask);
bool streamTaskReadyToRun(const SStreamTask* pTask, char** pStatus);
char* createStreamTaskIdStr(int64_t streamId, int32_t taskId);
char* createStreamTaskIdStr(int64_t streamId, int32_t taskId);
SStreamTaskState* streamTaskGetStatus(const SStreamTask* pTask);
const char* streamTaskGetStatusStr(ETaskStatus status);
void streamTaskResetStatus(SStreamTask* pTask);
void streamTaskSetStatusReady(SStreamTask* pTask);
const char* streamTaskGetStatusStr(ETaskStatus status);
void streamTaskResetStatus(SStreamTask* pTask);
void streamTaskSetStatusReady(SStreamTask* pTask);
ETaskStatus streamTaskGetPrevStatus(const SStreamTask* pTask);
void initRpcMsg(SRpcMsg* pMsg, int32_t msgType, void* pCont, int32_t contLen);
// recover and fill history
void streamTaskCheckDownstream(SStreamTask* pTask);
int32_t streamTaskCheckStatus(SStreamTask* pTask, int32_t upstreamTaskId, int32_t vgId, int64_t stage,
int64_t* oldStage);
int32_t streamTaskCheckStatus(SStreamTask* pTask, int32_t upstreamId, int32_t vgId, int64_t stage, int64_t* oldStage);
int32_t streamTaskUpdateEpsetInfo(SStreamTask* pTask, SArray* pNodeList);
void streamTaskResetUpstreamStageInfo(SStreamTask* pTask);
bool streamTaskIsAllUpstreamClosed(SStreamTask* pTask);
bool streamTaskSetSchedStatusWait(SStreamTask* pTask);
int8_t streamTaskSetSchedStatusActive(SStreamTask* pTask);
int8_t streamTaskSetSchedStatusInactive(SStreamTask* pTask);
int32_t streamTaskClearHTaskAttr(SStreamTask* pTask);
int32_t streamTaskClearHTaskAttr(SStreamTask* pTask, int32_t clearRelHalt, bool metaLock);
int32_t streamTaskHandleEvent(SStreamTaskSM* pSM, EStreamTaskEvent event);
int32_t streamTaskHandleEventAsync(SStreamTaskSM* pSM, EStreamTaskEvent event, void* pFn);
int32_t streamTaskOnHandleEventSuccess(SStreamTaskSM* pSM, EStreamTaskEvent event);
void streamTaskRestoreStatus(SStreamTask* pTask);
int32_t streamTaskStop(SStreamTask* pTask);
int32_t streamSendCheckRsp(const SStreamMeta* pMeta, const SStreamTaskCheckReq* pReq, SStreamTaskCheckRsp* pRsp,
SRpcHandleInfo* pRpcInfo, int32_t taskId);
int32_t streamProcessCheckRsp(SStreamTask* pTask, const SStreamTaskCheckRsp* pRsp);
int32_t streamLaunchFillHistoryTask(SStreamTask* pTask);
int32_t streamTaskScanHistoryDataComplete(SStreamTask* pTask);
int32_t streamStartScanHistoryAsync(SStreamTask* pTask, int8_t igUntreated);
int32_t streamReExecScanHistoryFuture(SStreamTask* pTask, int32_t idleDuration);
bool streamHistoryTaskSetVerRangeStep2(SStreamTask* pTask, int64_t latestVer);
@ -826,9 +800,9 @@ bool streamHistoryTaskSetVerRangeStep2(SStreamTask* pTask, int64_t latestVer)
int32_t streamQueueGetNumOfItems(const SStreamQueue* pQueue);
// common
int32_t streamRestoreParam(SStreamTask* pTask);
void streamTaskPause(SStreamMeta* pMeta, SStreamTask* pTask);
void streamTaskResume(SStreamTask* pTask);
int32_t streamTaskStop(SStreamTask* pTask);
int32_t streamTaskSetUpstreamInfo(SStreamTask* pTask, const SStreamTask* pUpstreamTask);
void streamTaskUpdateUpstreamInfo(SStreamTask* pTask, int32_t nodeId, const SEpSet* pEpSet);
void streamTaskUpdateDownstreamInfo(SStreamTask* pTask, int32_t nodeId, const SEpSet* pEpSet);
@ -839,6 +813,7 @@ void streamTaskCloseUpstreamInput(SStreamTask* pTask, int32_t taskId);
void streamTaskOpenAllUpstreamInput(SStreamTask* pTask);
int32_t streamTaskSetDb(SStreamMeta* pMeta, void* pTask, char* key);
bool streamTaskIsSinkTask(const SStreamTask* pTask);
int32_t streamTaskSendCheckpointReq(SStreamTask* pTask);
void streamTaskStatusInit(STaskStatusEntry* pEntry, const SStreamTask* pTask);
void streamTaskStatusCopy(STaskStatusEntry* pDst, const STaskStatusEntry* pSrc);
@ -847,16 +822,12 @@ void streamTaskStatusCopy(STaskStatusEntry* pDst, const STaskStatusEntry* pSrc);
int32_t streamSetParamForStreamScannerStep1(SStreamTask* pTask, SVersionRange* pVerRange, STimeWindow* pWindow);
int32_t streamSetParamForStreamScannerStep2(SStreamTask* pTask, SVersionRange* pVerRange, STimeWindow* pWindow);
SScanhistoryDataInfo streamScanHistoryData(SStreamTask* pTask, int64_t st);
int32_t streamDispatchScanHistoryFinishMsg(SStreamTask* pTask);
// agg level
int32_t streamProcessScanHistoryFinishReq(SStreamTask* pTask, SStreamScanHistoryFinishReq* pReq, SRpcHandleInfo* pInfo);
int32_t streamProcessScanHistoryFinishRsp(SStreamTask* pTask);
// stream task meta
void streamMetaInit();
void streamMetaCleanup();
SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandFunc, int32_t vgId, int64_t stage, startComplete_fn_t fn);
SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandFunc, int32_t vgId, int64_t stage,
startComplete_fn_t fn);
void streamMetaClose(SStreamMeta* streamMeta);
int32_t streamMetaSaveTask(SStreamMeta* pMeta, SStreamTask* pTask); // save to stream meta store
int32_t streamMetaRemoveTask(SStreamMeta* pMeta, STaskId* pKey);
@ -866,6 +837,7 @@ int32_t streamMetaGetNumOfTasks(SStreamMeta* pMeta);
SStreamTask* streamMetaAcquireTaskNoLock(SStreamMeta* pMeta, int64_t streamId, int32_t taskId);
SStreamTask* streamMetaAcquireTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId);
void streamMetaReleaseTask(SStreamMeta* pMeta, SStreamTask* pTask);
SStreamTask* streamMetaAcquireOneTask(SStreamTask* pTask);
void streamMetaClear(SStreamMeta* pMeta);
void streamMetaInitBackend(SStreamMeta* pMeta);
int32_t streamMetaCommit(SStreamMeta* pMeta);
@ -874,22 +846,22 @@ void streamMetaNotifyClose(SStreamMeta* pMeta);
void streamMetaStartHb(SStreamMeta* pMeta);
bool streamMetaTaskInTimer(SStreamMeta* pMeta);
int32_t streamMetaAddTaskLaunchResult(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, int64_t startTs,
int64_t endTs, bool ready);
int64_t endTs, bool ready);
int32_t streamMetaResetTaskStatus(SStreamMeta* pMeta);
void streamMetaRLock(SStreamMeta* pMeta);
void streamMetaRUnLock(SStreamMeta* pMeta);
void streamMetaWLock(SStreamMeta* pMeta);
void streamMetaWUnLock(SStreamMeta* pMeta);
void streamMetaResetStartInfo(STaskStartInfo* pMeta);
SArray* streamMetaSendMsgBeforeCloseTasks(SStreamMeta* pMeta);
void streamMetaUpdateStageRole(SStreamMeta* pMeta, int64_t stage, bool isLeader);
int32_t streamMetaLoadAllTasks(SStreamMeta* pMeta);
int32_t streamMetaStartAllTasks(SStreamMeta* pMeta);
int32_t streamMetaStopAllTasks(SStreamMeta* pMeta);
int32_t streamMetaStartOneTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId);
bool streamMetaAllTasksReady(const SStreamMeta* pMeta);
tmr_h streamTimerGetInstance();
void streamMetaRLock(SStreamMeta* pMeta);
void streamMetaRUnLock(SStreamMeta* pMeta);
void streamMetaWLock(SStreamMeta* pMeta);
void streamMetaWUnLock(SStreamMeta* pMeta);
void streamMetaResetStartInfo(STaskStartInfo* pMeta);
SArray* streamMetaSendMsgBeforeCloseTasks(SStreamMeta* pMeta);
void streamMetaUpdateStageRole(SStreamMeta* pMeta, int64_t stage, bool isLeader);
int32_t streamMetaLoadAllTasks(SStreamMeta* pMeta);
int32_t streamMetaStartAllTasks(SStreamMeta* pMeta);
int32_t streamMetaStopAllTasks(SStreamMeta* pMeta);
int32_t streamMetaStartOneTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId);
bool streamMetaAllTasksReady(const SStreamMeta* pMeta);
tmr_h streamTimerGetInstance();
// checkpoint
int32_t streamProcessCheckpointSourceReq(SStreamTask* pTask, SStreamCheckpointSourceReq* pReq);
@ -897,7 +869,7 @@ int32_t streamProcessCheckpointReadyMsg(SStreamTask* pTask);
int32_t streamTaskBuildCheckpoint(SStreamTask* pTask);
void streamTaskClearCheckInfo(SStreamTask* pTask, bool clearChkpReadyMsg);
int32_t streamAlignTransferState(SStreamTask* pTask);
int32_t streamBuildAndSendDropTaskMsg(SMsgCb* pMsgCb, int32_t vgId, SStreamTaskId* pTaskId);
int32_t streamBuildAndSendDropTaskMsg(SMsgCb* pMsgCb, int32_t vgId, SStreamTaskId* pTaskId, int64_t resetRelHalt);
int32_t streamAddCheckpointSourceRspMsg(SStreamCheckpointSourceReq* pReq, SRpcHandleInfo* pRpcInfo, SStreamTask* pTask,
int8_t isSucceed);
int32_t buildCheckpointSourceRsp(SStreamCheckpointSourceReq* pReq, SRpcHandleInfo* pRpcInfo, SRpcMsg* pMsg,
@ -905,6 +877,10 @@ int32_t buildCheckpointSourceRsp(SStreamCheckpointSourceReq* pReq, SRpcHandleInf
SStreamTaskSM* streamCreateStateMachine(SStreamTask* pTask);
void* streamDestroyStateMachine(SStreamTaskSM* pSM);
int32_t broadcastRetrieveMsg(SStreamTask* pTask, SStreamRetrieveReq *req);
void sendRetrieveRsp(SStreamRetrieveReq *pReq, SRpcMsg* pRsp);
#ifdef __cplusplus
}
#endif


@ -41,6 +41,8 @@ typedef int32_t (*_state_file_remove_fn)(SStreamFileState* pFileState, const voi
typedef int32_t (*_state_file_get_fn)(SStreamFileState* pFileState, void* pKey, void* data, int32_t* pDataLen);
typedef int32_t (*_state_file_clear_fn)(SStreamState* pState);
typedef int32_t (*range_cmpr_fn)(const SSessionKey* pWin1, const SSessionKey* pWin2);
SStreamFileState* streamFileStateInit(int64_t memSize, uint32_t keySize, uint32_t rowSize, uint32_t selectRowSize,
GetTsFun fp, void* pFile, TSKEY delMark, const char* taskId,
int64_t checkpointId, int8_t type);
@ -90,14 +92,19 @@ void sessionWinStateCleanup(void* pBuff);
SStreamStateCur* sessionWinStateSeekKeyCurrentPrev(SStreamFileState* pFileState, const SSessionKey* pWinKey);
SStreamStateCur* sessionWinStateSeekKeyCurrentNext(SStreamFileState* pFileState, const SSessionKey* pWinKey);
SStreamStateCur* sessionWinStateSeekKeyNext(SStreamFileState* pFileState, const SSessionKey* pWinKey);
SStreamStateCur* countWinStateSeekKeyPrev(SStreamFileState* pFileState, const SSessionKey* pWinKey, COUNT_TYPE count);
int32_t sessionWinStateGetKVByCur(SStreamStateCur* pCur, SSessionKey* pKey, void** pVal, int32_t* pVLen);
int32_t sessionWinStateMoveToNext(SStreamStateCur* pCur);
int32_t sessionWinStateGetKeyByRange(SStreamFileState* pFileState, const SSessionKey* key, SSessionKey* curKey);
int32_t sessionWinStateGetKeyByRange(SStreamFileState* pFileState, const SSessionKey* key, SSessionKey* curKey, range_cmpr_fn cmpFn);
// state window
int32_t getStateWinResultBuff(SStreamFileState* pFileState, SSessionKey* key, char* pKeyData, int32_t keyDataLen,
state_key_cmpr_fn fn, void** pVal, int32_t* pVLen);
// count window
int32_t getCountWinResultBuff(SStreamFileState* pFileState, SSessionKey* pKey, COUNT_TYPE winCount, void** pVal, int32_t* pVLen);
int32_t createCountWinResultBuff(SStreamFileState* pFileState, SSessionKey* pKey, void** pVal, int32_t* pVLen);
#ifdef __cplusplus
}
#endif


@ -222,6 +222,10 @@ void syslog(int unused, const char *format, ...);
do { \
prctl(PR_SET_NAME, (name)); \
} while (0)
#define getThreadName(name) \
do { \
prctl(PR_GET_NAME, (name)); \
} while (0)
#endif
#else
// Windows
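getThreadName above is the read-side twin of the existing setThreadName wrapper. A minimal Linux sketch of what the two macros expand to; PR_GET_NAME requires a buffer of at least 16 bytes:

#include <stdio.h>
#include <sys/prctl.h>

int main(void) {
  char name[16] = {0};             // kernel thread names are at most 15 chars + NUL
  prctl(PR_SET_NAME, "tdb-demo");  // what setThreadName(name) expands to
  prctl(PR_GET_NAME, name);        // what getThreadName(name) expands to
  printf("thread name: %s\n", name);
  return 0;
}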


@ -119,6 +119,8 @@ int32_t taosSetFileHandlesLimit();
int32_t taosLinkFile(char *src, char *dst);
bool lastErrorIsFileNotExist();
#ifdef __cplusplus
}
#endif


@ -69,6 +69,19 @@ typedef pthread_key_t TdThreadKey;
#define taosThreadCleanupPush pthread_cleanup_push
#define taosThreadCleanupPop pthread_cleanup_pop
#if !defined(WINDOWS)
#if defined(_TD_DARWIN_64) // MACOS
#define taosThreadRwlockAttrSetKindNP(A, B) ((void)0)
#else // LINUX
#if _XOPEN_SOURCE >= 500 || _POSIX_C_SOURCE >= 200809L
#define taosThreadRwlockAttrSetKindNP(A, B) pthread_rwlockattr_setkind_np(A, B)
#else
#define taosThreadRwlockAttrSetKindNP(A, B) ((void)0)
#endif
#endif
#else // WINDOWS
#define taosThreadRwlockAttrSetKindNP(A, B) ((void)0)
#endif
#if defined(WINDOWS) && !defined(__USE_PTHREAD)
#define TD_PTHREAD_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER_FORBID
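taosThreadRwlockAttrSetKindNP above compiles to a no-op wherever the GNU extension is unavailable. A minimal sketch of the Linux path it wraps, preferring writers so readers cannot starve a pending writer (the usual reason to set the lock kind):

#define _GNU_SOURCE
#include <pthread.h>

int main(void) {
  pthread_rwlockattr_t attr;
  pthread_rwlock_t     lock;
  pthread_rwlockattr_init(&attr);
  // GNU extension wrapped by the macro above
  pthread_rwlockattr_setkind_np(&attr, PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
  pthread_rwlock_init(&lock, &attr);
  pthread_rwlockattr_destroy(&attr);
  pthread_rwlock_destroy(&lock);
  return 0;
}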


@ -126,6 +126,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_IP_NOT_IN_WHITE_LIST TAOS_DEF_ERROR_CODE(0, 0x0134)
#define TSDB_CODE_FAILED_TO_CONNECT_S3 TAOS_DEF_ERROR_CODE(0, 0x0135)
#define TSDB_CODE_MSG_PREPROCESSED TAOS_DEF_ERROR_CODE(0, 0x0136) // internal
//client
#define TSDB_CODE_TSC_INVALID_OPERATION TAOS_DEF_ERROR_CODE(0, 0x0200)
@ -413,6 +414,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_MNODE_ONLY_TWO_MNODE TAOS_DEF_ERROR_CODE(0, 0x0414) // internal
#define TSDB_CODE_MNODE_NO_NEED_RESTORE TAOS_DEF_ERROR_CODE(0, 0x0415) // internal
#define TSDB_CODE_DNODE_ONLY_USE_WHEN_OFFLINE TAOS_DEF_ERROR_CODE(0, 0x0416)
#define TSDB_CODE_DNODE_NO_MACHINE_CODE TAOS_DEF_ERROR_CODE(0, 0x0417)
// mnode-sma
#define TSDB_CODE_MND_SMA_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0480)
@ -521,6 +523,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_QRY_INVALID_INPUT TAOS_DEF_ERROR_CODE(0, 0x070F)
// #define TSDB_CODE_QRY_INVALID_SCHEMA_VERSION TAOS_DEF_ERROR_CODE(0, 0x0710) // 2.x
// #define TSDB_CODE_QRY_RESULT_TOO_LARGE TAOS_DEF_ERROR_CODE(0, 0x0711) // 2.x
#define TSDB_CODE_QRY_INVALID_WINDOW_CONDITION TAOS_DEF_ERROR_CODE(0, 0x0712)
#define TSDB_CODE_QRY_SCH_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x0720)
#define TSDB_CODE_QRY_TASK_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x0721)
#define TSDB_CODE_QRY_TASK_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0722)
@ -529,7 +532,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_QRY_TASK_DROPPED TAOS_DEF_ERROR_CODE(0, 0x0725)
#define TSDB_CODE_QRY_TASK_CANCELLING TAOS_DEF_ERROR_CODE(0, 0x0726)
#define TSDB_CODE_QRY_TASK_DROPPING TAOS_DEF_ERROR_CODE(0, 0x0727)
#define TSDB_CODE_QRY_DUPLICATTED_OPERATION TAOS_DEF_ERROR_CODE(0, 0x0728)
#define TSDB_CODE_QRY_DUPLICATED_OPERATION TAOS_DEF_ERROR_CODE(0, 0x0728)
#define TSDB_CODE_QRY_TASK_MSG_ERROR TAOS_DEF_ERROR_CODE(0, 0x0729)
#define TSDB_CODE_QRY_JOB_FREED TAOS_DEF_ERROR_CODE(0, 0x072A)
#define TSDB_CODE_QRY_TASK_STATUS_ERROR TAOS_DEF_ERROR_CODE(0, 0x072B)
@ -552,7 +555,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_GRANT_STREAM_LIMITED TAOS_DEF_ERROR_CODE(0, 0x0807)
#define TSDB_CODE_GRANT_SPEED_LIMITED TAOS_DEF_ERROR_CODE(0, 0x0808)
#define TSDB_CODE_GRANT_STORAGE_LIMITED TAOS_DEF_ERROR_CODE(0, 0x0809)
#define TSDB_CODE_GRANT_QUERYTIME_LIMITED TAOS_DEF_ERROR_CODE(0, 0x080A)
#define TSDB_CODE_GRANT_SUBSCRIPTION_LIMITED TAOS_DEF_ERROR_CODE(0, 0x080A)
#define TSDB_CODE_GRANT_CPU_LIMITED TAOS_DEF_ERROR_CODE(0, 0x080B)
#define TSDB_CODE_GRANT_STABLE_LIMITED TAOS_DEF_ERROR_CODE(0, 0x080C)
#define TSDB_CODE_GRANT_TABLE_LIMITED TAOS_DEF_ERROR_CODE(0, 0x080D)
@ -561,8 +564,17 @@ int32_t* taosGetErrno();
#define TSDB_CODE_GRANT_PAR_DEC_IVLD_KEY TAOS_DEF_ERROR_CODE(0, 0x0810)
#define TSDB_CODE_GRANT_PAR_DEC_IVLD_KLEN TAOS_DEF_ERROR_CODE(0, 0x0811)
#define TSDB_CODE_GRANT_GEN_IVLD_KEY TAOS_DEF_ERROR_CODE(0, 0x0812)
#define TSDB_CODE_GRANT_GEN_APP_LIMIT TAOS_DEF_ERROR_CODE(0, 0x0813)
#define TSDB_CODE_GRANT_GEN_ACTIVE_LEN TAOS_DEF_ERROR_CODE(0, 0x0813)
#define TSDB_CODE_GRANT_GEN_ENC_IVLD_KLEN TAOS_DEF_ERROR_CODE(0, 0x0814)
#define TSDB_CODE_GRANT_PAR_IVLD_DIST TAOS_DEF_ERROR_CODE(0, 0x0815)
#define TSDB_CODE_GRANT_UNLICENSED_CLUSTER TAOS_DEF_ERROR_CODE(0, 0x0816)
#define TSDB_CODE_GRANT_LACK_OF_BASIC TAOS_DEF_ERROR_CODE(0, 0x0817)
#define TSDB_CODE_GRANT_OBJ_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x0818)
#define TSDB_CODE_GRANT_LAST_ACTIVE_NOT_FOUND TAOS_DEF_ERROR_CODE(0, 0x0819)
#define TSDB_CODE_GRANT_MACHINES_MISMATCH TAOS_DEF_ERROR_CODE(0, 0x0820)
#define TSDB_CODE_GRANT_OPT_EXPIRE_TOO_LARGE TAOS_DEF_ERROR_CODE(0, 0x0821)
#define TSDB_CODE_GRANT_DUPLICATED_ACTIVE TAOS_DEF_ERROR_CODE(0, 0x0822)
#define TSDB_CODE_GRANT_VIEW_LIMITED TAOS_DEF_ERROR_CODE(0, 0x0823)
// sync
// #define TSDB_CODE_SYN_INVALID_CONFIG TAOS_DEF_ERROR_CODE(0, 0x0900) // 2.x

include/util/tbase58.h (new file, 35 lines)

@ -0,0 +1,35 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _TD_UTIL_BASE58_H_
#define _TD_UTIL_BASE58_H_
#include "os.h"
#ifdef __cplusplus
extern "C" {
#endif
#define TBASE_MAX_ILEN 4096
#define TBASE_MAX_OLEN 5653
uint8_t *base58_decode(const char *value, size_t inlen, int32_t *outlen);
char *base58_encode(const uint8_t *value, int32_t vlen);
#ifdef __cplusplus
}
#endif
#endif /*_TD_UTIL_BASE58_H_*/

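tbase58.h above is the one complete new file in this excerpt. A round-trip sketch against its two declarations; heap allocation and caller ownership of the returned buffers are assumptions, since the header does not state who frees them:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "tbase58.h"

int main(void) {
  const uint8_t raw[] = {0xde, 0xad, 0xbe, 0xef};
  char    *txt = base58_encode(raw, (int32_t)sizeof(raw)); // binary -> base58 text
  int32_t  outlen = 0;
  uint8_t *bin = base58_decode(txt, strlen(txt), &outlen); // base58 text -> binary
  printf("encoded:%s decoded:%d bytes\n", txt, outlen);
  free(txt); // ownership assumed to be the caller's
  free(bin);
  return 0;
}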

@ -264,6 +264,7 @@ typedef enum ELogicConditionType {
#define TSDB_JOB_STATUS_LEN 32
#define TSDB_CLUSTER_ID_LEN 40
#define TSDB_MACHINE_ID_LEN 24
#define TSDB_FQDN_LEN 128
#define TSDB_EP_LEN (TSDB_FQDN_LEN + 6)
#define TSDB_IPv4ADDR_LEN 16
@ -285,6 +286,9 @@ typedef enum ELogicConditionType {
#define TSDB_DNODE_CONFIG_LEN 128
#define TSDB_DNODE_VALUE_LEN 256
#define TSDB_CLUSTER_VALUE_LEN 1000
#define TSDB_GRANT_LOG_COL_LEN 15600
#define TSDB_ACTIVE_KEY_LEN 109
#define TSDB_CONN_ACTIVE_KEY_LEN 255


@ -299,6 +299,8 @@ void doSetOneRowPtr(SReqResultInfo* pResultInfo);
void setResPrecision(SReqResultInfo* pResInfo, int32_t precision);
int32_t setQueryResultFromRsp(SReqResultInfo* pResultInfo, const SRetrieveTableRsp* pRsp, bool convertUcs4,
bool freeAfterUse);
int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32_t numOfCols, int32_t numOfRows,
bool convertUcs4);
void setResSchemaInfo(SReqResultInfo* pResInfo, const SSchema* pSchema, int32_t numOfCols);
void doFreeReqResultInfo(SReqResultInfo* pResInfo);
int32_t transferTableNameList(const char* tbList, int32_t acctId, char* dbName, SArray** pReq);


@ -681,8 +681,9 @@ void taos_init_imp(void) {
snprintf(logDirName, 64, "taoslog");
#endif
if (taosCreateLog(logDirName, 10, configDir, NULL, NULL, NULL, NULL, 1) != 0) {
// ignore create log failed, only print
printf(" WARING: Create %s failed:%s. configDir=%s\n", logDirName, strerror(errno), configDir);
tscInitRes = -1;
return;
}
if (taosInitCfg(configDir, NULL, NULL, NULL, NULL, 1) != 0) {
@ -750,8 +751,11 @@ int taos_options_imp(TSDB_OPTION option, const char *str) {
tstrncpy(configDir, str, PATH_MAX);
tscInfo("set cfg:%s to %s", configDir, str);
return 0;
} else {
taos_init(); // initialize global config
}
// initialize global config
if (taos_init() != 0) {
return -1;
}
SConfig *pCfg = taosGetCfg();


@ -843,7 +843,7 @@ int32_t hbGetExpiredViewInfo(SClientHbKey *connKey, struct SCatalog *pCatalog, S
view->version = htonl(view->version);
}
tscDebug("hb got %d expired view, valueLen:%lu", viewNum, sizeof(SViewVersion) * viewNum);
tscDebug("hb got %u expired view, valueLen:%lu", viewNum, sizeof(SViewVersion) * viewNum);
if (NULL == req->info) {
req->info = taosHashInit(64, hbKeyHashFunc, 1, HASH_ENTRY_LOCK);


@ -880,19 +880,21 @@ static bool incompletaFileParsing(SNode* pStmt) {
return QUERY_NODE_VNODE_MODIFY_STMT != nodeType(pStmt) ? false : ((SVnodeModifyOpStmt*)pStmt)->fileProcessing;
}
void continuePostSubQuery(SRequestObj* pRequest, TAOS_ROW row) {
void continuePostSubQuery(SRequestObj* pRequest, SSDataBlock* pBlock) {
SSqlCallbackWrapper* pWrapper = pRequest->pWrapper;
int32_t code = nodesAcquireAllocator(pWrapper->pParseCtx->allocatorId);
int32_t code = nodesAcquireAllocator(pWrapper->pParseCtx->allocatorId);
if (TSDB_CODE_SUCCESS == code) {
int64_t analyseStart = taosGetTimestampUs();
code = qContinueParsePostQuery(pWrapper->pParseCtx, pRequest->pQuery, (void**)row);
code = qContinueParsePostQuery(pWrapper->pParseCtx, pRequest->pQuery, pBlock);
pRequest->metric.analyseCostUs += taosGetTimestampUs() - analyseStart;
}
if (TSDB_CODE_SUCCESS == code) {
code = qContinuePlanPostQuery(pRequest->pPostPlan);
}
nodesReleaseAllocator(pWrapper->pParseCtx->allocatorId);
nodesReleaseAllocator(pWrapper->pParseCtx->allocatorId);
handleQueryAnslyseRes(pWrapper, NULL, code);
}
@ -916,6 +918,43 @@ void returnToUser(SRequestObj* pRequest) {
}
}
static SSDataBlock* createResultBlock(TAOS_RES* pRes, int32_t numOfRows) {
int64_t lastTs = 0;
TAOS_FIELD* pResFields = taos_fetch_fields(pRes);
int32_t numOfFields = taos_num_fields(pRes);
SSDataBlock* pBlock = createDataBlock();
for(int32_t i = 0; i < numOfFields; ++i) {
SColumnInfoData colInfoData = createColumnInfoData(pResFields[i].type, pResFields[i].bytes, i + 1);
blockDataAppendColInfo(pBlock, &colInfoData);
}
blockDataEnsureCapacity(pBlock, numOfRows);
for (int32_t i = 0; i < numOfRows; ++i) {
TAOS_ROW pRow = taos_fetch_row(pRes);
int64_t ts = *(int64_t*)pRow[0];
if (lastTs < ts) {
lastTs = ts;
}
for(int32_t j = 0; j < numOfFields; ++j) {
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, j);
colDataSetVal(pColInfoData, i, pRow[j], false);
}
tscDebug("lastKey:%" PRId64 " vgId:%d, vgVer:%" PRId64, ts, *(int32_t*)pRow[1], *(int64_t*)pRow[2]);
}
pBlock->info.window.ekey = lastTs;
pBlock->info.rows = numOfRows;
tscDebug("lastKey:%"PRId64" numOfRows:%d from all vgroups", lastTs, numOfRows);
return pBlock;
}
void postSubQueryFetchCb(void* param, TAOS_RES* res, int32_t rowNum) {
SRequestObj* pRequest = (SRequestObj*)res;
if (pRequest->code) {
@ -923,19 +962,17 @@ void postSubQueryFetchCb(void* param, TAOS_RES* res, int32_t rowNum) {
return;
}
TAOS_ROW row = NULL;
if (rowNum > 0) {
row = taos_fetch_row(res); // for single row only now
}
SSDataBlock* pBlock = createResultBlock(res, rowNum);
SRequestObj* pNextReq = acquireRequest(pRequest->relation.nextRefId);
if (pNextReq) {
continuePostSubQuery(pNextReq, row);
continuePostSubQuery(pNextReq, pBlock);
releaseRequest(pRequest->relation.nextRefId);
} else {
tscError("0x%" PRIx64 ", next req ref 0x%" PRIx64 " is not there, reqId:0x%" PRIx64, pRequest->self,
pRequest->relation.nextRefId, pRequest->requestId);
}
blockDataDestroy(pBlock);
}
void handlePostSubQuery(SSqlCallbackWrapper* pWrapper) {
@ -1117,6 +1154,8 @@ static int32_t asyncExecSchQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaDat
.mgmtEpSet = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp),
.pAstRoot = pQuery->pRoot,
.showRewrite = pQuery->showRewrite,
.isView = pWrapper->pParseCtx->isView,
.isAudit = pWrapper->pParseCtx->isAudit,
.pMsg = pRequest->msgBuf,
.msgLen = ERROR_MSG_BUF_DEFAULT_SIZE,
.pUser = pRequest->pTscObj->user,
@ -1795,6 +1834,7 @@ int32_t getVersion1BlockMetaSize(const char* p, int32_t numOfCols) {
static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, int32_t numOfRows) {
char* p = (char*)pResultInfo->pData;
int32_t blockVersion = *(int32_t*)p;
// | version | total length | total rows | total columns | flag seg| block group id | column schema | each column
// length |
@ -1810,7 +1850,7 @@ static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, i
char* pStart = p + len;
for (int32_t i = 0; i < numOfCols; ++i) {
int32_t colLen = htonl(colLength[i]);
int32_t colLen = (blockVersion == BLOCK_VERSION_1) ? htonl(colLength[i]) : colLength[i];
if (pResultInfo->fields[i].type == TSDB_DATA_TYPE_JSON) {
int32_t* offset = (int32_t*)pStart;
@ -1873,6 +1913,7 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int
tscDebug("start to convert form json format string");
char* p = (char*)pResultInfo->pData;
int32_t blockVersion = *(int32_t*)p;
int32_t dataLen = estimateJsonLen(pResultInfo, numOfCols, numOfRows);
if (dataLen <= 0) {
return TSDB_CODE_TSC_INTERNAL_ERROR;
@ -1908,8 +1949,8 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int
char* pStart = p;
char* pStart1 = p1;
for (int32_t i = 0; i < numOfCols; ++i) {
int32_t colLen = htonl(colLength[i]);
int32_t colLen1 = htonl(colLength1[i]);
int32_t colLen = (blockVersion == BLOCK_VERSION_1) ? htonl(colLength[i]) : colLength[i];
int32_t colLen1 = (blockVersion == BLOCK_VERSION_1) ? htonl(colLength1[i]) : colLength1[i];
if (ASSERT(colLen < dataLen)) {
tscError("doConvertJson error: colLen:%d >= dataLen:%d", colLen, dataLen);
return TSDB_CODE_TSC_INTERNAL_ERROR;
@ -1968,7 +2009,7 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int
}
colLen1 = len;
totalLen += colLen1;
colLength1[i] = htonl(len);
colLength1[i] = (blockVersion == BLOCK_VERSION_1) ? htonl(len) : len;
} else if (IS_VAR_DATA_TYPE(pResultInfo->fields[i].type)) {
len = numOfRows * sizeof(int32_t);
memcpy(pStart1, pStart, len);
@ -2057,7 +2098,9 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32
char* pStart = p;
for (int32_t i = 0; i < numOfCols; ++i) {
colLength[i] = htonl(colLength[i]);
if(blockVersion == BLOCK_VERSION_1){
colLength[i] = htonl(colLength[i]);
}
if (colLength[i] >= dataLen) {
tscError("invalid colLength %d, dataLen %d", colLength[i], dataLen);
return TSDB_CODE_TSC_INTERNAL_ERROR;
@ -2078,6 +2121,9 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32
pStart += colLength[i];
}
// bool blankFill = *(bool*)p;
p += sizeof(bool);
if (convertUcs4) {
code = doConvertUCS4(pResultInfo, numOfRows, numOfCols, colLength);
}
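The repeated blockVersion == BLOCK_VERSION_1 checks above implement one compatibility rule: version-1 blocks carry network-order column lengths, while later block versions are already host-order. A one-function restatement of that rule (ntohl performs the same byte swap as the htonl used in the code):

#include <arpa/inet.h>

static int32_t normalizeColLen(int32_t rawLen, int32_t blockVersion) {
  return (blockVersion == BLOCK_VERSION_1) ? (int32_t)ntohl((uint32_t)rawLen) : rawLen;
}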


@ -21,8 +21,8 @@
#include "tglobal.h"
#include "tmsgtype.h"
#define LOG_ID_TAG "connId:0x%"PRIx64",reqId:0x%"PRIx64
#define LOG_ID_VALUE *(int64_t*)taos,pRequest->requestId
#define LOG_ID_TAG "connId:0x%" PRIx64 ",reqId:0x%" PRIx64
#define LOG_ID_VALUE *(int64_t*)taos, pRequest->requestId
static tb_uid_t processSuid(tb_uid_t suid, char* db) { return suid + MurmurHash3_32(db, strlen(db)); }
@ -31,8 +31,7 @@ static char* buildCreateTableJson(SSchemaWrapper* schemaRow, SSchemaWrapper* sch
char* string = NULL;
cJSON* json = cJSON_CreateObject();
if (json == NULL) {
uError("create json object failed")
return NULL;
uError("create json object failed") return NULL;
}
cJSON* type = cJSON_CreateString("create");
cJSON_AddItemToObject(json, "type", type);
@ -56,7 +55,7 @@ static char* buildCreateTableJson(SSchemaWrapper* schemaRow, SSchemaWrapper* sch
cJSON_AddItemToObject(column, "name", cname);
cJSON* ctype = cJSON_CreateNumber(s->type);
cJSON_AddItemToObject(column, "type", ctype);
if (s->type == TSDB_DATA_TYPE_BINARY || s->type == TSDB_DATA_TYPE_VARBINARY|| s->type == TSDB_DATA_TYPE_GEOMETRY) {
if (s->type == TSDB_DATA_TYPE_BINARY || s->type == TSDB_DATA_TYPE_VARBINARY || s->type == TSDB_DATA_TYPE_GEOMETRY) {
int32_t length = s->bytes - VARSTR_HEADER_SIZE;
cJSON* cbytes = cJSON_CreateNumber(length);
cJSON_AddItemToObject(column, "length", cbytes);
@ -131,7 +130,8 @@ static char* buildAlterSTableJson(void* alterData, int32_t alterDataLen) {
cJSON* colType = cJSON_CreateNumber(field->type);
cJSON_AddItemToObject(json, "colType", colType);
if (field->type == TSDB_DATA_TYPE_BINARY || field->type == TSDB_DATA_TYPE_VARBINARY || field->type == TSDB_DATA_TYPE_GEOMETRY) {
if (field->type == TSDB_DATA_TYPE_BINARY || field->type == TSDB_DATA_TYPE_VARBINARY ||
field->type == TSDB_DATA_TYPE_GEOMETRY) {
int32_t length = field->bytes - VARSTR_HEADER_SIZE;
cJSON* cbytes = cJSON_CreateNumber(length);
cJSON_AddItemToObject(json, "colLength", cbytes);
@ -156,7 +156,8 @@ static char* buildAlterSTableJson(void* alterData, int32_t alterDataLen) {
cJSON_AddItemToObject(json, "colName", colName);
cJSON* colType = cJSON_CreateNumber(field->type);
cJSON_AddItemToObject(json, "colType", colType);
if (field->type == TSDB_DATA_TYPE_BINARY || field->type == TSDB_DATA_TYPE_VARBINARY || field->type == TSDB_DATA_TYPE_GEOMETRY) {
if (field->type == TSDB_DATA_TYPE_BINARY || field->type == TSDB_DATA_TYPE_VARBINARY ||
field->type == TSDB_DATA_TYPE_GEOMETRY) {
int32_t length = field->bytes - VARSTR_HEADER_SIZE;
cJSON* cbytes = cJSON_CreateNumber(length);
cJSON_AddItemToObject(json, "colLength", cbytes);
@ -182,7 +183,7 @@ static char* buildAlterSTableJson(void* alterData, int32_t alterDataLen) {
}
string = cJSON_PrintUnformatted(json);
end:
end:
cJSON_Delete(json);
tFreeSMAltertbReq(&req);
return string;
@ -295,9 +296,9 @@ static void buildChildElement(cJSON* json, SVCreateTbReq* pCreateReq) {
cJSON* tvalue = NULL;
if (IS_VAR_DATA_TYPE(pTagVal->type)) {
char* buf = NULL;
if(pTagVal->type == TSDB_DATA_TYPE_VARBINARY){
buf = taosMemoryCalloc(pTagVal->nData*2 + 2 + 3, 1);
}else{
if (pTagVal->type == TSDB_DATA_TYPE_VARBINARY) {
buf = taosMemoryCalloc(pTagVal->nData * 2 + 2 + 3, 1);
} else {
buf = taosMemoryCalloc(pTagVal->nData + 3, 1);
}
@ -315,7 +316,7 @@ static void buildChildElement(cJSON* json, SVCreateTbReq* pCreateReq) {
cJSON_AddItemToArray(tags, tag);
}
end:
end:
cJSON_AddItemToObject(json, "tags", tags);
taosArrayDestroy(pTagVals);
}
@ -469,7 +470,8 @@ static char* processAlterTable(SMqMetaRsp* metaRsp) {
cJSON* colType = cJSON_CreateNumber(vAlterTbReq.type);
cJSON_AddItemToObject(json, "colType", colType);
if (vAlterTbReq.type == TSDB_DATA_TYPE_BINARY || vAlterTbReq.type == TSDB_DATA_TYPE_VARBINARY || vAlterTbReq.type == TSDB_DATA_TYPE_GEOMETRY) {
if (vAlterTbReq.type == TSDB_DATA_TYPE_BINARY || vAlterTbReq.type == TSDB_DATA_TYPE_VARBINARY ||
vAlterTbReq.type == TSDB_DATA_TYPE_GEOMETRY) {
int32_t length = vAlterTbReq.bytes - VARSTR_HEADER_SIZE;
cJSON* cbytes = cJSON_CreateNumber(length);
cJSON_AddItemToObject(json, "colLength", cbytes);
@ -490,7 +492,8 @@ static char* processAlterTable(SMqMetaRsp* metaRsp) {
cJSON_AddItemToObject(json, "colName", colName);
cJSON* colType = cJSON_CreateNumber(vAlterTbReq.colModType);
cJSON_AddItemToObject(json, "colType", colType);
if (vAlterTbReq.colModType == TSDB_DATA_TYPE_BINARY || vAlterTbReq.colModType == TSDB_DATA_TYPE_VARBINARY || vAlterTbReq.colModType == TSDB_DATA_TYPE_GEOMETRY) {
if (vAlterTbReq.colModType == TSDB_DATA_TYPE_BINARY || vAlterTbReq.colModType == TSDB_DATA_TYPE_VARBINARY ||
vAlterTbReq.colModType == TSDB_DATA_TYPE_GEOMETRY) {
int32_t length = vAlterTbReq.colModBytes - VARSTR_HEADER_SIZE;
cJSON* cbytes = cJSON_CreateNumber(length);
cJSON_AddItemToObject(json, "colLength", cbytes);
@ -527,9 +530,9 @@ static char* processAlterTable(SMqMetaRsp* metaRsp) {
}
buf = parseTagDatatoJson(vAlterTbReq.pTagVal);
} else {
if(vAlterTbReq.tagType == TSDB_DATA_TYPE_VARBINARY){
buf = taosMemoryCalloc(vAlterTbReq.nTagVal*2 + 2 + 3, 1);
}else{
if (vAlterTbReq.tagType == TSDB_DATA_TYPE_VARBINARY) {
buf = taosMemoryCalloc(vAlterTbReq.nTagVal * 2 + 2 + 3, 1);
} else {
buf = taosMemoryCalloc(vAlterTbReq.nTagVal + 3, 1);
}
dataConverToStr(buf, vAlterTbReq.tagType, vAlterTbReq.pTagVal, vAlterTbReq.nTagVal, NULL);
@ -677,7 +680,7 @@ _exit:
}
static int32_t taosCreateStb(TAOS* taos, void* meta, int32_t metaLen) {
if(taos == NULL || meta == NULL) {
if (taos == NULL || meta == NULL) {
terrno = TSDB_CODE_INVALID_PARA;
return terrno;
}
@ -692,7 +695,7 @@ static int32_t taosCreateStb(TAOS* taos, void* meta, int32_t metaLen) {
terrno = code;
return code;
}
uDebug(LOG_ID_TAG" create stable, meta:%p, metaLen:%d", LOG_ID_VALUE, meta, metaLen);
uDebug(LOG_ID_TAG " create stable, meta:%p, metaLen:%d", LOG_ID_VALUE, meta, metaLen);
pRequest->syncQuery = true;
if (!pRequest->pDb) {
code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
@ -731,8 +734,8 @@ static int32_t taosCreateStb(TAOS* taos, void* meta, int32_t metaLen) {
pReq.source = TD_REQ_FROM_TAOX;
pReq.igExists = true;
uDebug(LOG_ID_TAG" create stable name:%s suid:%" PRId64 " processSuid:%" PRId64,
LOG_ID_VALUE, req.name, req.suid, pReq.suid);
uDebug(LOG_ID_TAG " create stable name:%s suid:%" PRId64 " processSuid:%" PRId64, LOG_ID_VALUE, req.name, req.suid,
pReq.suid);
STscObj* pTscObj = pRequest->pTscObj;
SName tableName;
tNameExtractFullName(toName(pTscObj->acctId, pRequest->pDb, req.name, &tableName), pReq.name);
@ -766,7 +769,7 @@ static int32_t taosCreateStb(TAOS* taos, void* meta, int32_t metaLen) {
taosMemoryFree(pCmdMsg.pMsg);
end:
uDebug(LOG_ID_TAG" create stable return, msg:%s", LOG_ID_VALUE, tstrerror(code));
uDebug(LOG_ID_TAG " create stable return, msg:%s", LOG_ID_VALUE, tstrerror(code));
destroyRequest(pRequest);
tFreeSMCreateStbReq(&pReq);
tDecoderClear(&coder);
@ -775,7 +778,7 @@ end:
}
static int32_t taosDropStb(TAOS* taos, void* meta, int32_t metaLen) {
if(taos == NULL || meta == NULL) {
if (taos == NULL || meta == NULL) {
terrno = TSDB_CODE_INVALID_PARA;
return terrno;
}
@ -791,7 +794,7 @@ static int32_t taosDropStb(TAOS* taos, void* meta, int32_t metaLen) {
return code;
}
uDebug(LOG_ID_TAG" drop stable, meta:%p, metaLen:%d", LOG_ID_VALUE, meta, metaLen);
uDebug(LOG_ID_TAG " drop stable, meta:%p, metaLen:%d", LOG_ID_VALUE, meta, metaLen);
pRequest->syncQuery = true;
if (!pRequest->pDb) {
code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
@ -812,9 +815,9 @@ static int32_t taosDropStb(TAOS* taos, void* meta, int32_t metaLen) {
goto end;
}
SRequestConnInfo conn = {.pTrans = pRequest->pTscObj->pAppInfo->pTransporter,
.requestId = pRequest->requestId,
.requestObjRefId = pRequest->self,
.mgmtEps = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp)};
.requestId = pRequest->requestId,
.requestObjRefId = pRequest->self,
.mgmtEps = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp)};
SName pName = {0};
toName(pRequest->pTscObj->acctId, pRequest->pDb, req.name, &pName);
STableMeta* pTableMeta = NULL;
@ -835,8 +838,8 @@ static int32_t taosDropStb(TAOS* taos, void* meta, int32_t metaLen) {
pReq.source = TD_REQ_FROM_TAOX;
// pReq.suid = processSuid(req.suid, pRequest->pDb);
uDebug(LOG_ID_TAG" drop stable name:%s suid:%" PRId64 " new suid:%" PRId64,
LOG_ID_VALUE, req.name, req.suid, pReq.suid);
uDebug(LOG_ID_TAG " drop stable name:%s suid:%" PRId64 " new suid:%" PRId64, LOG_ID_VALUE, req.name, req.suid,
pReq.suid);
STscObj* pTscObj = pRequest->pTscObj;
SName tableName = {0};
tNameExtractFullName(toName(pTscObj->acctId, pRequest->pDb, req.name, &tableName), pReq.name);
@ -870,7 +873,7 @@ static int32_t taosDropStb(TAOS* taos, void* meta, int32_t metaLen) {
taosMemoryFree(pCmdMsg.pMsg);
end:
uDebug(LOG_ID_TAG" drop stable return, msg:%s", LOG_ID_VALUE, tstrerror(code));
uDebug(LOG_ID_TAG " drop stable return, msg:%s", LOG_ID_VALUE, tstrerror(code));
destroyRequest(pRequest);
tDecoderClear(&coder);
terrno = code;
@ -889,7 +892,7 @@ static void destroyCreateTbReqBatch(void* data) {
}
static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) {
if(taos == NULL || meta == NULL) {
if (taos == NULL || meta == NULL) {
terrno = TSDB_CODE_INVALID_PARA;
return terrno;
}
@ -939,9 +942,9 @@ static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) {
taosHashSetFreeFp(pVgroupHashmap, destroyCreateTbReqBatch);
SRequestConnInfo conn = {.pTrans = pTscObj->pAppInfo->pTransporter,
.requestId = pRequest->requestId,
.requestObjRefId = pRequest->self,
.mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)};
.requestId = pRequest->requestId,
.requestObjRefId = pRequest->self,
.mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)};
pRequest->tableList = taosArrayInit(req.nReqs, sizeof(SName));
// loop to create table
@ -955,7 +958,6 @@ static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) {
if (code != TSDB_CODE_SUCCESS) {
goto end;
}
taosArrayPush(pRequest->tableList, &pName);
pCreateReq->flags |= TD_CREATE_IF_NOT_EXISTS;
// change tag cid to new cid
@ -966,6 +968,12 @@ static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) {
// pCreateReq->ctb.suid = processSuid(pCreateReq->ctb.suid, pRequest->pDb);
toName(pTscObj->acctId, pRequest->pDb, pCreateReq->ctb.stbName, &sName);
code = catalogGetTableMeta(pCatalog, &conn, &sName, &pTableMeta);
if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST) {
code = TSDB_CODE_SUCCESS;
taosMemoryFreeClear(pTableMeta);
continue;
}
if (code != TSDB_CODE_SUCCESS) {
goto end;
}
@ -983,6 +991,7 @@ static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) {
}
taosMemoryFreeClear(pTableMeta);
}
taosArrayPush(pRequest->tableList, &pName);
SVgroupCreateTableBatch* pTableBatch = taosHashGet(pVgroupHashmap, &pInfo.vgId, sizeof(pInfo.vgId));
if (pTableBatch == NULL) {
@ -992,6 +1001,7 @@ static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) {
tBatch.req.pArray = taosArrayInit(4, sizeof(struct SVCreateTbReq));
taosArrayPush(tBatch.req.pArray, pCreateReq);
tBatch.req.source = TD_REQ_FROM_TAOX;
taosHashPut(pVgroupHashmap, &pInfo.vgId, sizeof(pInfo.vgId), &tBatch, sizeof(tBatch));
} else { // add to the correct vgroup
@ -999,6 +1009,9 @@ static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) {
}
}
if (taosHashGetSize(pVgroupHashmap) == 0) {
goto end;
}
SArray* pBufArray = serializeVgroupsCreateTableBatch(pVgroupHashmap);
if (NULL == pBufArray) {
code = TSDB_CODE_OUT_OF_MEMORY;
@ -1024,7 +1037,7 @@ static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) {
code = pRequest->code;
end:
uDebug(LOG_ID_TAG" create table return, msg:%s", LOG_ID_VALUE, tstrerror(code));
uDebug(LOG_ID_TAG " create table return, msg:%s", LOG_ID_VALUE, tstrerror(code));
for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
pCreateReq = req.pReqs + iReq;
taosMemoryFreeClear(pCreateReq->comment);
@ -1053,7 +1066,7 @@ static void destroyDropTbReqBatch(void* data) {
}
static int32_t taosDropTable(TAOS* taos, void* meta, int32_t metaLen) {
if(taos == NULL || meta == NULL) {
if (taos == NULL || meta == NULL) {
terrno = TSDB_CODE_INVALID_PARA;
return terrno;
}
@ -1102,9 +1115,9 @@ static int32_t taosDropTable(TAOS* taos, void* meta, int32_t metaLen) {
taosHashSetFreeFp(pVgroupHashmap, destroyDropTbReqBatch);
SRequestConnInfo conn = {.pTrans = pTscObj->pAppInfo->pTransporter,
.requestId = pRequest->requestId,
.requestObjRefId = pRequest->self,
.mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)};
.requestId = pRequest->requestId,
.requestObjRefId = pRequest->self,
.mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)};
pRequest->tableList = taosArrayInit(req.nReqs, sizeof(SName));
// loop to create table
for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
@ -1133,7 +1146,8 @@ static int32_t taosDropTable(TAOS* taos, void* meta, int32_t metaLen) {
tb_uid_t oldSuid = pDropReq->suid;
pDropReq->suid = pTableMeta->suid;
taosMemoryFreeClear(pTableMeta);
uDebug(LOG_ID_TAG" drop table name:%s suid:%" PRId64 " new suid:%" PRId64, LOG_ID_VALUE, pDropReq->name, oldSuid, pDropReq->suid);
uDebug(LOG_ID_TAG " drop table name:%s suid:%" PRId64 " new suid:%" PRId64, LOG_ID_VALUE, pDropReq->name, oldSuid,
pDropReq->suid);
taosArrayPush(pRequest->tableList, &pName);
SVgroupDropTableBatch* pTableBatch = taosHashGet(pVgroupHashmap, &pInfo.vgId, sizeof(pInfo.vgId));
@ -1176,7 +1190,7 @@ static int32_t taosDropTable(TAOS* taos, void* meta, int32_t metaLen) {
code = pRequest->code;
end:
uDebug(LOG_ID_TAG" drop table return, msg:%s", LOG_ID_VALUE, tstrerror(code));
uDebug(LOG_ID_TAG " drop table return, msg:%s", LOG_ID_VALUE, tstrerror(code));
taosHashCleanup(pVgroupHashmap);
destroyRequest(pRequest);
tDecoderClear(&coder);
@ -1218,16 +1232,16 @@ end:
//}
static int32_t taosDeleteData(TAOS* taos, void* meta, int32_t metaLen) {
if(taos == NULL || meta == NULL) {
if (taos == NULL || meta == NULL) {
terrno = TSDB_CODE_INVALID_PARA;
return terrno;
}
SDeleteRes req = {0};
SDecoder coder = {0};
SDeleteRes req = {0};
SDecoder coder = {0};
char sql[256] = {0};
int32_t code = TSDB_CODE_SUCCESS;
uDebug("connId:0x%"PRIx64" delete data, meta:%p, len:%d", *(int64_t*)taos, meta, metaLen);
uDebug("connId:0x%" PRIx64 " delete data, meta:%p, len:%d", *(int64_t*)taos, meta, metaLen);
// decode and process req
void* data = POINTER_SHIFT(meta, sizeof(SMsgHead));
@ -1251,19 +1265,19 @@ static int32_t taosDeleteData(TAOS* taos, void* meta, int32_t metaLen) {
taos_free_result(res);
end:
uDebug("connId:0x%"PRIx64" delete data sql:%s, code:%s", *(int64_t*)taos, sql, tstrerror(code));
uDebug("connId:0x%" PRIx64 " delete data sql:%s, code:%s", *(int64_t*)taos, sql, tstrerror(code));
tDecoderClear(&coder);
terrno = code;
return code;
}
static int32_t taosAlterTable(TAOS* taos, void* meta, int32_t metaLen) {
if(taos == NULL || meta == NULL) {
if (taos == NULL || meta == NULL) {
terrno = TSDB_CODE_INVALID_PARA;
return terrno;
}
SVAlterTbReq req = {0};
SDecoder coder = {0};
SDecoder dcoder = {0};
int32_t code = TSDB_CODE_SUCCESS;
SRequestObj* pRequest = NULL;
SQuery* pQuery = NULL;
@ -1284,8 +1298,8 @@ static int32_t taosAlterTable(TAOS* taos, void* meta, int32_t metaLen) {
// decode and process req
void* data = POINTER_SHIFT(meta, sizeof(SMsgHead));
int32_t len = metaLen - sizeof(SMsgHead);
tDecoderInit(&coder, data, len);
if (tDecodeSVAlterTbReq(&coder, &req) < 0) {
tDecoderInit(&dcoder, data, len);
if (tDecodeSVAlterTbReq(&dcoder, &req) < 0) {
code = TSDB_CODE_INVALID_PARA;
goto end;
}
@ -1303,9 +1317,9 @@ static int32_t taosAlterTable(TAOS* taos, void* meta, int32_t metaLen) {
}
SRequestConnInfo conn = {.pTrans = pTscObj->pAppInfo->pTransporter,
.requestId = pRequest->requestId,
.requestObjRefId = pRequest->self,
.mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)};
.requestId = pRequest->requestId,
.requestObjRefId = pRequest->self,
.mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)};
SVgroupInfo pInfo = {0};
SName pName = {0};
@ -1327,14 +1341,36 @@ static int32_t taosAlterTable(TAOS* taos, void* meta, int32_t metaLen) {
goto end;
}
pVgData->vg = pInfo;
pVgData->pData = taosMemoryMalloc(metaLen);
if (NULL == pVgData->pData) {
int tlen = 0;
req.source = TD_REQ_FROM_TAOX;
tEncodeSize(tEncodeSVAlterTbReq, &req, tlen, code);
if(code != 0){
code = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
memcpy(pVgData->pData, meta, metaLen);
((SMsgHead*)pVgData->pData)->vgId = htonl(pInfo.vgId);
pVgData->size = metaLen;
tlen += sizeof(SMsgHead);
void* pMsg = taosMemoryMalloc(tlen);
if (NULL == pMsg) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
((SMsgHead*)pMsg)->vgId = htonl(pInfo.vgId);
((SMsgHead*)pMsg)->contLen = htonl(tlen);
void* pBuf = POINTER_SHIFT(pMsg, sizeof(SMsgHead));
SEncoder coder = {0};
tEncoderInit(&coder, pBuf, tlen - sizeof(SMsgHead));
code = tEncodeSVAlterTbReq(&coder, &req);
if (code != 0) {
tEncoderClear(&coder);
code = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
tEncoderClear(&coder);
pVgData->pData = pMsg;
pVgData->size = tlen;
pVgData->numOfTables = 1;
taosArrayPush(pArray, &pVgData);
@ -1374,7 +1410,7 @@ end:
if (pVgData) taosMemoryFreeClear(pVgData->pData);
taosMemoryFreeClear(pVgData);
destroyRequest(pRequest);
tDecoderClear(&coder);
tDecoderClear(&dcoder);
qDestroyQuery(pQuery);
terrno = code;
return code;
@ -1385,8 +1421,8 @@ int taos_write_raw_block_with_fields(TAOS* taos, int rows, char* pData, const ch
return taos_write_raw_block_with_fields_with_reqid(taos, rows, pData, tbname, fields, numFields, 0);
}
int taos_write_raw_block_with_fields_with_reqid(TAOS *taos, int rows, char *pData, const char *tbname,
TAOS_FIELD *fields, int numFields, int64_t reqid){
int taos_write_raw_block_with_fields_with_reqid(TAOS* taos, int rows, char* pData, const char* tbname,
TAOS_FIELD* fields, int numFields, int64_t reqid) {
if (!taos || !pData || !tbname) {
terrno = TSDB_CODE_INVALID_PARA;
return terrno;
@ -1401,8 +1437,8 @@ int taos_write_raw_block_with_fields_with_reqid(TAOS *taos, int rows, char *pDat
return terrno;
}
uDebug(LOG_ID_TAG " write raw block with field, rows:%d, pData:%p, tbname:%s, fields:%p, numFields:%d",
LOG_ID_VALUE, rows, pData, tbname, fields, numFields);
uDebug(LOG_ID_TAG " write raw block with field, rows:%d, pData:%p, tbname:%s, fields:%p, numFields:%d", LOG_ID_VALUE,
rows, pData, tbname, fields, numFields);
pRequest->syncQuery = true;
if (!pRequest->pDb) {
@ -1553,8 +1589,20 @@ end:
return code;
}
static void* getRawDataFromRes(void* pRetrieve) {
void* rawData = NULL;
// deal with compatibility
if (*(int64_t*)pRetrieve == 0) {
rawData = ((SRetrieveTableRsp*)pRetrieve)->data;
} else if (*(int64_t*)pRetrieve == 1) {
rawData = ((SRetrieveTableRspForTmq*)pRetrieve)->data;
}
ASSERT(rawData != NULL);
return rawData;
}
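getRawDataFromRes() keeps old and new poll responses readable side by side: the first 8 bytes of the response act as a layout tag. A minimal caller sketch, assuming (as the comparison above implies) that the tag is 0 for the legacy SRetrieveTableRsp layout and 1 for SRetrieveTableRspForTmq:

// Works for both layouts; only the position of ->data differs between them.
void* pRetrieve = taosArrayGetP(rspObj.rsp.blockData, rspObj.resIter);
int64_t layoutTag = *(int64_t*)pRetrieve;      // 0 = legacy, 1 = TMQ response
void* rawData = getRawDataFromRes(pRetrieve);  // resolves the data pointer either way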
static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen) {
if(taos == NULL || data == NULL){
if (taos == NULL || data == NULL) {
terrno = TSDB_CODE_INVALID_PARA;
return terrno;
}
@ -1607,7 +1655,7 @@ static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen) {
}
pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
while (++rspObj.resIter < rspObj.rsp.blockNum) {
SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)taosArrayGetP(rspObj.rsp.blockData, rspObj.resIter);
void* pRetrieve = taosArrayGetP(rspObj.rsp.blockData, rspObj.resIter);
if (!rspObj.rsp.withSchema) {
goto end;
}
@ -1623,11 +1671,11 @@ static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen) {
strcpy(pName.tname, tbName);
code = catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta);
// if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST) {
// uError("WriteRaw:catalogGetTableMeta table not exist. table name: %s", tbName);
// code = TSDB_CODE_SUCCESS;
// continue;
// }
// if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST) {
// uError("WriteRaw:catalogGetTableMeta table not exist. table name: %s", tbName);
// code = TSDB_CODE_SUCCESS;
// continue;
// }
if (code != TSDB_CODE_SUCCESS) {
goto end;
}
@ -1653,7 +1701,8 @@ static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen) {
fields[i].bytes = pSW->pSchema[i].bytes;
tstrncpy(fields[i].name, pSW->pSchema[i].name, tListLen(pSW->pSchema[i].name));
}
code = rawBlockBindData(pQuery, pTableMeta, pRetrieve->data, NULL, fields, pSW->nCols, true);
void* rawData = getRawDataFromRes(pRetrieve);
code = rawBlockBindData(pQuery, pTableMeta, rawData, NULL, fields, pSW->nCols, true);
taosMemoryFree(fields);
if (code != TSDB_CODE_SUCCESS) {
goto end;
@ -1682,7 +1731,7 @@ end:
}
static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen) {
if(taos == NULL || data == NULL){
if (taos == NULL || data == NULL) {
terrno = TSDB_CODE_INVALID_PARA;
return terrno;
}
@ -1735,9 +1784,9 @@ static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen)
}
pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
uDebug(LOG_ID_TAG" write raw metadata block num:%d", LOG_ID_VALUE, rspObj.rsp.blockNum);
uDebug(LOG_ID_TAG " write raw metadata block num:%d", LOG_ID_VALUE, rspObj.rsp.blockNum);
while (++rspObj.resIter < rspObj.rsp.blockNum) {
SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)taosArrayGetP(rspObj.rsp.blockData, rspObj.resIter);
void* pRetrieve = taosArrayGetP(rspObj.rsp.blockData, rspObj.resIter);
if (!rspObj.rsp.withSchema) {
goto end;
}
@ -1748,7 +1797,7 @@ static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen)
goto end;
}
uDebug(LOG_ID_TAG" write raw metadata block tbname:%s", LOG_ID_VALUE, tbName);
uDebug(LOG_ID_TAG " write raw metadata block tbname:%s", LOG_ID_VALUE, tbName);
SName pName = {TSDB_TABLE_NAME_T, pRequest->pTscObj->acctId, {0}, {0}};
strcpy(pName.dbname, pRequest->pDb);
strcpy(pName.tname, tbName);
@ -1795,11 +1844,11 @@ static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen)
strcpy(pName.tname, pCreateReqDst->ctb.stbName);
}
code = catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta);
// if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST) {
// uError("WriteRaw:catalogGetTableMeta table not exist. table name: %s", tbName);
// code = TSDB_CODE_SUCCESS;
// continue;
// }
// if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST) {
// uError("WriteRaw:catalogGetTableMeta table not exist. table name: %s", tbName);
// code = TSDB_CODE_SUCCESS;
// continue;
// }
if (code != TSDB_CODE_SUCCESS) {
goto end;
}
@ -1824,12 +1873,12 @@ static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen)
fields[i].bytes = pSW->pSchema[i].bytes;
tstrncpy(fields[i].name, pSW->pSchema[i].name, tListLen(pSW->pSchema[i].name));
}
code = rawBlockBindData(pQuery, pTableMeta, pRetrieve->data, pCreateReqDst, fields, pSW->nCols, true);
void* rawData = getRawDataFromRes(pRetrieve);
code = rawBlockBindData(pQuery, pTableMeta, rawData, &pCreateReqDst, fields, pSW->nCols, true);
taosMemoryFree(fields);
if (code != TSDB_CODE_SUCCESS) {
goto end;
}
pCreateReqDst = NULL;
taosMemoryFreeClear(pTableMeta);
}
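The other fix in this loop is ownership: rawBlockBindData() now receives &pCreateReqDst instead of the bare pointer, and the local pointer is reset only after a successful bind. A sketch of the intended single-owner flow (labels elided):

// The callee may take the create request on success; the caller forgets its
// copy only afterwards, so no path ends up freeing the request twice.
code = rawBlockBindData(pQuery, pTableMeta, rawData, &pCreateReqDst, fields, pSW->nCols, true);
if (code != TSDB_CODE_SUCCESS) goto end;  // still owned here; freed in the end: block
pCreateReqDst = NULL;                     // ownership transferred to pQuery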

View File

@ -406,10 +406,6 @@ int32_t stmtGetFromCache(STscStmt* pStmt) {
if (NULL == pStmt->sql.pTableCache || taosHashGetSize(pStmt->sql.pTableCache) <= 0) {
if (pStmt->bInfo.inExecCache) {
if (ASSERT(taosHashGetSize(pStmt->exec.pBlockHash) == 1)) {
tscError("stmtGetFromCache error");
return TSDB_CODE_TSC_STMT_CACHE_ERROR;
}
pStmt->bInfo.needParse = false;
tscDebug("reuse stmt block for tb %s in execBlock", pStmt->bInfo.tbFName);
return TSDB_CODE_SUCCESS;

File diff suppressed because it is too large

View File

@ -829,7 +829,10 @@ TEST(clientCase, projection_query_tables) {
TAOS_RES* pRes = taos_query(pConn, "use abc1");
taos_free_result(pRes);
pRes = taos_query(pConn, "create stable st2 (ts timestamp, k int, f varchar(4096)) tags(a int)");
// TAOS_RES* pRes = taos_query(pConn, "select tbname, last(ts) from abc1.stable_1 group by tbname");
// taos_free_result(pRes);
pRes = taos_query(pConn, "create stream stream_1 trigger at_once fill_history 1 ignore expired 0 into str_res1 as select _wstart as ts, count(*) from stable_1 interval(10s);");
if (taos_errno(pRes) != 0) {
printf("failed to create table tu, reason:%s\n", taos_errstr(pRes));
}

View File

@ -37,8 +37,7 @@ static const SSysDbTableSchema dnodesSchema[] = {
{.name = "reboot_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = true},
{.name = "note", .bytes = 256 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
#ifdef TD_ENTERPRISE
{.name = "active_code", .bytes = TSDB_ACTIVE_KEY_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
{.name = "c_active_code", .bytes = TSDB_CONN_ACTIVE_KEY_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
{.name = "machine_id", .bytes = TSDB_MACHINE_ID_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
#endif
};
@ -349,6 +348,24 @@ static const SSysDbTableSchema userCompactsDetailSchema[] = {
{.name = "start_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
};
static const SSysDbTableSchema useGrantsFullSchema[] = {
{.name = "grant_name", .bytes = 32 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "display_name", .bytes = 256 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "expire", .bytes = 32 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "limits", .bytes = 512 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
};
static const SSysDbTableSchema useGrantsLogsSchema[] = {
{.name = "state", .bytes = 1536 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "active", .bytes = 512 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "machine", .bytes = TSDB_GRANT_LOG_COL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
};
static const SSysDbTableSchema useMachinesSchema[] = {
{.name = "id", .bytes = TSDB_CLUSTER_ID_LEN + 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "machine", .bytes = 7552 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
};
static const SSysTableMeta infosMeta[] = {
{TSDB_INS_TABLE_DNODES, dnodesSchema, tListLen(dnodesSchema), true},
{TSDB_INS_TABLE_MNODES, mnodesSchema, tListLen(mnodesSchema), true},
@ -378,6 +395,9 @@ static const SSysTableMeta infosMeta[] = {
{TSDB_INS_TABLE_VIEWS, userViewsSchema, tListLen(userViewsSchema), false},
{TSDB_INS_TABLE_COMPACTS, userCompactsSchema, tListLen(userCompactsSchema), false},
{TSDB_INS_TABLE_COMPACT_DETAILS, userCompactsDetailSchema, tListLen(userCompactsDetailSchema), false},
{TSDB_INS_TABLE_GRANTS_FULL, useGrantsFullSchema, tListLen(useGrantsFullSchema), false},
{TSDB_INS_TABLE_GRANTS_LOGS, useGrantsLogsSchema, tListLen(useGrantsLogsSchema), false},
{TSDB_INS_TABLE_MACHINES, useMachinesSchema, tListLen(useMachinesSchema), false},
};
static const SSysDbTableSchema connectionsSchema[] = {

View File

@ -631,7 +631,6 @@ SSDataBlock* blockDataExtractBlock(SSDataBlock* pBlock, int32_t startIndex, int3
for (int32_t i = 0; i < numOfCols; ++i) {
SColumnInfoData* pColData = taosArrayGet(pBlock->pDataBlock, i);
SColumnInfoData* pDstCol = taosArrayGet(pDst->pDataBlock, i);
for (int32_t j = startIndex; j < (startIndex + rowCount); ++j) {
bool isNull = false;
if (pBlock->pBlockAgg == NULL) {
@ -866,9 +865,9 @@ size_t blockDataGetRowSize(SSDataBlock* pBlock) {
* @return
*/
size_t blockDataGetSerialMetaSize(uint32_t numOfCols) {
// | version | total length | total rows | total columns | flag seg| block group id | column schema
// | version | total length | total rows | blankFull | total columns | flag seg| block group id | column schema
// | each column length |
return sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t) + sizeof(uint64_t) +
return sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t) + sizeof(bool) + sizeof(int32_t) + sizeof(int32_t) + sizeof(uint64_t) +
numOfCols * (sizeof(int8_t) + sizeof(int32_t)) + numOfCols * sizeof(int32_t);
}
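A quick check of the arithmetic: the new flag adds exactly sizeof(bool) to the fixed header, so for numOfCols columns the meta size works out as below (a restatement of the return expression above, not new logic):

// version(4) + total length(4) + total rows(4) + blankFill(1) + total columns(4)
// + flag seg(4) + group id(8), then per column: schema (type 1 + bytes 4) plus
// one int32 actual-length slot. Note that blockEncode() below actually writes
// the flag after the column payload; the size function only accounts for the byte.
size_t metaSize = 4 + 4 + 4 + 1 + 4 + 4 + 8 + numOfCols * (1 + 4) + numOfCols * 4;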
@ -1436,6 +1435,7 @@ SSDataBlock* createOneDataBlock(const SSDataBlock* pDataBlock, bool copyData) {
pBlock->info.capacity = 0;
pBlock->info.rowSize = 0;
pBlock->info.id = pDataBlock->info.id;
pBlock->info.blankFill = pDataBlock->info.blankFill;
size_t numOfCols = taosArrayGetSize(pDataBlock->pDataBlock);
for (int32_t i = 0; i < numOfCols; ++i) {
@ -2196,7 +2196,7 @@ int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols) {
// todo extract method
int32_t* version = (int32_t*)data;
*version = 1;
*version = BLOCK_VERSION_1;
data += sizeof(int32_t);
int32_t* actualLen = (int32_t*)data;
@ -2282,6 +2282,10 @@ int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols) {
// htonl(colSizes[col]), colSizes[col]);
}
bool* blankFill = (bool*)data;
*blankFill = pBlock->info.blankFill;
data += sizeof(bool);
*actualLen = dataLen;
*groupId = pBlock->info.id.groupId;
ASSERT(dataLen > 0);
@ -2293,7 +2297,6 @@ const char* blockDecode(SSDataBlock* pBlock, const char* pData) {
int32_t version = *(int32_t*)pStart;
pStart += sizeof(int32_t);
ASSERT(version == 1);
// total length sizeof(int32_t)
int32_t dataLen = *(int32_t*)pStart;
@ -2374,8 +2377,12 @@ const char* blockDecode(SSDataBlock* pBlock, const char* pData) {
pStart += colLen[i];
}
bool blankFill = *(bool*)pStart;
pStart += sizeof(bool);
pBlock->info.dataLoad = 1;
pBlock->info.rows = numOfRows;
pBlock->info.blankFill = blankFill;
ASSERT(pStart - pData == dataLen);
return pStart;
}
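A hedged round-trip sketch of the new field (blockGetEncodeSize() is assumed available alongside these helpers):

// Encode a block carrying blankFill, decode it into a copy, confirm it survives.
int32_t numOfCols = taosArrayGetSize(pBlock->pDataBlock);
char* buf = taosMemoryMalloc(blockGetEncodeSize(pBlock));
pBlock->info.blankFill = true;
blockEncode(pBlock, buf, numOfCols);
SSDataBlock* pCopy = createOneDataBlock(pBlock, false);
blockDecode(pCopy, buf);
ASSERT(pCopy->info.blankFill == true);
taosMemoryFree(buf);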

View File

@ -24,6 +24,7 @@ static int32_t (*tColDataAppendValueImpl[8][3])(SColData *pColData, uint8_t *pDa
static int32_t (*tColDataUpdateValueImpl[8][3])(SColData *pColData, uint8_t *pData, uint32_t nData, bool forward);
// SBuffer ================================
#ifdef BUILD_NO_CALL
void tBufferDestroy(SBuffer *pBuffer) {
tFree(pBuffer->pBuf);
pBuffer->pBuf = NULL;
@ -55,7 +56,7 @@ int32_t tBufferReserve(SBuffer *pBuffer, int64_t nData, void **ppData) {
return code;
}
#endif
// ================================
static int32_t tGetTagVal(uint8_t *p, STagVal *pTagVal, int8_t isJson);
@ -1148,6 +1149,7 @@ static int tTagValJsonCmprFn(const void *p1, const void *p2) {
return strcmp(((STagVal *)p1)[0].pKey, ((STagVal *)p2)[0].pKey);
}
#ifdef TD_DEBUG_PRINT_TAG
static void debugPrintTagVal(int8_t type, const void *val, int32_t vlen, const char *tag, int32_t ln) {
switch (type) {
case TSDB_DATA_TYPE_VARBINARY:
@ -1239,6 +1241,7 @@ void debugPrintSTag(STag *pTag, const char *tag, int32_t ln) {
}
printf("\n");
}
#endif
static int32_t tPutTagVal(uint8_t *p, STagVal *pTagVal, int8_t isJson) {
int32_t n = 0;
@ -2576,6 +2579,7 @@ _exit:
return code;
}
#ifdef BUILD_NO_CALL
static int32_t tColDataSwapValue(SColData *pColData, int32_t i, int32_t j) {
int32_t code = 0;
@ -2658,6 +2662,7 @@ static void tColDataSwap(SColData *pColData, int32_t i, int32_t j) {
break;
}
}
#endif
static int32_t tColDataCopyRowCell(SColData *pFromColData, int32_t iFromRow, SColData *pToColData, int32_t iToRow) {
int32_t code = TSDB_CODE_SUCCESS;

View File

@ -27,7 +27,7 @@
#include "cus_name.h"
#endif
GRANT_CFG_DECLARE;
// GRANT_CFG_DECLARE;
SConfig *tsCfg = NULL;
@ -58,7 +58,7 @@ int32_t tsNumOfMnodeQueryThreads = 4;
int32_t tsNumOfMnodeFetchThreads = 1;
int32_t tsNumOfMnodeReadThreads = 1;
int32_t tsNumOfVnodeQueryThreads = 4;
float tsRatioOfVnodeStreamThreads = 1.0;
float tsRatioOfVnodeStreamThreads = 0.5F;
int32_t tsNumOfVnodeFetchThreads = 4;
int32_t tsNumOfVnodeRsmaThreads = 2;
int32_t tsNumOfQnodeQueryThreads = 4;
@ -265,6 +265,7 @@ bool tsDisableStream = false;
int64_t tsStreamBufferSize = 128 * 1024 * 1024;
bool tsFilterScalarMode = false;
int tsResolveFQDNRetryTime = 100; // seconds
int tsStreamAggCnt = 1000;
char tsS3Endpoint[TSDB_FQDN_LEN] = "<endpoint>";
char tsS3AccessKey[TSDB_FQDN_LEN] = "<accesskey>";
@ -622,7 +623,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
0)
return -1;
if (cfgAddFloat(pCfg, "ratioOfVnodeStreamThreads", tsRatioOfVnodeStreamThreads, 0.01, 10, CFG_SCOPE_SERVER,
if (cfgAddFloat(pCfg, "ratioOfVnodeStreamThreads", tsRatioOfVnodeStreamThreads, 0.01, 4, CFG_SCOPE_SERVER,
CFG_DYN_NONE) != 0)
return -1;
@ -750,6 +751,8 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
if (cfgAddBool(pCfg, "disableStream", tsDisableStream, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER) != 0) return -1;
if (cfgAddInt64(pCfg, "streamBufferSize", tsStreamBufferSize, 0, INT64_MAX, CFG_SCOPE_SERVER, CFG_DYN_NONE) != 0)
return -1;
if (cfgAddInt64(pCfg, "streamAggCnt", tsStreamAggCnt, 2, INT32_MAX, CFG_SCOPE_SERVER, CFG_DYN_NONE) != 0)
return -1;
if (cfgAddInt32(pCfg, "checkpointInterval", tsStreamCheckpointInterval, 60, 1200, CFG_SCOPE_SERVER,
CFG_DYN_ENT_SERVER) != 0)
@ -804,7 +807,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
if (cfgAddBool(pCfg, "experimental", tsExperimental, CFG_SCOPE_BOTH, CFG_DYN_BOTH) != 0) return -1;
GRANT_CFG_ADD;
// GRANT_CFG_ADD;
return 0;
}
@ -1213,6 +1216,8 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
tsDisableStream = cfgGetItem(pCfg, "disableStream")->bval;
tsStreamBufferSize = cfgGetItem(pCfg, "streamBufferSize")->i64;
tsStreamAggCnt = cfgGetItem(pCfg, "streamAggCnt")->i32;
tsStreamBufferSize = cfgGetItem(pCfg, "streamBufferSize")->i64;
tsStreamCheckpointInterval = cfgGetItem(pCfg, "checkpointInterval")->i32;
tsSinkDataRate = cfgGetItem(pCfg, "streamSinkDataRate")->fval;
@ -1229,7 +1234,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
tsExperimental = cfgGetItem(pCfg, "experimental")->bval;
GRANT_CFG_GET;
// GRANT_CFG_GET;
return 0;
}
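The streamAggCnt addition traces the full lifecycle of a server-side option; condensed from the hunks above:

int tsStreamAggCnt = 1000;  // 1) compiled-in default
// 2) registered with bounds during startup:
if (cfgAddInt64(pCfg, "streamAggCnt", tsStreamAggCnt, 2, INT32_MAX, CFG_SCOPE_SERVER, CFG_DYN_NONE) != 0)
  return -1;
// 3) effective value picked up once the config tree is loaded:
tsStreamAggCnt = cfgGetItem(pCfg, "streamAggCnt")->i32;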
@ -1601,10 +1606,6 @@ static int32_t taosCfgDynamicOptionsForClient(SConfig *pCfg, char *name) {
tsTempSpace.reserved = (int64_t)(((double)pItem->fval) * 1024 * 1024 * 1024);
uInfo("%s set to %" PRId64, name, tsTempSpace.reserved);
matched = true;
} else if (strcasecmp("minimalDataDirGB", name) == 0) {
tsDataSpace.reserved = (int64_t)(((double)pItem->fval) * 1024 * 1024 * 1024);
uInfo("%s set to %" PRId64, name, tsDataSpace.reserved);
matched = true;
} else if (strcasecmp("minimalLogDirGB", name) == 0) {
tsLogSpace.reserved = (int64_t)(((double)pItem->fval) * 1024 * 1024 * 1024);
uInfo("%s set to %" PRId64, name, tsLogSpace.reserved);
@ -1675,10 +1676,6 @@ static int32_t taosCfgDynamicOptionsForClient(SConfig *pCfg, char *name) {
return -1;
}
matched = true;
} else if (strcasecmp("telemetryServer", name) == 0) {
uInfo("%s set from %s to %s", name, pItem->str, tsTelemServer);
tstrncpy(tsTelemServer, pItem->str, TSDB_FQDN_LEN);
matched = true;
}
break;
}
@ -1801,4 +1798,17 @@ void taosSetAllDebugFlag(int32_t flag) {
if (terrno == TSDB_CODE_CFG_NOT_FOUND) terrno = TSDB_CODE_SUCCESS; // ignore not exist
}
int8_t taosGranted() { return atomic_load_8(&tsGrant); }
int8_t taosGranted(int8_t type) {
switch (type) {
case TSDB_GRANT_ALL:
return atomic_load_8(&tsGrant) & GRANT_FLAG_ALL;
case TSDB_GRANT_AUDIT:
return atomic_load_8(&tsGrant) & GRANT_FLAG_AUDIT;
case TSDB_GRANT_VIEW:
return atomic_load_8(&tsGrant) & GRANT_FLAG_VIEW;
default:
ASSERTS(0, "undefined grant type:%" PRIi8, type);
break;
}
return 0;
}
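taosGranted() now answers per feature rather than with one boolean: tsGrant has become a bitmask. A sketch under stated assumptions — the GRANT_FLAG_* values below are illustrative, the real ones live in the grant headers:

#define GRANT_FLAG_ALL   0x01  // assumed values, for illustration only
#define GRANT_FLAG_AUDIT 0x02
#define GRANT_FLAG_VIEW  0x04

if (!taosGranted(TSDB_GRANT_VIEW)) {
  // the view bit is absent from tsGrant: reject the CREATE VIEW statement
}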

View File

@ -19,5 +19,13 @@
#ifndef _GRANT
int32_t grantCheck(EGrantType grant) { return TSDB_CODE_SUCCESS; }
int32_t grantCheckExpire(EGrantType grant) { return TSDB_CODE_SUCCESS; }
#ifdef TD_UNIQ_GRANT
int32_t grantCheckLE(EGrantType grant) { return TSDB_CODE_SUCCESS; }
#endif
#else
#ifdef TD_UNIQ_GRANT
int32_t grantCheckExpire(EGrantType grant) { return TSDB_CODE_SUCCESS; }
#endif
#endif

View File

@ -15,11 +15,8 @@
#define _DEFAULT_SOURCE
#include "tmisce.h"
#include "tjson.h"
#include "tglobal.h"
#include "tlog.h"
#include "tname.h"
#include "tjson.h"
int32_t taosGetFqdnPortFromEp(const char* ep, SEp* pEp) {
pEp->port = 0;
memset(pEp->fqdn, 0, TSDB_FQDN_LEN);
@ -63,7 +60,7 @@ bool isEpsetEqual(const SEpSet* s1, const SEpSet* s2) {
void epsetAssign(SEpSet* pDst, const SEpSet* pSrc) {
if (pSrc == NULL || pDst == NULL) {
return;
return;
}
pDst->inUse = pSrc->inUse;
@ -73,6 +70,47 @@ void epsetAssign(SEpSet* pDst, const SEpSet* pSrc) {
tstrncpy(pDst->eps[i].fqdn, pSrc->eps[i].fqdn, tListLen(pSrc->eps[i].fqdn));
}
}
void epAssign(SEp* pDst, SEp* pSrc) {
if (pSrc == NULL || pDst == NULL) {
return;
}
memset(pDst->fqdn, 0, tListLen(pSrc->fqdn));
tstrncpy(pDst->fqdn, pSrc->fqdn, tListLen(pSrc->fqdn));
pDst->port = pSrc->port;
}
void epsetSort(SEpSet* pDst) {
if (pDst->numOfEps <= 1) {
return;
}
bool validIdx = false;
SEp ep = {0};
if (pDst->inUse >= 0 && pDst->inUse < pDst->numOfEps) {
validIdx = true;
epAssign(&ep, &pDst->eps[pDst->inUse]);
}
for (int i = 0; i < pDst->numOfEps - 1; i++) {
for (int j = 0; j < pDst->numOfEps - 1 - i; j++) {
SEp* f = &pDst->eps[j];
SEp* s = &pDst->eps[j + 1];
int cmp = strncmp(f->fqdn, s->fqdn, sizeof(f->fqdn));
if (cmp > 0 || (cmp == 0 && f->port > s->port)) {
SEp tmp = {0};
epAssign(&tmp, f);
epAssign(f, s);
epAssign(s, &tmp);
}
}
}
if (validIdx == true)
for (int i = 0; i < pDst->numOfEps; i++) {
int cmp = strncmp(ep.fqdn, pDst->eps[i].fqdn, sizeof(ep.fqdn));
if (cmp == 0 && ep.port == pDst->eps[i].port) {
pDst->inUse = i;
break;
}
}
}
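epsetSort() saves the endpoint behind inUse before the (fqdn, port) bubble sort and re-locates it afterwards, so an epset keeps pointing at the same server no matter where sorting moves it. A minimal usage sketch with addEpIntoEpSet(), as exercised by the tests further down:

SEpSet set = {0};                             // inUse starts at 0
addEpIntoEpSet(&set, "b.example.com", 6030);  // index 0: the endpoint in use
addEpIntoEpSet(&set, "a.example.com", 6030);
epsetSort(&set);                              // order: a.example.com, b.example.com
// set.inUse is now 1 — still b.example.com, the server in use before the sort.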
void updateEpSet_s(SCorEpSet* pEpSet, SEpSet* pNewEpSet) {
taosCorBeginWrite(&pEpSet->version);

View File

@ -1163,9 +1163,11 @@ int32_t tSerializeSStatusReq(void *buf, int32_t bufLen, SStatusReq *pReq) {
if (tEncodeI64(&encoder, pReq->updateTime) < 0) return -1;
if (tEncodeFloat(&encoder, pReq->numOfCores) < 0) return -1;
if (tEncodeI32(&encoder, pReq->numOfSupportVnodes) < 0) return -1;
if (tEncodeI32v(&encoder, pReq->numOfDiskCfg) < 0) return -1;
if (tEncodeI64(&encoder, pReq->memTotal) < 0) return -1;
if (tEncodeI64(&encoder, pReq->memAvail) < 0) return -1;
if (tEncodeCStr(&encoder, pReq->dnodeEp) < 0) return -1;
if (tEncodeCStr(&encoder, pReq->machineId) < 0) return -1;
// cluster cfg
if (tEncodeI32(&encoder, pReq->clusterCfg.statusInterval) < 0) return -1;
@ -1253,9 +1255,11 @@ int32_t tDeserializeSStatusReq(void *buf, int32_t bufLen, SStatusReq *pReq) {
if (tDecodeI64(&decoder, &pReq->updateTime) < 0) return -1;
if (tDecodeFloat(&decoder, &pReq->numOfCores) < 0) return -1;
if (tDecodeI32(&decoder, &pReq->numOfSupportVnodes) < 0) return -1;
if (tDecodeI32v(&decoder, &pReq->numOfDiskCfg) < 0) return -1;
if (tDecodeI64(&decoder, &pReq->memTotal) < 0) return -1;
if (tDecodeI64(&decoder, &pReq->memAvail) < 0) return -1;
if (tDecodeCStrTo(&decoder, pReq->dnodeEp) < 0) return -1;
if (tDecodeCStrTo(&decoder, pReq->machineId) < 0) return -1;
// cluster cfg
if (tDecodeI32(&decoder, &pReq->clusterCfg.statusInterval) < 0) return -1;
@ -2266,6 +2270,37 @@ int32_t tDeserializeSGetUserWhiteListRsp(void *buf, int32_t bufLen, SGetUserWhit
void tFreeSGetUserWhiteListRsp(SGetUserWhiteListRsp *pRsp) { taosMemoryFree(pRsp->pWhiteLists); }
int32_t tSerializeSMCfgClusterReq(void *buf, int32_t bufLen, SMCfgClusterReq *pReq) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
if (tStartEncode(&encoder) < 0) return -1;
if (tEncodeCStr(&encoder, pReq->config) < 0) return -1;
if (tEncodeCStr(&encoder, pReq->value) < 0) return -1;
ENCODESQL();
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
tEncoderClear(&encoder);
return tlen;
}
int32_t tDeserializeSMCfgClusterReq(void *buf, int32_t bufLen, SMCfgClusterReq *pReq) {
SDecoder decoder = {0};
tDecoderInit(&decoder, buf, bufLen);
if (tStartDecode(&decoder) < 0) return -1;
if (tDecodeCStrTo(&decoder, pReq->config) < 0) return -1;
if (tDecodeCStrTo(&decoder, pReq->value) < 0) return -1;
DECODESQL();
tEndDecode(&decoder);
tDecoderClear(&decoder);
return 0;
}
void tFreeSMCfgClusterReq(SMCfgClusterReq *pReq) { FREESQL(); }
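The new pair follows the codebase's usual two-pass wire pattern: call the serializer with a NULL buffer to measure, allocate, then encode for real. A hedged sketch (field names from the functions above; the option name and buffer sizes are assumptions):

SMCfgClusterReq req = {0};
tstrncpy(req.config, "someClusterOption", sizeof(req.config));  // hypothetical option name
tstrncpy(req.value, "on", sizeof(req.value));
int32_t contLen = tSerializeSMCfgClusterReq(NULL, 0, &req);  // pass 1: size only
void* pBuf = taosMemoryMalloc(contLen);
tSerializeSMCfgClusterReq(pBuf, contLen, &req);              // pass 2: encode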
int32_t tSerializeSCreateDropMQSNodeReq(void *buf, int32_t bufLen, SMCreateQnodeReq *pReq) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
@ -6139,6 +6174,55 @@ int32_t tDeserializeSMqAskEpReq(void *buf, int32_t bufLen, SMqAskEpReq *pReq) {
return 0;
}
int32_t tDeatroySMqHbRsp(SMqHbRsp *pRsp) {
taosArrayDestroy(pRsp->topicPrivileges);
return 0;
}
int32_t tSerializeSMqHbRsp(void *buf, int32_t bufLen, SMqHbRsp *pRsp) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
if (tStartEncode(&encoder) < 0) return -1;
int32_t sz = taosArrayGetSize(pRsp->topicPrivileges);
if (tEncodeI32(&encoder, sz) < 0) return -1;
for (int32_t i = 0; i < sz; ++i) {
STopicPrivilege *privilege = (STopicPrivilege *)taosArrayGet(pRsp->topicPrivileges, i);
if (tEncodeCStr(&encoder, privilege->topic) < 0) return -1;
if (tEncodeI8(&encoder, privilege->noPrivilege) < 0) return -1;
}
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
tEncoderClear(&encoder);
return tlen;
}
int32_t tDeserializeSMqHbRsp(void *buf, int32_t bufLen, SMqHbRsp *pRsp) {
SDecoder decoder = {0};
tDecoderInit(&decoder, (char *)buf, bufLen);
if (tStartDecode(&decoder) < 0) return -1;
int32_t sz = 0;
if (tDecodeI32(&decoder, &sz) < 0) return -1;
if (sz > 0) {
pRsp->topicPrivileges = taosArrayInit(sz, sizeof(STopicPrivilege));
if (NULL == pRsp->topicPrivileges) return -1;
for (int32_t i = 0; i < sz; ++i) {
STopicPrivilege *data = taosArrayReserve(pRsp->topicPrivileges, 1);
if (tDecodeCStrTo(&decoder, data->topic) < 0) return -1;
if (tDecodeI8(&decoder, &data->noPrivilege) < 0) return -1;
}
}
tEndDecode(&decoder);
tDecoderClear(&decoder);
return 0;
}
int32_t tDeatroySMqHbReq(SMqHbReq *pReq) {
for (int i = 0; i < taosArrayGetSize(pReq->topics); i++) {
TopicOffsetRows *vgs = taosArrayGet(pReq->topics, i);
@ -6168,6 +6252,7 @@ int32_t tSerializeSMqHbReq(void *buf, int32_t bufLen, SMqHbReq *pReq) {
if (tEncodeI32(&encoder, offRows->vgId) < 0) return -1;
if (tEncodeI64(&encoder, offRows->rows) < 0) return -1;
if (tEncodeSTqOffsetVal(&encoder, &offRows->offset) < 0) return -1;
if (tEncodeI64(&encoder, offRows->ever) < 0) return -1;
}
}
@ -6194,7 +6279,7 @@ int32_t tDeserializeSMqHbReq(void *buf, int32_t bufLen, SMqHbReq *pReq) {
if (NULL == pReq->topics) return -1;
for (int32_t i = 0; i < sz; ++i) {
TopicOffsetRows *data = taosArrayReserve(pReq->topics, 1);
tDecodeCStrTo(&decoder, data->topicName);
if (tDecodeCStrTo(&decoder, data->topicName) < 0) return -1;
int32_t szVgs = 0;
if (tDecodeI32(&decoder, &szVgs) < 0) return -1;
if (szVgs > 0) {
@ -6205,6 +6290,7 @@ int32_t tDeserializeSMqHbReq(void *buf, int32_t bufLen, SMqHbReq *pReq) {
if (tDecodeI32(&decoder, &offRows->vgId) < 0) return -1;
if (tDecodeI64(&decoder, &offRows->rows) < 0) return -1;
if (tDecodeSTqOffsetVal(&decoder, &offRows->offset) < 0) return -1;
if (tDecodeI64(&decoder, &offRows->ever) < 0) return -1;
}
}
}
@ -6516,6 +6602,7 @@ int32_t tSerializeSMqPollReq(void *buf, int32_t bufLen, SMqPollReq *pReq) {
if (tEncodeI64(&encoder, pReq->timeout) < 0) return -1;
if (tSerializeSTqOffsetVal(&encoder, &pReq->reqOffset) < 0) return -1;
if (tEncodeI8(&encoder, pReq->enableReplay) < 0) return -1;
if (tEncodeI8(&encoder, pReq->sourceExcluded) < 0) return -1;
tEndEncode(&encoder);
@ -6556,6 +6643,10 @@ int32_t tDeserializeSMqPollReq(void *buf, int32_t bufLen, SMqPollReq *pReq) {
if (tDecodeI8(&decoder, &pReq->enableReplay) < 0) return -1;
}
if (!tDecodeIsEnd(&decoder)) {
if (tDecodeI8(&decoder, &pReq->sourceExcluded) < 0) return -1;
}
tEndDecode(&decoder);
tDecoderClear(&decoder);
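sourceExcluded rides on the append-only compatibility idiom used throughout these messages: new wire fields go at the end and are guarded by tDecodeIsEnd(), so input from an older peer simply leaves them at their zero defaults. The generic shape (newField is a hypothetical placeholder):

// An older sender's buffer ends before the appended field; the guard skips it
// and the struct keeps its zeroed default.
if (!tDecodeIsEnd(&decoder)) {
  if (tDecodeI8(&decoder, &pReq->newField) < 0) return -1;  // hypothetical appended field
}
tEndDecode(&decoder);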
@ -7168,6 +7259,7 @@ int32_t tSerializeSCMCreateStreamReq(void *buf, int32_t bufLen, const SCMCreateS
if (tEncodeI8(&encoder, pReq->createStb) < 0) return -1;
if (tEncodeU64(&encoder, pReq->targetStbUid) < 0) return -1;
if (tEncodeI32(&encoder, taosArrayGetSize(pReq->fillNullCols)) < 0) return -1;
for (int32_t i = 0; i < taosArrayGetSize(pReq->fillNullCols); ++i) {
SColLocation *pCol = taosArrayGet(pReq->fillNullCols, i);
@ -7175,10 +7267,19 @@ int32_t tSerializeSCMCreateStreamReq(void *buf, int32_t bufLen, const SCMCreateS
if (tEncodeI16(&encoder, pCol->colId) < 0) return -1;
if (tEncodeI8(&encoder, pCol->type) < 0) return -1;
}
if (tEncodeI64(&encoder, pReq->deleteMark) < 0) return -1;
if (tEncodeI8(&encoder, pReq->igUpdate) < 0) return -1;
if (tEncodeI64(&encoder, pReq->lastTs) < 0) return -1;
if (tEncodeI32(&encoder, taosArrayGetSize(pReq->pVgroupVerList)) < 0) return -1;
for (int32_t i = 0; i < taosArrayGetSize(pReq->pVgroupVerList); ++i) {
SVgroupVer* p = taosArrayGet(pReq->pVgroupVerList, i);
if (tEncodeI32(&encoder, p->vgId) < 0) return -1;
if (tEncodeI64(&encoder, p->ver) < 0) return -1;
}
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
@ -7189,6 +7290,8 @@ int32_t tSerializeSCMCreateStreamReq(void *buf, int32_t bufLen, const SCMCreateS
int32_t tDeserializeSCMCreateStreamReq(void *buf, int32_t bufLen, SCMCreateStreamReq *pReq) {
int32_t sqlLen = 0;
int32_t astLen = 0;
int32_t numOfFillNullCols = 0;
int32_t numOfVgVer = 0;
SDecoder decoder = {0};
tDecoderInit(&decoder, buf, bufLen);
@ -7240,7 +7343,6 @@ int32_t tDeserializeSCMCreateStreamReq(void *buf, int32_t bufLen, SCMCreateStrea
}
if (tDecodeI8(&decoder, &pReq->createStb) < 0) return -1;
if (tDecodeU64(&decoder, &pReq->targetStbUid) < 0) return -1;
int32_t numOfFillNullCols = 0;
if (tDecodeI32(&decoder, &numOfFillNullCols) < 0) return -1;
if (numOfFillNullCols > 0) {
pReq->fillNullCols = taosArrayInit(numOfFillNullCols, sizeof(SColLocation));
@ -7265,9 +7367,28 @@ int32_t tDeserializeSCMCreateStreamReq(void *buf, int32_t bufLen, SCMCreateStrea
if (tDecodeI8(&decoder, &pReq->igUpdate) < 0) return -1;
if (tDecodeI64(&decoder, &pReq->lastTs) < 0) return -1;
tEndDecode(&decoder);
if (tDecodeI32(&decoder, &numOfVgVer) < 0) return -1;
if (numOfVgVer > 0) {
pReq->pVgroupVerList = taosArrayInit(numOfVgVer, sizeof(SVgroupVer));
if (pReq->pVgroupVerList == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
for (int32_t i = 0; i < numOfVgVer; ++i) {
SVgroupVer v = {0};
if (tDecodeI32(&decoder, &v.vgId) < 0) return -1;
if (tDecodeI64(&decoder, &v.ver) < 0) return -1;
if (taosArrayPush(pReq->pVgroupVerList, &v) == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
}
}
tEndDecode(&decoder);
tDecoderClear(&decoder);
return 0;
}
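One detail worth pausing on in this hunk: the old tEndDecode() call sat above the newly appended vgroup-version list and had to move below it, because tEndDecode() closes the nested decode scope and anything read after it would be lost. In shape:

// Wrong: closing the scope before the appended list truncates the request.
// Right: decode every field, including late additions, then close.
if (tDecodeI32(&decoder, &numOfVgVer) < 0) return -1;
/* ... decode the numOfVgVer (vgId, ver) entries ... */
tEndDecode(&decoder);
tDecoderClear(&decoder);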
@ -7339,10 +7460,11 @@ void tFreeSCMCreateStreamReq(SCMCreateStreamReq *pReq) {
if (NULL == pReq) {
return;
}
taosArrayDestroy(pReq->pTags);
taosMemoryFreeClear(pReq->sql);
taosMemoryFreeClear(pReq->ast);
taosArrayDestroy(pReq->pTags);
taosArrayDestroy(pReq->fillNullCols);
taosArrayDestroy(pReq->pVgroupVerList);
}
int32_t tEncodeSRSmaParam(SEncoder *pCoder, const SRSmaParam *pRSmaParam) {
@ -7391,6 +7513,7 @@ int tEncodeSVCreateStbReq(SEncoder *pCoder, const SVCreateStbReq *pReq) {
if (pReq->alterOriDataLen > 0) {
if (tEncodeBinary(pCoder, pReq->alterOriData, pReq->alterOriDataLen) < 0) return -1;
}
if (tEncodeI8(pCoder, pReq->source) < 0) return -1;
tEndEncode(pCoder);
return 0;
@ -7413,6 +7536,10 @@ int tDecodeSVCreateStbReq(SDecoder *pCoder, SVCreateStbReq *pReq) {
if (tDecodeBinary(pCoder, (uint8_t **)&pReq->alterOriData, NULL) < 0) return -1;
}
if (!tDecodeIsEnd(pCoder)) {
if (tDecodeI8(pCoder, &pReq->source) < 0) return -1;
}
tEndDecode(pCoder);
return 0;
}
@ -7541,6 +7668,8 @@ int tEncodeSVCreateTbBatchReq(SEncoder *pCoder, const SVCreateTbBatchReq *pReq)
if (tEncodeSVCreateTbReq(pCoder, (SVCreateTbReq *)taosArrayGet(pReq->pArray, iReq)) < 0) return -1;
}
if (tEncodeI8(pCoder, pReq->source) < 0) return -1;
tEndEncode(pCoder);
return 0;
}
@ -7555,6 +7684,10 @@ int tDecodeSVCreateTbBatchReq(SDecoder *pCoder, SVCreateTbBatchReq *pReq) {
if (tDecodeSVCreateTbReq(pCoder, pReq->pReqs + iReq) < 0) return -1;
}
if (!tDecodeIsEnd(pCoder)) {
if (tDecodeI8(pCoder, &pReq->source) < 0) return -1;
}
tEndDecode(pCoder);
return 0;
}
@ -7753,36 +7886,6 @@ static int32_t tDecodeSVSubmitBlk(SDecoder *pCoder, SVSubmitBlk *pBlock, int32_t
return 0;
}
int32_t tEncodeSVSubmitReq(SEncoder *pCoder, const SVSubmitReq *pReq) {
int32_t nBlocks = taosArrayGetSize(pReq->pArray);
if (tStartEncode(pCoder) < 0) return -1;
if (tEncodeI32v(pCoder, pReq->flags) < 0) return -1;
if (tEncodeI32v(pCoder, nBlocks) < 0) return -1;
for (int32_t iBlock = 0; iBlock < nBlocks; iBlock++) {
if (tEncodeSVSubmitBlk(pCoder, (SVSubmitBlk *)taosArrayGet(pReq->pArray, iBlock), pReq->flags) < 0) return -1;
}
tEndEncode(pCoder);
return 0;
}
int32_t tDecodeSVSubmitReq(SDecoder *pCoder, SVSubmitReq *pReq) {
if (tStartDecode(pCoder) < 0) return -1;
if (tDecodeI32v(pCoder, &pReq->flags) < 0) return -1;
if (tDecodeI32v(pCoder, &pReq->nBlocks) < 0) return -1;
pReq->pBlocks = tDecoderMalloc(pCoder, sizeof(SVSubmitBlk) * pReq->nBlocks);
if (pReq->pBlocks == NULL) return -1;
for (int32_t iBlock = 0; iBlock < pReq->nBlocks; iBlock++) {
if (tDecodeSVSubmitBlk(pCoder, pReq->pBlocks + iBlock, pReq->flags) < 0) return -1;
}
tEndDecode(pCoder);
return 0;
}
static int32_t tEncodeSSubmitBlkRsp(SEncoder *pEncoder, const SSubmitBlkRsp *pBlock) {
if (tStartEncode(pEncoder) < 0) return -1;
@ -7942,6 +8045,7 @@ int32_t tEncodeSVAlterTbReq(SEncoder *pEncoder, const SVAlterTbReq *pReq) {
break;
}
if (tEncodeI64(pEncoder, pReq->ctimeMs) < 0) return -1;
if (tEncodeI8(pEncoder, pReq->source) < 0) return -1;
tEndEncode(pEncoder);
return 0;
@ -8002,6 +8106,9 @@ int32_t tDecodeSVAlterTbReq(SDecoder *pDecoder, SVAlterTbReq *pReq) {
if (!tDecodeIsEnd(pDecoder)) {
if (tDecodeI64(pDecoder, &pReq->ctimeMs) < 0) return -1;
}
if (!tDecodeIsEnd(pDecoder)) {
if (tDecodeI8(pDecoder, &pReq->source) < 0) return -1;
}
tEndDecode(pDecoder);
return 0;

View File

@ -12,9 +12,10 @@
#include "tcommon.h"
#include "tdatablock.h"
#include "tdef.h"
#include "tvariant.h"
#include "tmisce.h"
#include "ttime.h"
#include "ttokendef.h"
#include "tvariant.h"
namespace {
//
@ -25,11 +26,10 @@ int main(int argc, char** argv) {
return RUN_ALL_TESTS();
}
TEST(testCase, toUIntegerEx_test) {
uint64_t val = 0;
char* s = "123";
char* s = "123";
int32_t ret = toUIntegerEx(s, strlen(s), TK_NK_INTEGER, &val);
ASSERT_EQ(ret, 0);
ASSERT_EQ(val, 123);
@ -59,7 +59,7 @@ TEST(testCase, toUIntegerEx_test) {
ASSERT_EQ(val, 18699);
s = "-1";
ret = toUIntegerEx(s, strlen(s),TK_NK_INTEGER, &val);
ret = toUIntegerEx(s, strlen(s), TK_NK_INTEGER, &val);
ASSERT_EQ(ret, -1);
s = "-0b10010";
@ -103,7 +103,7 @@ TEST(testCase, toUIntegerEx_test) {
TEST(testCase, toIntegerEx_test) {
int64_t val = 0;
char* s = "123";
char* s = "123";
int32_t ret = toIntegerEx(s, strlen(s), TK_NK_INTEGER, &val);
ASSERT_EQ(ret, 0);
ASSERT_EQ(val, 123);
@ -166,7 +166,7 @@ TEST(testCase, toIntegerEx_test) {
s = "-9223372036854775808";
ret = toIntegerEx(s, strlen(s), TK_NK_INTEGER, &val);
ASSERT_EQ(ret, 0);
ASSERT_EQ(val, -9223372036854775808);
// ASSERT_EQ(val, -9223372036854775808);
// out of range
s = "9323372036854775807";
@ -186,7 +186,7 @@ TEST(testCase, toIntegerEx_test) {
TEST(testCase, toInteger_test) {
int64_t val = 0;
char* s = "123";
char* s = "123";
int32_t ret = toInteger(s, strlen(s), 10, &val);
ASSERT_EQ(ret, 0);
ASSERT_EQ(val, 123);
@ -223,10 +223,10 @@ TEST(testCase, toInteger_test) {
s = "-9223372036854775808";
ret = toInteger(s, strlen(s), 10, &val);
ASSERT_EQ(ret, 0);
ASSERT_EQ(val, -9223372036854775808);
// ASSERT_EQ(val, -9223372036854775808);
// out of range
s = "9323372036854775807";
s = "9323372036854775807";
ret = toInteger(s, strlen(s), 10, &val);
ASSERT_EQ(ret, -1);
@ -418,9 +418,10 @@ void check_tm(const STm* tm, int32_t y, int32_t mon, int32_t d, int32_t h, int32
ASSERT_EQ(tm->fsec, fsec);
}
void test_timestamp_tm_conversion(int64_t ts, int32_t precision, int32_t y, int32_t mon, int32_t d, int32_t h, int32_t m, int32_t s, int64_t fsec) {
int64_t ts_tmp;
char buf[128] = {0};
void test_timestamp_tm_conversion(int64_t ts, int32_t precision, int32_t y, int32_t mon, int32_t d, int32_t h,
int32_t m, int32_t s, int64_t fsec) {
int64_t ts_tmp;
char buf[128] = {0};
struct STm tm;
taosFormatUtcTime(buf, 128, ts, precision);
printf("formated ts of %ld, precision: %d is: %s\n", ts, precision, buf);
@ -457,7 +458,7 @@ TEST(timeTest, timestamp2tm) {
test_timestamp_tm_conversion(ts, TSDB_TIME_PRECISION_MILLI, 1970 - 1900, 0 /* mon start from 0*/, 1, 8, 0, 0,
000000000L);
ts = -62198784343000; // milliseconds before epoch, Friday, January 1, -0001 12:00:00 AM GMT+08:06
ts = -62198784343000; // milliseconds before epoch, Friday, January 1, -0001 12:00:00 AM GMT+08:06
test_timestamp_tm_conversion(ts, TSDB_TIME_PRECISION_MILLI, -1 - 1900, 0 /* mon start from 0*/, 1,
0 /* hour start from 0*/, 0, 0, 000000000L);
}
@ -472,7 +473,7 @@ void test_ts2char(int64_t ts, const char* format, int32_t precison, const char*
TEST(timeTest, ts2char) {
osDefaultInit();
if (tsTimezone != TdEastZone8) GTEST_SKIP();
int64_t ts;
int64_t ts;
const char* format = "YYYY-MM-DD";
ts = 0;
test_ts2char(ts, format, TSDB_TIME_PRECISION_MILLI, "1970-01-01");
@ -493,12 +494,13 @@ TEST(timeTest, ts2char) {
"2023-023-23-3-2023-023-23-3-年-OCTOBER -OCT-October -Oct-october "
"-oct-月-286-13-6-286-13-6-FRIDAY -Friday -friday -日");
#endif
ts = 1697182085123L; // Friday, October 13, 2023 3:28:05.123 PM GMT+08:00
ts = 1697182085123L; // Friday, October 13, 2023 3:28:05.123 PM GMT+08:00
test_ts2char(ts, "HH24:hh24:HH12:hh12:HH:hh:MI:mi:SS:ss:MS:ms:US:us:NS:ns:PM:AM:pm:am", TSDB_TIME_PRECISION_MILLI,
"15:15:03:03:03:03:28:28:05:05:123:123:123000:123000:123000000:123000000:PM:PM:pm:pm");
// double quotes normal output
test_ts2char(ts, "\\\"HH24:hh24:HH12:hh12:HH:hh:MI:mi:SS:ss:MS:ms:US:us:NS:ns:PM:AM:pm:am\\\"", TSDB_TIME_PRECISION_MILLI,
test_ts2char(ts, "\\\"HH24:hh24:HH12:hh12:HH:hh:MI:mi:SS:ss:MS:ms:US:us:NS:ns:PM:AM:pm:am\\\"",
TSDB_TIME_PRECISION_MILLI,
"\"15:15:03:03:03:03:28:28:05:05:123:123:123000:123000:123000000:123000000:PM:PM:pm:pm\"");
test_ts2char(ts, "\\\"HH24:hh24:HH12:hh12:HH:hh:MI:mi:SS:ss:MS:ms:US:us:NS:ns:PM:AM:pm:am", TSDB_TIME_PRECISION_MILLI,
"\"15:15:03:03:03:03:28:28:05:05:123:123:123000:123000:123000000:123000000:PM:PM:pm:pm");
@ -506,14 +508,18 @@ TEST(timeTest, ts2char) {
test_ts2char(ts, "\"HH24:hh24:HH12:hh12:HH:hh:MI:mi:SS:ss:MS:ms:US:us:NS:ns:PM:AM:pm:am", TSDB_TIME_PRECISION_MILLI,
"HH24:hh24:HH12:hh12:HH:hh:MI:mi:SS:ss:MS:ms:US:us:NS:ns:PM:AM:pm:am");
test_ts2char(ts, "yyyy-mm-dd hh24:mi:ss.nsamaaa", TSDB_TIME_PRECISION_MILLI, "2023-10-13 15:28:05.123000000pmaaa");
test_ts2char(ts, "aaa--yyyy-mm-dd hh24:mi:ss.nsamaaa", TSDB_TIME_PRECISION_MILLI, "aaa--2023-10-13 15:28:05.123000000pmaaa");
test_ts2char(ts, "add--yyyy-mm-dd hh24:mi:ss.nsamaaa", TSDB_TIME_PRECISION_MILLI, "a13--2023-10-13 15:28:05.123000000pmaaa");
test_ts2char(ts, "aaa--yyyy-mm-dd hh24:mi:ss.nsamaaa", TSDB_TIME_PRECISION_MILLI,
"aaa--2023-10-13 15:28:05.123000000pmaaa");
test_ts2char(ts, "add--yyyy-mm-dd hh24:mi:ss.nsamaaa", TSDB_TIME_PRECISION_MILLI,
"a13--2023-10-13 15:28:05.123000000pmaaa");
ts = 1693946405000;
test_ts2char(ts, "Day, Month dd, YYYY hh24:mi:ss AM TZH:tzh", TSDB_TIME_PRECISION_MILLI, "Wednesday, September 06, 2023 04:40:05 AM +08:+08");
test_ts2char(ts, "Day, Month dd, YYYY hh24:mi:ss AM TZH:tzh", TSDB_TIME_PRECISION_MILLI,
"Wednesday, September 06, 2023 04:40:05 AM +08:+08");
ts = -62198784343000; // milliseconds before epoch, Friday, January 1, -0001 12:00:00 AM GMT+08:06
test_ts2char(ts, "Day, Month dd, YYYY hh12:mi:ss AM", TSDB_TIME_PRECISION_MILLI, "Friday , January 01, -001 12:00:00 AM");
ts = -62198784343000; // milliseconds before epoch, Friday, January 1, -0001 12:00:00 AM GMT+08:06
test_ts2char(ts, "Day, Month dd, YYYY hh12:mi:ss AM", TSDB_TIME_PRECISION_MILLI,
"Friday , January 01, -001 12:00:00 AM");
}
TEST(timeTest, char2ts) {
@ -609,7 +615,7 @@ TEST(timeTest, char2ts) {
ASSERT_EQ(-1, TEST_char2ts("yyyyMMdd ", &ts, TSDB_TIME_PRECISION_MICRO, "2100/2/1"));
// nothing to be converted to dd
ASSERT_EQ(0, TEST_char2ts("yyyyMMdd ", &ts, TSDB_TIME_PRECISION_MICRO, "210012"));
ASSERT_EQ(ts, 4131273600000000LL); // 2100-12-1
ASSERT_EQ(ts, 4131273600000000LL); // 2100-12-1
ASSERT_EQ(-1, TEST_char2ts("yyyyMMdd ", &ts, TSDB_TIME_PRECISION_MICRO, "21001"));
ASSERT_EQ(-1, TEST_char2ts("yyyyMM-dd ", &ts, TSDB_TIME_PRECISION_MICRO, "23a1-1"));
@ -635,8 +641,55 @@ TEST(timeTest, char2ts) {
ASSERT_EQ(0, TEST_char2ts("yyyy年 MM/ddTZH", &ts, TSDB_TIME_PRECISION_MICRO, "1970年 1/1+0"));
ASSERT_EQ(ts, 0);
ASSERT_EQ(0, TEST_char2ts("yyyy年 a a a MM/ddTZH", &ts, TSDB_TIME_PRECISION_MICRO, "1970年 a a a 1/1+0"));
ASSERT_EQ(0, TEST_char2ts("yyyy年 a a a a a a a a a a a a a a a MM/ddTZH", &ts, TSDB_TIME_PRECISION_MICRO, "1970年 a "));
ASSERT_EQ(0, TEST_char2ts("yyyy年 a a a a a a a a a a a a a a a MM/ddTZH", &ts, TSDB_TIME_PRECISION_MICRO,
"1970年 a "));
ASSERT_EQ(-3, TEST_char2ts("yyyy-mm-DDD", &ts, TSDB_TIME_PRECISION_MILLI, "1970-01-001"));
}
TEST(timeTest, epSet) {
{
SEpSet ep = {0};
addEpIntoEpSet(&ep, "local", 14);
addEpIntoEpSet(&ep, "aocal", 13);
addEpIntoEpSet(&ep, "abcal", 12);
addEpIntoEpSet(&ep, "abcaleb", 11);
epsetSort(&ep);
ASSERT_EQ(strcmp(ep.eps[0].fqdn, "abcal"), 0);
ASSERT_EQ(ep.eps[0].port, 12);
ASSERT_EQ(strcmp(ep.eps[1].fqdn, "abcaleb"), 0);
ASSERT_EQ(ep.eps[1].port, 11);
ASSERT_EQ(strcmp(ep.eps[2].fqdn, "aocal"), 0);
ASSERT_EQ(ep.eps[2].port, 13);
ASSERT_EQ(strcmp(ep.eps[3].fqdn, "local"), 0);
ASSERT_EQ(ep.eps[3].port, 14);
}
{
SEpSet ep = {0};
addEpIntoEpSet(&ep, "local", 14);
addEpIntoEpSet(&ep, "local", 13);
addEpIntoEpSet(&ep, "local", 12);
addEpIntoEpSet(&ep, "local", 11);
epsetSort(&ep);
ASSERT_EQ(strcmp(ep.eps[0].fqdn, "local"), 0);
ASSERT_EQ(ep.eps[0].port, 11);
ASSERT_EQ(strcmp(ep.eps[1].fqdn, "local"), 0);
ASSERT_EQ(ep.eps[1].port, 12);
ASSERT_EQ(strcmp(ep.eps[2].fqdn, "local"), 0);
ASSERT_EQ(ep.eps[2].port, 13);
ASSERT_EQ(strcmp(ep.eps[3].fqdn, "local"), 0);
ASSERT_EQ(ep.eps[3].port, 14);
}
{
SEpSet ep = {0};
addEpIntoEpSet(&ep, "local", 14);
epsetSort(&ep);
ASSERT_EQ(ep.numOfEps, 1);
}
}
#pragma GCC diagnostic pop

View File

@ -169,11 +169,29 @@ static int32_t dmParseArgs(int32_t argc, char const *argv[]) {
return -1;
}
} else if (strcmp(argv[i], "-a") == 0) {
tstrncpy(global.apolloUrl, argv[++i], PATH_MAX);
if (i < argc - 1) {
if (strlen(argv[++i]) >= PATH_MAX) {
printf("apollo url overflow");
return -1;
}
tstrncpy(global.apolloUrl, argv[i], PATH_MAX);
} else {
printf("'-a' requires a parameter\n");
return -1;
}
} else if (strcmp(argv[i], "-s") == 0) {
global.dumpSdb = true;
} else if (strcmp(argv[i], "-E") == 0) {
tstrncpy(global.envFile, argv[++i], PATH_MAX);
if (i < argc - 1) {
if (strlen(argv[++i]) >= PATH_MAX) {
printf("env file path overflow");
return -1;
}
tstrncpy(global.envFile, argv[i], PATH_MAX);
} else {
printf("'-E' requires a parameter\n");
return -1;
}
} else if (strcmp(argv[i], "-k") == 0) {
global.generateGrant = true;
} else if (strcmp(argv[i], "-C") == 0) {
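Both the -a and -E branches now share one defensive shape: confirm a value follows the flag, bound-check its length, then copy. Folded into a single hypothetical helper for illustration (not part of the source tree):

static int32_t dmReadOptArg(int32_t argc, char const* argv[], int32_t* i, char* dst, size_t cap) {
  if (*i >= argc - 1) {
    printf("'%s' requires a parameter\n", argv[*i]);
    return -1;
  }
  if (strlen(argv[++(*i)]) >= cap) {  // reject values that would be truncated
    printf("argument to '%s' overflows %zu bytes\n", argv[*i - 1], cap);
    return -1;
  }
  tstrncpy(dst, argv[*i], cap);
  return 0;
}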

View File

@ -16,7 +16,6 @@
#define _DEFAULT_SOURCE
#include "dmInt.h"
#include "systable.h"
#include "tgrant.h"
extern SConfig *tsCfg;
@ -114,9 +113,11 @@ void dmSendStatusReq(SDnodeMgmt *pMgmt) {
req.updateTime = pMgmt->pData->updateTime;
req.numOfCores = tsNumOfCores;
req.numOfSupportVnodes = tsNumOfSupportVnodes;
req.numOfDiskCfg = tsDiskCfgNum;
req.memTotal = tsTotalMemoryKB * 1024;
req.memAvail = req.memTotal - tsRpcQueueMemoryAllowed - 16 * 1024 * 1024;
tstrncpy(req.dnodeEp, tsLocalEp, TSDB_EP_LEN);
tstrncpy(req.machineId, pMgmt->pData->machineId, TSDB_MACHINE_ID_LEN + 1);
req.clusterCfg.statusInterval = tsStatusInterval;
req.clusterCfg.checkTime = 0;
@ -319,7 +320,7 @@ int32_t dmAppendVariablesToBlock(SSDataBlock *pBlock, int32_t dnodeId) {
for (int32_t i = 0, c = 0; i < numOfCfg; ++i, c = 0) {
SConfigItem *pItem = taosArrayGet(tsCfg->array, i);
GRANT_CFG_SKIP;
// GRANT_CFG_SKIP;
SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, c++);
colDataSetVal(pColInfo, i, (const char *)&dnodeId, false);
@ -429,7 +430,7 @@ SArray *dmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_DND_CONFIG_DNODE, dmPutNodeMsgToMgmtQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_DND_SERVER_STATUS, dmPutNodeMsgToMgmtQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_DND_SYSTABLE_RETRIEVE, dmPutNodeMsgToMgmtQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_DND_ALTER_MNODE_TYPE, dmPutNodeMsgToMgmtQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_DND_ALTER_MNODE_TYPE, dmPutNodeMsgToMgmtQueue, 0) == NULL) goto _OVER;
// Requests handled by MNODE
if (dmSetMgmtHandle(pArray, TDMT_MND_GRANT, dmPutNodeMsgToMgmtQueue, 0) == NULL) goto _OVER;

View File

@ -163,7 +163,6 @@ SArray *mmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_MND_DROP_STREAM, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_PAUSE_STREAM, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_RESUME_STREAM, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_GRANT_RSP, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_RETRIEVE_IP_WHITE, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_GET_USER_WHITELIST, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
@ -193,6 +192,7 @@ SArray *mmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_MND_DROP_VIEW, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_VIEW_META, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_KILL_COMPACT, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_CONFIG_CLUSTER, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_QUERY_COMPACT_PROGRESS_RSP, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_SCH_QUERY, mmPutMsgToQueryQueue, 1) == NULL) goto _OVER;
@ -223,6 +223,7 @@ SArray *mmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_TASK_UPDATE_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_TASK_RESET_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_STREAM_HEARTBEAT, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_STREAM_REQ_CHKPT, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_KILL_COMPACT_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_CONFIG_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;

View File

@ -84,13 +84,11 @@ SArray *smGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_PAUSE, smPutNodeMsgToMgmtQueue, 1) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_RESUME, smPutNodeMsgToMgmtQueue, 1) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_STOP, smPutNodeMsgToMgmtQueue, 1) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_STREAM_HTASK_DROP, smPutNodeMsgToMgmtQueue, 1) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_TASK_CHECK, smPutNodeMsgToStreamQueue, 1) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_TASK_CHECK_RSP, smPutNodeMsgToStreamQueue, 1) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_SCAN_HISTORY_FINISH, smPutNodeMsgToStreamQueue, 1) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_SCAN_HISTORY_FINISH_RSP, smPutNodeMsgToStreamQueue, 1) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_CHECKPOINT_READY, smPutNodeMsgToStreamQueue, 1) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_TASK_RESET, smPutNodeMsgToMgmtQueue, 1) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_STREAM_HEARTBEAT_RSP, smPutNodeMsgToStreamQueue, 1) == NULL) goto _OVER;
code = 0;
_OVER:

View File

@ -828,18 +828,16 @@ SArray *vmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_DISPATCH_RSP, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_STREAM_RETRIEVE, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_STREAM_RETRIEVE_RSP, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_SCAN_HISTORY_FINISH, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_SCAN_HISTORY_FINISH_RSP, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_TASK_CHECK, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_TASK_CHECK_RSP, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_PAUSE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_RESUME, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_STOP, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_STREAM_HTASK_DROP, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_CHECK_POINT_SOURCE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_CHECKPOINT_READY, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_TASK_UPDATE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_TASK_RESET, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_STREAM_HEARTBEAT_RSP, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_REPLICA, vmPutMsgToMgmtQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_CONFIG, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;

View File

@ -72,7 +72,6 @@ typedef struct SUdfdData {
int32_t dnodeId;
} SUdfdData;
#ifndef TD_MODULE_OPTIMIZE
typedef struct SDnode {
int8_t once;
bool stop;
@ -86,21 +85,6 @@ typedef struct SDnode {
SMgmtWrapper wrappers[NODE_END];
SDnodeTrans trans;
} SDnode;
#else
typedef struct SDnode {
int8_t once;
bool stop;
EDndRunStatus status;
SStartupInfo startup;
SDnodeTrans trans;
SUdfdData udfdData;
TdThreadMutex mutex;
TdFilePtr lockfile;
SDnodeData data;
STfs *pTfs;
SMgmtWrapper wrappers[NODE_END];
} SDnode;
#endif
// dmEnv.c
SDnode *dmInstance();
@ -115,12 +99,7 @@ int32_t dmMarkWrapper(SMgmtWrapper *pWrapper);
void dmReleaseWrapper(SMgmtWrapper *pWrapper);
int32_t dmInitVars(SDnode *pDnode);
void dmClearVars(SDnode *pDnode);
#ifdef TD_MODULE_OPTIMIZE
int32_t dmInitModule(SDnode *pDnode, SMgmtWrapper *wrappers);
bool dmRequireNode(SDnode *pDnode, SMgmtWrapper *pWrapper);
#else
int32_t dmInitModule(SDnode *pDnode);
#endif
int32_t dmInitModule(SDnode *pDnode);
SMgmtInputOpt dmBuildMgmtInputOpt(SMgmtWrapper *pWrapper);
void dmSetStatus(SDnode *pDnode, EDndRunStatus stype);
void dmProcessServerStartupStatus(SDnode *pDnode, SRpcMsg *pMsg);
@ -143,11 +122,7 @@ void dmCleanupClient(SDnode *pDnode);
void dmCleanupStatusClient(SDnode *pDnode);
void dmCleanupSyncClient(SDnode *pDnode);
SMsgCb dmGetMsgcb(SDnode *pDnode);
#ifdef TD_MODULE_OPTIMIZE
int32_t dmInitMsgHandle(SDnode *pDnode, SMgmtWrapper *wrappers);
#else
int32_t dmInitMsgHandle(SDnode *pDnode);
#endif
int32_t dmProcessNodeMsg(SMgmtWrapper *pWrapper, SRpcMsg *pMsg);
// dmMonitor.c

View File

@ -22,9 +22,9 @@
#ifdef TD_TSZ
#include "tcompression.h"
#include "tglobal.h"
#include "tgrant.h"
#endif
#ifndef TD_MODULE_OPTIMIZE
static bool dmRequireNode(SDnode *pDnode, SMgmtWrapper *pWrapper) {
SMgmtInputOpt input = dmBuildMgmtInputOpt(pWrapper);
@ -38,7 +38,6 @@ static bool dmRequireNode(SDnode *pDnode, SMgmtWrapper *pWrapper) {
return required;
}
#endif
int32_t dmInitDnode(SDnode *pDnode) {
dDebug("start to create dnode");
@ -81,15 +80,9 @@ int32_t dmInitDnode(SDnode *pDnode) {
if (pDnode->lockfile == NULL) {
goto _OVER;
}
#ifdef TD_MODULE_OPTIMIZE
if (dmInitModule(pDnode, pDnode->wrappers) != 0) {
goto _OVER;
}
#else
if (dmInitModule(pDnode) != 0) {
goto _OVER;
}
#endif
indexInit(tsNumOfCommitThreads);
streamMetaInit();
@ -145,6 +138,16 @@ int32_t dmInitVars(SDnode *pDnode) {
pData->rebootTime = taosGetTimestampMs();
pData->dropped = 0;
pData->stopped = 0;
char *machineId = tGetMachineId();
if (machineId) {
tstrncpy(pData->machineId, machineId, TSDB_MACHINE_ID_LEN + 1);
taosMemoryFreeClear(machineId);
} else {
#if defined(TD_ENTERPRISE) && !defined(GRANTS_CFG)
terrno = TSDB_CODE_DNODE_NO_MACHINE_CODE;
return -1;
#endif
}
pData->dnodeHash = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
if (pData->dnodeHash == NULL) {

View File

@ -251,33 +251,6 @@ _OVER:
dmReleaseWrapper(pWrapper);
}
#ifdef TD_MODULE_OPTIMIZE
int32_t dmInitMsgHandle(SDnode *pDnode, SMgmtWrapper *wrappers) {
SDnodeTrans *pTrans = &pDnode->trans;
for (EDndNodeType ntype = DNODE; ntype < NODE_END; ++ntype) {
SMgmtWrapper *pWrapper = wrappers + ntype;
SArray *pArray = (*pWrapper->func.getHandlesFp)();
if (pArray == NULL) return -1;
for (int32_t i = 0; i < taosArrayGetSize(pArray); ++i) {
SMgmtHandle *pMgmt = taosArrayGet(pArray, i);
SDnodeHandle *pHandle = &pTrans->msgHandles[TMSG_INDEX(pMgmt->msgType)];
if (pMgmt->needCheckVgId) {
pHandle->needCheckVgId = pMgmt->needCheckVgId;
}
if (!pMgmt->needCheckVgId) {
pHandle->defaultNtype = ntype;
}
pWrapper->msgFps[TMSG_INDEX(pMgmt->msgType)] = pMgmt->msgFp;
}
taosArrayDestroy(pArray);
}
return 0;
}
#else
int32_t dmInitMsgHandle(SDnode *pDnode) {
SDnodeTrans *pTrans = &pDnode->trans;
@ -303,7 +276,6 @@ int32_t dmInitMsgHandle(SDnode *pDnode) {
return 0;
}
#endif
static inline int32_t dmSendReq(const SEpSet *pEpSet, SRpcMsg *pMsg) {
SDnode *pDnode = dmInstance();
@ -350,7 +322,7 @@ static bool rpcRfp(int32_t code, tmsg_t msgType) {
code == TSDB_CODE_SYN_RESTORING || code == TSDB_CODE_VND_STOPPED || code == TSDB_CODE_APP_IS_STARTING ||
code == TSDB_CODE_APP_IS_STOPPING) {
if (msgType == TDMT_SCH_QUERY || msgType == TDMT_SCH_MERGE_QUERY || msgType == TDMT_SCH_FETCH ||
msgType == TDMT_SCH_MERGE_FETCH || msgType == TDMT_SCH_TASK_NOTIFY) {
msgType == TDMT_SCH_MERGE_FETCH || msgType == TDMT_SCH_TASK_NOTIFY || msgType == TDMT_VND_DROP_TTL_TABLE) {
return false;
}
return true;

View File

@ -109,6 +109,7 @@ typedef struct {
SMsgCb msgCb;
bool validMnodeEps;
int64_t ipWhiteVer;
char machineId[TSDB_MACHINE_ID_LEN + 1];
} SDnodeData;
typedef struct {

View File

@ -223,7 +223,7 @@ int32_t dmWriteEps(SDnodeData *pData) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
if((code == dmInitDndInfo(pData)) != 0) goto _OVER;
if ((code = dmInitDndInfo(pData)) != 0) goto _OVER;
pJson = tjsonCreateObject();
if (pJson == NULL) goto _OVER;
pData->engineVer = tsVersion;
@ -289,6 +289,7 @@ static void dmResetEps(SDnodeData *pData, SArray *dnodeEps) {
pData->mnodeEps.eps[mIndex] = pDnodeEp->ep;
mIndex++;
}
epsetSort(&pData->mnodeEps);
for (int32_t i = 0; i < numOfEps; i++) {
SDnodeEp *pDnodeEp = taosArrayGet(dnodeEps, i);

Some files were not shown because too many files have changed in this diff