other: merge 3.0

This commit is contained in:
Haojun Liao 2024-01-30 18:35:59 +08:00
commit 556baed25d
162 changed files with 5085 additions and 1076 deletions

View File

@ -306,7 +306,7 @@ def pre_test_build_win() {
cd %WIN_CONNECTOR_ROOT%
python.exe -m pip install --upgrade pip
python -m pip uninstall taospy -y
python -m pip install taospy==2.7.12
python -m pip install taospy==2.7.13
python -m pip uninstall taos-ws-py -y
python -m pip install taos-ws-py==0.3.1
xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taos.dll C:\\Windows\\System32

View File

@ -12,7 +12,7 @@
[![Build Status](https://travis-ci.org/taosdata/TDengine.svg?branch=master)](https://travis-ci.org/taosdata/TDengine)
[![Build status](https://ci.appveyor.com/api/projects/status/kf3pwh2or5afsgl9/branch/master?svg=true)](https://ci.appveyor.com/project/sangshuduo/tdengine-2n8ge/branch/master)
[![Coverage Status](https://coveralls.io/repos/github/taosdata/TDengine/badge.svg?branch=develop)](https://coveralls.io/github/taosdata/TDengine?branch=develop)
[![Coverage Status](https://coveralls.io/repos/github/taosdata/TDengine/badge.svg?branch=3.0)](https://coveralls.io/github/taosdata/TDengine?branch=3.0)
[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/4201/badge)](https://bestpractices.coreinfrastructure.org/projects/4201)
Simplified Chinese | [English](README.md) | [TDengine Cloud](https://cloud.taosdata.com/?utm_medium=cn&utm_source=github) | Many positions are open for hire; see [here](https://www.taosdata.com/cn/careers/)

View File

@ -12,7 +12,7 @@
[![Build Status](https://cloud.drone.io/api/badges/taosdata/TDengine/status.svg?ref=refs/heads/master)](https://cloud.drone.io/taosdata/TDengine)
[![Build status](https://ci.appveyor.com/api/projects/status/kf3pwh2or5afsgl9/branch/master?svg=true)](https://ci.appveyor.com/project/sangshuduo/tdengine-2n8ge/branch/master)
[![Coverage Status](https://coveralls.io/repos/github/taosdata/TDengine/badge.svg?branch=develop)](https://coveralls.io/github/taosdata/TDengine?branch=develop)
[![Coverage Status](https://coveralls.io/repos/github/taosdata/TDengine/badge.svg?branch=3.0)](https://coveralls.io/github/taosdata/TDengine?branch=3.0)
[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/4201/badge)](https://bestpractices.coreinfrastructure.org/projects/4201)
<br />
[![Twitter Follow](https://img.shields.io/twitter/follow/tdenginedb?label=TDengine&style=social)](https://twitter.com/tdenginedb)

View File

@ -1,6 +1,6 @@
# openssl
ExternalProject_Add(openssl
URL https://www.openssl.org/source/openssl-3.1.3.tar.gz
URL https://github.com/openssl/openssl/releases/download/openssl-3.1.3/openssl-3.1.3.tar.gz
URL_HASH SHA256=f0316a2ebd89e7f2352976445458689f80302093788c466692fb2a188b2eacf6
DOWNLOAD_NO_PROGRESS 1
DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"

View File

@ -101,7 +101,7 @@ Query OK, 2 row(s) in set (0.004076s)
## Query Examples
If you want query the data of "tags": {"location": "California.LosAngeles", "groupid": 1}, here is the query SQL:
If you want to query the data of "tags": &lcub;"location": "California.LosAngeles", "groupid": 1&rcub;, here is the query SQL:
```sql
SELECT * FROM `meters_current` WHERE location = "California.LosAngeles" AND groupid = 1;

View File

@ -22,7 +22,7 @@ import CAsync from "./_c_async.mdx";
SQL is used by TDengine as its query language. Application programs can send SQL statements to TDengine through REST API or client libraries. TDengine's CLI `taos` can also be used to execute ad hoc SQL queries. Here is the list of major query functionalities supported by TDengine:
- Query on single column or multiple columns
- Filter on tags or data columns: >, <, =, <\>, like
- Filter on tags or data columns: &gt;, &lt;, =, &lt;&gt;, like
- Grouping of results: `Group By` - Sorting of results: `Order By` - Limit the number of results: `Limit/Offset`
- Windowed aggregate queries for time windows (interval), session windows (session), and state windows (state_window)
- Arithmetic on columns of numeric types or aggregate results
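A minimal sketch combining several of these capabilities, assuming the `meters` supertable (columns `ts`, `current`, `voltage`; tag `location`) used elsewhere in these docs:
```sql
-- windowed aggregate with filtering, partitioning, and a result limit
SELECT _wstart, AVG(current), MAX(voltage)
FROM meters
WHERE voltage > 200 AND ts > NOW - 1d
PARTITION BY location
INTERVAL(10m)
LIMIT 100;
```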
@ -159,7 +159,7 @@ In the section describing [Insert](../insert-data/sql-writing), a database named
:::note
1. With either REST connection or native connection, the above sample code works well.
2. Please note that `use db` can't be used in case of REST connection because it's stateless. You can specify the database name by either the REST endpoint's parameter or <db_name>.<table_name> in the SQL command.
2. Please note that `use db` can't be used in case of REST connection because it's stateless. You can specify the database name by either the REST endpoint's parameter or &lt;db_name&gt;.&lt;table_name&gt; in the SQL command.
:::
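For example, a sketch assuming the `power` database from the insert examples; qualifying the table name avoids `use db` over a REST connection:
```sql
-- db_name.table_name works without a prior USE statement
SELECT COUNT(*) FROM power.meters;
```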

View File

@ -104,7 +104,7 @@ Replace `aggfn` with the name of your function.
### UDF Interface Definition in C
There are strict naming conventions for interface functions. The names of the start, finish, init, and destroy interfaces must be <udf-name\>_start, <udf-name\>_finish, <udf-name\>_init, and <udf-name\>_destroy, respectively. Replace `scalarfn`, `aggfn`, and `udf` with the name of your user-defined function.
There are strict naming conventions for interface functions. The names of the start, finish, init, and destroy interfaces must be &lt;udf-name&gt;_start, &lt;udf-name&gt;_finish, &lt;udf-name&gt;_init, and &lt;udf-name&gt;_destroy, respectively. Replace `scalarfn`, `aggfn`, and `udf` with the name of your user-defined function.
Interface functions return a value that indicates whether the operation was successful. If an operation fails, the interface function returns an error code. Otherwise, it returns TSDB_CODE_SUCCESS. The error codes are defined in `taoserror.h` and in the common API error codes in `taos.h`. For example, TSDB_CODE_UDF_INVALID_INPUT indicates invalid input. TSDB_CODE_OUT_OF_MEMORY indicates insufficient memory.
@ -194,7 +194,7 @@ typedef struct SUdfInterBuf {
```
The data structure is described as follows:
- The SUdfDataBlock block includes the number of rows (numOfRows) and the number of columns (numCols). udfCols[i] (0 <= i <= numCols-1) indicates that each column is of type SUdfColumn.
- The SUdfDataBlock block includes the number of rows (numOfRows) and the number of columns (numCols). udfCols[i] (0 &lt;= i &lt;= numCols-1) indicates that each column is of type SUdfColumn.
- SUdfColumn includes the definition of the data type of the column (colMeta) and the data in the column (colData).
- The member definitions of SUdfColumnMeta are the same as the data type definitions in `taos.h`.
- The data in SUdfColumnData can become longer. varLenCol indicates variable-length data, and fixLenCol indicates fixed-length data.

View File

@ -186,7 +186,7 @@ The base API is used to do things like create database connections and provide a
- The variables database and len are supplied by the caller, who also allocates their space. The current database name and its length will be assigned to database and len.
- Whenever the db name cannot be assigned to database normally (including truncation), an error with return value -1 is returned, and the user can then use taos_errstr(NULL) to get the error message.
- If database==NULL or len<=0, returns an error, the space required to store the db (including the last '\0') in the variable required
- If database==NULL or len&lt;=0, an error is returned, and the space required to store the db (including the last '\0') is written to the variable required
- If len is less than the space required to store the db (including the last '\0'), an error is returned. The truncated data assigned to database ends with '\0'.
- If len is greater than or equal to the space required to store the db (including the last '\0'), 0 is returned normally, and the db name, ending with '\0', is assigned to database.

View File

@ -69,7 +69,7 @@ TDengine currently supports timestamp, number, character, Boolean type, and the
| SMALLINT | i16 |
| TINYINT | i8 |
| BOOL | bool |
| BINARY | Vec<u8\> |
| BINARY | Vec&lt;u8&gt; |
| NCHAR | String |
| JSON | serde_json::Value |

View File

@ -315,7 +315,7 @@ The `connect()` function returns a `taos.TaosConnection` instance. In client-sid
All arguments to the `connect()` function are optional keyword arguments. The following are the connection parameters specified.
- `url`: The URL of taosAdapter REST service. The default is <http://localhost:6041>.
- `url`: The URL of taosAdapter REST service. The default is `http://localhost:6041`.
- `user`: TDengine user name. The default is `root`.
- `password`: TDengine user password. The default is `taosdata`.
- `timeout`: HTTP request timeout. Enter a value in seconds. The default is `socket._GLOBAL_DEFAULT_TIMEOUT`. Usually, no configuration is needed.

View File

@ -8,7 +8,7 @@ description: This document describes the TDengine PHP client library.
The PHP client library relies on the TDengine client driver.
Project Repository: <https://github.com/Yurunsoft/php-tdengine>
Project Repository: [https://github.com/Yurunsoft/php-tdengine](https://github.com/Yurunsoft/php-tdengine)
After TDengine client or server is installed, `taos.h` is located at:

View File

@ -68,14 +68,14 @@ TDengine supports a variety of constants:
| # | **Syntax** | **Type** | **Description** |
| --- | :-----------------------------------------------: | --------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| 1 | [{+ \| -}]123 | BIGINT | Integer literals are of type BIGINT. Data that exceeds the length of the BIGINT type is truncated. |
| 1 | [+ \| -]123 | BIGINT | Integer literals are of type BIGINT. Data that exceeds the length of the BIGINT type is truncated. |
| 2 | 123.45 | DOUBLE | Floating-point literals are of type DOUBLE. Numeric values will be determined as integer or float type according to whether there is decimal point or whether scientific notation is used. |
| 3 | 1.2E3 | DOUBLE | Literals in scientific notation are of type DOUBLE. |
| 4 | 'abc' | BINARY | Content enclosed in single quotation marks is of type BINARY. The size of a BINARY is the size of the string in bytes. A literal single quote inside the string must be escaped with a backslash `\'`. |
| 5 | "abc" | BINARY | Content enclosed in double quotation marks is of type BINARY. The size of a BINARY is the size of the string in bytes. A literal double quote inside the string must be escaped with a backslash `\"`. |
| 6 | TIMESTAMP {'literal' \| "literal"} | TIMESTAMP | The TIMESTAMP keyword indicates that the following string literal is interpreted as a timestamp. The string must be in YYYY-MM-DD HH:mm:ss.MS format. The precision is inherited from the database configuration. |
| 7 | {TRUE \| FALSE} | BOOL | Boolean literals are of type BOOL. |
| 8 | {'' \| "" \| '\t' \| "\t" \| ' ' \| " " \| NULL } | -- | The preceding characters indicate null literals. These can be used with any data type. |
| 6 | TIMESTAMP ['literal' \| "literal"] | TIMESTAMP | The TIMESTAMP keyword indicates that the following string literal is interpreted as a timestamp. The string must be in YYYY-MM-DD HH:mm:ss.MS format. The precision is inherited from the database configuration. |
| 7 | [TRUE \| FALSE] | BOOL | Boolean literals are of type BOOL. |
| 8 | ['' \| "" \| '\t' \| "\t" \| ' ' \| " " \| NULL ] | -- | The preceding characters indicate null literals. These can be used with any data type. |
:::note
Numeric values will be determined as integer or float type according to whether there is decimal point or whether scientific notation is used, so attention must be paid to avoid overflow. For example, 9999999999999999999 will be considered as overflow because it exceeds the upper limit of long integer, but 9999999999999999999.0 will be considered as a legal float number.
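A sketch exercising several literal forms from the table above; it assumes constant `SELECT` statements without a `FROM` clause are accepted:
```sql
SELECT 123, 1.2E3, 'abc', TIMESTAMP '2024-01-30 18:35:59.000', TRUE;
```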

View File

@ -56,7 +56,7 @@ database_option: {
- WAL_FSYNC_PERIOD: specifies the interval (in milliseconds) at which data is written from the WAL to disk. This parameter takes effect only when the WAL parameter is set to 2. The default value is 3000. Enter a value between 0 and 180000. The value 0 indicates that incoming data is immediately written to disk.
- MAXROWS: specifies the maximum number of rows recorded in a block. The default value is 4096.
- MINROWS: specifies the minimum number of rows recorded in a block. The default value is 100.
- KEEP: specifies the time for which data is retained. Enter a value between 1 and 365000. The default value is 3650. The value of the KEEP parameter must be greater than or equal to three times of the value of the DURATION parameter. TDengine automatically deletes data that is older than the value of the KEEP parameter. You can use m (minutes), h (hours), and d (days) as the unit, for example KEEP 100h or KEEP 10d. If you do not include a unit, d is used by default. TDengine Enterprise supports [Tiered Storage](https://docs.tdengine.com/tdinternal/arch/#tiered-storage) function, thus multiple KEEP values (comma separated and up to 3 values supported, and meet keep 0 <= keep 1 <= keep 2, e.g. KEEP 100h,100d,3650d) are supported; TDengine OSS does not support Tiered Storage function (although multiple keep values are configured, they do not take effect, only the maximum keep value is used as KEEP).
- KEEP: specifies the time for which data is retained. Enter a value between 1 and 365000. The default value is 3650. The value of the KEEP parameter must be greater than or equal to three times of the value of the DURATION parameter. TDengine automatically deletes data that is older than the value of the KEEP parameter. You can use m (minutes), h (hours), and d (days) as the unit, for example KEEP 100h or KEEP 10d. If you do not include a unit, d is used by default. TDengine Enterprise supports [Tiered Storage](https://docs.tdengine.com/tdinternal/arch/#tiered-storage) function, thus multiple KEEP values (comma separated and up to 3 values supported, and meet keep 0 &lt;= keep 1 &lt;= keep 2, e.g. KEEP 100h,100d,3650d) are supported; TDengine OSS does not support Tiered Storage function (although multiple keep values are configured, they do not take effect, only the maximum keep value is used as KEEP).
- PAGES: specifies the number of pages in the metadata storage engine cache on each vnode. Enter a value greater than or equal to 64. The default value is 256. The space occupied by metadata storage on each vnode is equal to the product of the values of the PAGESIZE and PAGES parameters. The space occupied by default is 1 MB.
- PAGESIZE: specifies the size (in KB) of each page in the metadata storage engine cache on each vnode. The default value is 4. Enter a value between 1 and 16384.
- PRECISION: specifies the precision at which a database records timestamps. Enter ms for milliseconds, us for microseconds, or ns for nanoseconds. The default value is ms.
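A sketch combining a few of the options above (the database name is hypothetical; the multi-value KEEP form requires TDengine Enterprise, and every KEEP value must still be at least three times DURATION):
```sql
CREATE DATABASE IF NOT EXISTS power
  DURATION 10d
  KEEP 30d,100d,3650d
  WAL_FSYNC_PERIOD 3000
  PRECISION 'ms';
```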

View File

@ -491,6 +491,8 @@ TO_CHAR(ts, format_str_literal)
**Description**: Converts a ts column to a string in the specified format
**Version**: Since ver-3.2.2.0
**Return value type**: VARCHAR
**Applicable column types**: TIMESTAMP
@ -550,6 +552,8 @@ TO_TIMESTAMP(ts_str_literal, format_str_literal)
**Description**: Converts a formatted timestamp string to a timestamp
**Version**: Since ver-3.2.2.0
**Return value type**: TIMESTAMP
**Applicable column types**: VARCHAR
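A quick sketch of both functions; the table `t` and its `ts` column are hypothetical, and the format tokens are assumed to follow the supported-format list for these functions:
```sql
SELECT TO_CHAR(ts, 'YYYY-MM-DD HH24:MI:SS') FROM t;
SELECT TO_TIMESTAMP('2024-01-30 18:35:59', 'YYYY-MM-DD HH24:MI:SS');
```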
@ -877,11 +881,11 @@ HISTOGRAM(expr, bin_type, bin_description, normalized)
- "user_input": "[1, 3, 5, 7]":
User specified bin values.
- "linear_bin": "{"start": 0.0, "width": 5.0, "count": 5, "infinity": true}"
- "linear_bin": "&lcub;"start": 0.0, "width": 5.0, "count": 5, "infinity": true&rcub;"
"start" - bin starting point. "width" - bin offset. "count" - number of bins generated. "infinity" - whether to add (-inf, inf) as start/end point in generated set of bins.
The above "linear_bin" descriptor generates a set of bins: [-inf, 0.0, 5.0, 10.0, 15.0, 20.0, +inf].
- "log_bin": "{"start":1.0, "factor": 2.0, "count": 5, "infinity": true}"
- "log_bin": "&lcub;"start":1.0, "factor": 2.0, "count": 5, "infinity": true&rcub;"
"start" - bin starting point. "factor" - exponential factor of bin offset. "count" - number of bins generated. "infinity" - whether to add (-inf, inf) as start/end point in generated range of bins.
The above "linear_bin" descriptor generates a set of bins: [-inf, 1.0, 2.0, 4.0, 8.0, 16.0, +inf].
- normalized: setting to 1/0 to turn on/off result normalization. Valid values are 0 or 1.
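A sketch of a `linear_bin` call matching the descriptor above, assuming the `meters` supertable with a `voltage` column:
```sql
SELECT HISTOGRAM(voltage, 'linear_bin',
  '{"start": 0.0, "width": 5.0, "count": 5, "infinity": true}', 0)
FROM meters;
```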
@ -977,7 +981,7 @@ ignore_null_values: {
- `INTERP` is used to get the value that matches the specified time slice from a column. If no such value exists, an interpolation value will be returned based on the `FILL` parameter.
- The input data of `INTERP` is the value of the specified column and a `where` clause can be used to filter the original data. If no `where` condition is specified then all original data is the input.
- `INTERP` must be used along with `RANGE`, `EVERY`, `FILL` keywords.
- The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1 <= timestamp2. timestamp1 is the starting point of the output time range. timestamp2 is the ending point of the output time range.
- The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1 &lt;= timestamp2. timestamp1 is the starting point of the output time range. timestamp2 is the ending point of the output time range.
- The number of rows in the result set of `INTERP` is determined by the parameter `EVERY(time_unit)`. Starting from timestamp1, one interpolation is performed for every time interval specified by the `time_unit` parameter. The parameter `time_unit` must be an integer, with no quotes, with a time unit of: a (millisecond), s (second), m (minute), h (hour), d (day), or w (week). For example, `EVERY(500a)` will interpolate every 500 milliseconds.
- Interpolation is performed based on `FILL` parameter. For more information about FILL clause, see [FILL Clause](../distinguished/#fill-clause).
- When only one timestamp value is specified in `RANGE` clause, `INTERP` is used to generate interpolation at this point in time. In this case, `EVERY` clause can be omitted. For example, SELECT INTERP(col) FROM tb RANGE('2023-01-01 00:00:00') FILL(linear).
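A sketch combining `RANGE`, `EVERY`, and `FILL`; the table `tb` and column `col` are hypothetical:
```sql
-- one interpolated value every 500 ms across a one-minute range
SELECT INTERP(col) FROM tb
  RANGE('2023-01-01 00:00:00', '2023-01-01 00:01:00')
  EVERY(500a)
  FILL(LINEAR);
```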

View File

@ -67,7 +67,7 @@ If a stream is created with PARTITION BY clause and SUBTABLE clause, the name of
CREATE STREAM avg_vol_s INTO avg_vol SUBTABLE(CONCAT('new-', tname)) AS SELECT _wstart, count(*), avg(voltage) FROM meters PARTITION BY tbname tname INTERVAL(1m);
```
IN PARTITION clause, 'tbname', representing each subtable name of source supertable, is given alias 'tname'. And 'tname' is used in SUBTABLE clause. In SUBTABLE clause, each auto created subtable will concat 'new-' and source subtable name as their name. Other expressions are also allowed in SUBTABLE clause, but the output type must be varchar.
In the PARTITION clause, 'tbname', representing each subtable name of the source supertable, is given the alias 'tname', which is then used in the SUBTABLE clause: each auto-created subtable concatenates 'new-' with the source subtable name as its name (starting from 3.2.3.0, '_groupId' is appended to the subtable name so that SUBTABLE expressions that cannot distinguish subtables do not write multiple timelines into one subtable).
If the output length exceeds the TDengine limit (192), the name is truncated. If the generated name is already occupied by some other table, the creation of the new subtable and writes to it will fail.

View File

@ -35,9 +35,9 @@ TDengine supports the `UNION` and `UNION ALL` operations. UNION ALL collects all
| # | **Operator** | **Supported Data Types** | **Description** |
| --- | :---------------: | -------------------------------------------------------------------- | -------------------- |
| 1 | = | All types except BLOB, MEDIUMBLOB, and JSON | Equal to |
| 2 | <\>, != | All types except BLOB, MEDIUMBLOB, and JSON; the primary key (timestamp) is also not supported | Not equal to |
| 3 | \>, < | All types except BLOB, MEDIUMBLOB, and JSON | Greater than and less than |
| 4 | \>=, <= | All types except BLOB, MEDIUMBLOB, and JSON | Greater than or equal to and less than or equal to |
| 2 | &lt;&gt;, != | All types except BLOB, MEDIUMBLOB, and JSON; the primary key (timestamp) is also not supported | Not equal to |
| 3 | &gt;, &lt; | All types except BLOB, MEDIUMBLOB, and JSON | Greater than and less than |
| 4 | &gt;=, &lt;= | All types except BLOB, MEDIUMBLOB, and JSON | Greater than or equal to and less than or equal to |
| 5 | IS [NOT] NULL | All types | Indicates whether the value is null |
| 6 | [NOT] BETWEEN AND | All types except BLOB, MEDIUMBLOB, JSON and GEOMETRY | Closed interval comparison |
| 7 | IN | All types except BLOB, MEDIUMBLOB, and JSON; the primary key (timestamp) is also not supported | Equal to any value in the list |
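A sketch using several of the operators above, again assuming the `meters` supertable:
```sql
SELECT * FROM meters
WHERE voltage >= 220
  AND groupid <> 3
  AND current BETWEEN 10 AND 15
  AND location IN ('California.LosAngeles', 'California.SanFrancisco');
```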

View File

@ -71,7 +71,7 @@ The following data types can be used in the schema for standard tables.
| 44 | SHOW STREAMS | Modified | This statement previously showed continuous queries. The continuous query feature has been replaced with the stream processing feature. This statement now shows streams that have been created.
| 45 | SHOW SUBSCRIPTIONS | Added | Shows all subscriptions in the current database.
| 46 | SHOW TABLES | Modified | Only shows table names.
| 47 | SHOW TABLE DISTRIBUTED | Added | Shows how table data is distributed. This replaces the `SELECT _block_dist() FROM { tb_name | stb_name }` command.
| 47 | SHOW TABLE DISTRIBUTED | Added | Shows how table data is distributed. This replaces the `SELECT _block_dist() FROM &lcub; tb_name | stb_name &rcub;` command.
| 48 | SHOW TOPICS | Added | Shows all subscribed topics in the current database.
| 49 | SHOW TRANSACTIONS | Added | Shows all running transactions in the system.
| 50 | SHOW DNODE VARIABLES | Added | Shows the configuration of the specified dnode.
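For instance (`d1001` is a hypothetical subtable name):
```sql
SHOW TABLE DISTRIBUTED d1001;
SHOW SUBSCRIPTIONS;
```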

View File

@ -15,7 +15,7 @@ Diagnostic steps:
2. On the server side, execute command `taos -n server -P <port> -l <pktlen>` to monitor the port range starting from the port specified by the `-P` parameter, with the role of "server".
3. On the client side, execute command `taos -n client -h <fqdn of server> -P <port> -l <pktlen>` to send a testing package to the specified server and port.
-l <pktlen\>: The size of the testing package, in bytes. The value range is [11, 64,000] and default value is 1,000.
-l &lt;pktlen&gt;: The size of the testing package, in bytes. The value range is [11, 64,000] and default value is 1,000.
Please note that the package length must be the same in the above 2 commands executed on the server side and client side respectively.
Output of the server side for the example is below:
@ -63,7 +63,7 @@ Once this parameter is set to 135 or 143, the log file grows very quickly especi
## Client Log
An independent log file, named as "taoslog+<seq num\>" is generated for each client program, i.e. a client process. The parameter `debugFlag` is used to control the log level. The default value is 131. For debugging and tracing, it needs to be set to either 135 or 143 respectively.
An independent log file, named as "taoslog+&lt;seq num&gt;" is generated for each client program, i.e. a client process. The parameter `debugFlag` is used to control the log level. The default value is 131. For debugging and tracing, it needs to be set to either 135 or 143 respectively.
The default value of `debugFlag` is also 131 and only logs at level of INFO/ERROR/WARNING are recorded. As stated above, for debugging and tracing, it needs to be changed to 135 or 143 respectively, so that logs at DEBUG or TRACE level can be recorded.

View File

@ -81,7 +81,7 @@ Parameter Description:
:::note
URL Encoding. Make sure that parameters are properly encoded. For example, when specifying a timezone you must properly encode special characters. ?tz=Etc/GMT+10 will not work because the <+> plus symbol is recognized as a space in the url. It's best practice to encode all special characters in a parameter. Instead use ?tz=Etc%2FGMT%2B10 for the parameter.
URL Encoding. Make sure that parameters are properly encoded. For example, when specifying a timezone you must properly encode special characters. ?tz=Etc/GMT+10 will not work because the plus symbol (+) is interpreted as a space in the URL. It's best practice to encode all special characters in a parameter. Instead use ?tz=Etc%2FGMT%2B10 for the parameter.
:::

View File

@ -166,8 +166,8 @@ See [example/config/taosadapter.toml](https://github.com/taosdata/taosadapter/bl
- Compatible with InfluxDB v1 write interface
[https://docs.influxdata.com/influxdb/v2.0/reference/api/influxdb-1x/write/](https://docs.influxdata.com/influxdb/v2.0/reference/api/influxdb-1x/write/)
- Compatible with OpenTSDB JSON and telnet format writes
- <http://opentsdb.net/docs/build/html/api_http/put.html>
- <http://opentsdb.net/docs/build/html/api_telnet/put.html>
- [http://opentsdb.net/docs/build/html/api_http/put.html](http://opentsdb.net/docs/build/html/api_http/put.html)
- [http://opentsdb.net/docs/build/html/api_telnet/put.html](http://opentsdb.net/docs/build/html/api_telnet/put.html)
- Seamless connection to collectd
collectd is a system statistics collection daemon, please visit [https://collectd.org/](https://collectd.org/) for more information.
- Seamless connection with StatsD

View File

@ -94,67 +94,67 @@ taosBenchmark -f <json file>
## Command-line argument in detail
- **-f/--file <json file\>** :
- **-f/--file &lt;json file&gt;** :
specify the configuration file to use. This file includes all parameters. Users should not use this parameter with other parameters on the command-line. There is no default value.
- **-c/--config-dir <dir\>** :
- **-c/--config-dir &lt;dir&gt;** :
specify the directory where the TDengine cluster configuration file. The default path is `/etc/taos`.
- **-h/--host <host\>** :
- **-h/--host &lt;host&gt;** :
Specify the FQDN of the TDengine server to connect to. The default value is localhost.
- **-P/--port <port\>** :
- **-P/--port &lt;port&gt;** :
The port number of the TDengine server to connect to, the default value is 6030.
- **-I/--interface <insertMode\>** :
- **-I/--interface &lt;insertMode&gt;** :
Insert mode. Options are taosc, rest, stmt, sml, sml-rest, corresponding to normal write, restful interface writing, parameter binding interface writing, schemaless interface writing, RESTful schemaless interface writing (provided by taosAdapter). The default value is taosc.
- **-u/--user <user\>** :
- **-u/--user &lt;user&gt;** :
User name to connect to the TDengine server. Default is root.
- **-U/--supplement-insert** :
Supplementally insert data without creating the database and tables. Optional; the default is off.
- **-p/--password <passwd\>** :
- **-p/--password &lt;passwd&gt;** :
The default password to connect to the TDengine server is `taosdata`.
- **-o/--output <file\>** :
- **-o/--output &lt;file&gt;** :
specify the path of the result output file, the default value is `./output.txt`.
- **-T/--thread <threadNum\>** :
- **-T/--thread &lt;threadNum&gt;** :
The number of threads to insert data. Default is 8.
- **-B/--interlace-rows <rowNum\>** :
- **-B/--interlace-rows &lt;rowNum&gt;** :
Enables interleaved insertion mode and specifies the number of rows of data to be inserted into each child table. Interleaved insertion mode means inserting the number of rows specified by this parameter into each sub-table and repeating the process until all sub-tables have been inserted. The default value is 0, i.e., data is inserted into one sub-table before the next sub-table is inserted.
- **-i/--insert-interval <timeInterval\>** :
- **-i/--insert-interval &lt;timeInterval&gt;** :
Specify the insert interval in `ms` for interleaved insert mode. The default value is 0. It only works if `-B/--interlace-rows` is greater than 0. After inserting interlaced rows for each child table, the data insertion thread will wait for the interval specified by this value before proceeding to the next round of writes.
- **-r/--rec-per-req <rowNum\>** :
- **-r/--rec-per-req &lt;rowNum&gt;** :
The number of rows of records written to TDengine per request; the default value is 30000.
- **-t/--tables <tableNum\>** :
- **-t/--tables &lt;tableNum&gt;** :
Specify the number of sub-tables. The default is 10000.
- **-S/--timestampstep <stepLength\>** :
- **-S/--timestampstep &lt;stepLength&gt;** :
Timestamp step for inserting data in each child table in ms, default is 1.
- **-n/--records <recordNum\>** :
- **-n/--records &lt;recordNum&gt;** :
The default value of the number of records inserted in each sub-table is 10000.
- **-d/--database <dbName\>** :
- **-d/--database &lt;dbName&gt;** :
The name of the database used, the default value is `test`.
- **-b/--data-type <colType\>** :
- **-b/--data-type &lt;colType&gt;** :
specify the type of the data columns of the super table. It defaults to three columns of type FLOAT, INT, and FLOAT if not used.
- **-l/--columns <colNum\>** :
- **-l/--columns &lt;colNum&gt;** :
specify the number of columns in the super table. If both this parameter and `-b/--data-type` is set, the final result number of columns is the greater of the two. If the number specified by this parameter is greater than the number of columns specified by `-b/--data-type`, the unspecified column type defaults to INT, for example: `-l 5 -b float,double`, then the final column is `FLOAT,DOUBLE,INT,INT,INT`. If the number of columns specified is less than or equal to the number of columns specified by `-b/--data-type`, then the result is the column and type specified by `-b/--data-type`, e.g.: `-l 3 -b float,double,float,bigint`. The last column is `FLOAT,DOUBLE, FLOAT,BIGINT`.
- **-L/--partial-col-num <colNum\> ** :
- **-L/--partial-col-num &lt;colNum&gt;** :
Specify first numbers of columns has data. Rest of columns' data are NULL. Default is all columns have data.
- **-A/--tag-type <tagType\>** :
- **-A/--tag-type &lt;tagType&gt;** :
The tag column type of the super table. nchar and binary types can both set the length, for example:
```
@ -168,10 +168,10 @@ Note: In some shells, such as bash, "()" needs to be escaped, so the above comma
taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
```
- **-w/--binwidth <length\>**:
- **-w/--binwidth &lt;length&gt;**:
specify the default length for nchar and binary types. The default value is 64.
- **-m/--table-prefix <tablePrefix\>** :
- **-m/--table-prefix &lt;tablePrefix&gt;** :
The prefix of the sub-table name, the default value is "d".
- **-E/--escape-character** :
@ -192,25 +192,25 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
- **-y/--answer-yes** :
Switch parameter that requires the user to confirm at the prompt to continue. The default value is false.
- **-O/--disorder <Percentage\>** :
- **-O/--disorder &lt;Percentage&gt;** :
Specify the percentage probability of disordered data, with a value range of [0,50]. The default is 0, i.e., there is no disordered data.
- **-R/--disorder-range <timeRange\>** :
- **-R/--disorder-range &lt;timeRange&gt;** :
Specify the timestamp range for the disordered data. The resulting disordered timestamp is the ordered timestamp minus a random value in this range. Valid only if the percentage of disordered data specified by `-O/--disorder` is greater than 0.
- **-F/--prepared_rand <Num\>** :
- **-F/--prepared_rand &lt;Num&gt;** :
Specify the number of unique values in the generated random data. A value of 1 means that all data are equal. The default value is 10000.
- **-a/--replica <replicaNum\>** :
- **-a/--replica &lt;replicaNum&gt;** :
Specify the number of replicas when creating the database. The default value is 1.
- **-k/--keep-trying <NUMBER\>** :
- **-k/--keep-trying &lt;NUMBER&gt;** :
Keep trying if failed to insert, default is no. Available with v3.0.9+.
- **-z/--trying-interval <NUMBER\>** :
- **-z/--trying-interval &lt;NUMBER&gt;** :
Specify the interval between insert retries. The valid value is a positive number. Only valid when keep trying is enabled. Available with v3.0.9+.
- **-v/--vgroups <NUMBER\>** :
- **-v/--vgroups &lt;NUMBER&gt;** :
Specify vgroups number for creating a database, only valid with daemon version 3.0+
- **-V/--version** :
@ -226,7 +226,7 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
The parameters listed in this section apply to all function modes.
- **filetype** : The function to be tested, with optional values `insert`, `query` and `subscribe`. These correspond to the insert, query, and subscribe functions, respectively. Users can specify only one of these in each configuration file.
**cfgdir**: specify the TDengine client configuration file's directory. The default path is /etc/taos.
- **cfgdir**: specify the TDengine client configuration file's directory. The default path is `/etc/taos`.
- **host**: Specify the FQDN of the TDengine server to connect. The default value is `localhost`.

View File

@ -226,9 +226,10 @@ Please note the `taoskeeper` needs to be installed and running to create the `lo
| Attribute | Description |
| ------------- | --------------------------------------------------------------------------------------------------------------- |
| Applicable | Client only |
| Meaning | When the Last, First, LastRow function is queried, whether the returned column name contains the function name. |
| Value Range | 0 means including the function name, 1 means not including the function name. |
| Meaning | When the Last, First, and LastRow functions are queried and no alias is specified, the alias is automatically set to the column name (excluding the function name). Therefore, if the order by clause refers to the column name, it will automatically refer to the function corresponding to the column. |
| Value Range | 1 means automatically setting the alias to the column name (excluding the function name), 0 means not automatically setting the alias. |
| Default Value | 0 |
| Notes | When multiple of the above functions act on the same column at the same time and no alias is specified, if the order by clause refers to the column name, a column-selection conflict will occur because the aliases of multiple columns are the same. |
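A sketch of the query pattern this client option affects, assuming the `meters` supertable:
```sql
-- With the option set to 1, LAST(voltage) is implicitly aliased to `voltage`,
-- so the ORDER BY resolves to the function result; with the default 0, no
-- implicit alias is set.
SELECT LAST(voltage) FROM meters PARTITION BY tbname ORDER BY voltage;
```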
## Locale Parameters
@ -288,7 +289,7 @@ A specific type "nchar" is provided in TDengine to store non-ASCII characters su
The characters input on the client side are encoded using the default system encoding, which is UTF-8 on Linux/macOS, GB18030 or GBK on some Chinese-language systems, POSIX in Docker, and CP936 on Chinese-language Windows. The encoding of the operating system in use must be set correctly so that the characters in nchar type can be converted to UCS4-LE.
The locale definition standard on Linux/macOS is: <Language\>\_<Region\>.<charset\>, for example, in "zh_CN.UTF-8", "zh" means Chinese, "CN" means China mainland, "UTF-8" means charset. The charset indicates how to display the characters. On Linux/macOS, the charset can be set by locale in the system. On Windows system another configuration parameter `charset` must be used to configure charset because the locale used on Windows is not POSIX standard. Of course, `charset` can also be used on Linux/macOS to specify the charset.
The locale definition standard on Linux/macOS is: &lt;Language&gt;\_&lt;Region&gt;.&lt;charset&gt;; for example, in "zh_CN.UTF-8", "zh" means Chinese, "CN" means China mainland, and "UTF-8" means charset. The charset indicates how to display the characters. On Linux/macOS, the charset can be set via the system locale. On Windows, another configuration parameter, `charset`, must be used because the locale used on Windows is not POSIX-standard. `charset` can also be used on Linux/macOS to specify the charset.
:::

View File

@ -93,9 +93,13 @@ Note that tag_key1, tag_key2 are not the original order of the tags entered by t
The string's MD5 hash value "md5_val" is calculated after the sorting is completed. The calculation result is then combined with the string to generate the table name: "t_md5_val". "t\_" is a fixed prefix that every table generated by this mapping relationship has.
:::
If you do not want to use an automatically generated table name, there are two ways to specify sub table names, the first one has a higher priority.
You can configure smlAutoChildTableNameDelimiter in taos.cfg(except for `@ # space \r \t \n`), for example, `smlAutoChildTableNameDelimiter=tname`. You can insert `st,t0=cpul,t1=4 c1=3 1626006833639000000` and the table name will be cpu1-4.
You can configure smlChildTableName in taos.cfg to specify table names, for example, `smlChildTableName=tname`. You can insert `st,tname=cpul,t1=4 c1=3 1626006833639000000` and the cpu1 table will be automatically created. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
If you do not want to use an automatically generated table name, there are two ways to specify subtable names (the first one has a higher priority).
1. You can configure smlAutoChildTableNameDelimiter in taos.cfg (any character except `@ # space \r \t \n`).
1. For example, with `smlAutoChildTableNameDelimiter=-`, you can insert `st,t0=cpu1,t1=4 c1=3 1626006833639000000` and the table name will be cpu1-4.
2. You can configure smlChildTableName in taos.cfg to specify table names.
2. For example, with `smlChildTableName=tname`, you can insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000` and the cpu1 table will be automatically created. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
2. If the super table obtained by parsing the line protocol does not exist, this super table is created.
**Important:** Manually creating supertables for schemaless writing is not supported. Schemaless writing creates appropriate supertables automatically.

View File

@ -36,7 +36,7 @@ LoadPlugin network
</Plugin>
```
where <taosAdapter's host\> fills in the server's domain name or IP address running taosAdapter. <port for collectd direct\> fills in the port that taosAdapter uses to receive collectd data (default is 6045).
where &lt;taosAdapter's host&gt; is the domain name or IP address of the server running taosAdapter, and &lt;port for collectd direct&gt; is the port that taosAdapter uses to receive collectd data (default is 6045).
An example is as follows.
@ -62,7 +62,7 @@ LoadPlugin write_tsdb
</Plugin>
```
Where <taosAdapter's host\> is the domain name or IP address of the server running taosAdapter. <port for collectd write_tsdb plugin\> Fill in the data that taosAdapter uses to receive the collectd write_tsdb plugin (default is 6047).
Where &lt;taosAdapter's host&gt; is the domain name or IP address of the server running taosAdapter, and &lt;port for collectd write_tsdb plugin&gt; is the port that taosAdapter uses to receive collectd write_tsdb plugin data (default is 6047).
```text
LoadPlugin write_tsdb

View File

@ -26,7 +26,7 @@ The default database name written by the taosAdapter is `icinga2`. You can also
### Configure icinga2
- Enable opentsdb-writer for icinga2 (refer to the link https://icinga.com/docs/icinga-2/latest/doc/14-features/#opentsdb-writer)
- Modify the configuration file `/etc/icinga2/features-enabled/opentsdb.conf` by filling in <taosAdapter's host\> as the domain name or IP address of the server running taosAdapter and <port for icinga2\> as the corresponding port on which taosAdapter supports receiving icinga2 data (default is 6048)
- Modify the configuration file `/etc/icinga2/features-enabled/opentsdb.conf` by filling in &lt;taosAdapter's host&gt; as the domain name or IP address of the server running taosAdapter and &lt;port for icinga2&gt; as the corresponding port on which taosAdapter supports receiving icinga2 data (default is 6048)
```
object OpenTsdbWriter "opentsdb" {

View File

@ -9,8 +9,8 @@ Point the `remote_read url` and `remote_write url` to the domain name or IP addr
### Configure Basic authentication
- username: <TDengine's username>
- password: <TDengine's password>
- username: TDengine's username
- password: TDengine's password
### Example configuration of remote_write and remote_read related sections in prometheus.yml file

View File

@ -31,7 +31,7 @@ The default database name written by taosAdapter is `statsd`. To specify a diffe
### Configuring StatsD
To use StatsD, you need to download its [source code](https://github.com/statsd/statsd). Please refer to the example file `exampleConfig.js` in the root directory of the source download to modify the configuration file. In <taosAdapter's host\>, please fill in the domain name or IP address of the server running taosAdapter, and <port for StatsD\>, please fill in the port where taosAdapter receives StatsD data (default is 6044).
To use StatsD, you need to download its [source code](https://github.com/statsd/statsd). Refer to the example file `exampleConfig.js` in the root directory of the source download when modifying the configuration file. For &lt;taosAdapter's host&gt;, fill in the domain name or IP address of the server running taosAdapter; for &lt;port for StatsD&gt;, fill in the port where taosAdapter receives StatsD data (default is 6044).
```
backends section add "./backends/repeater"

View File

@ -10,7 +10,7 @@ In the Telegraf configuration file (default location `/etc/telegraf/telegraf.con
...
```
Where <taosAdapter's host\> please fill in the server's domain name or IP address running the taosAdapter service. <REST service port\> please fill in the port of the REST service (default is 6041). <TDengine's username\> and <TDengine's password\> please fill in the actual configuration of the currently running TDengine. And <database name\> please fill in the database name where you want to store Telegraf data in TDengine.
Where &lt;taosAdapter's host&gt; is the domain name or IP address of the server running the taosAdapter service, &lt;REST service port&gt; is the port of the REST service (default is 6041), &lt;TDengine's username&gt; and &lt;TDengine's password&gt; are the actual credentials of the currently running TDengine, and &lt;database name&gt; is the database in TDengine where you want to store Telegraf data.
An example is as follows.

View File

@ -23,7 +23,7 @@ Record these values:
## Installing Grafana
TDengine currently supports Grafana versions 7.5 and above. Users can go to the Grafana official website to download the installation package and execute the installation according to the current operating system. The download address is as follows: <https://grafana.com/grafana/download>.
TDengine currently supports Grafana versions 7.5 and above. Users can go to the Grafana official website to download the installation package and execute the installation according to the current operating system. The download address is as follows: [https://grafana.com/grafana/download](https://grafana.com/grafana/download).
## Configuring Grafana
@ -59,7 +59,7 @@ bash -c "$(curl -fsSL \
-p taosdata
```
Restart Grafana service and open Grafana in web-browser, usually <http://localhost:3000>.
Restart Grafana service and open Grafana in web-browser, usually `http://localhost:3000`.
Save the script and type `./install.sh --help` for the full usage of the script.
@ -181,7 +181,7 @@ You can setup a zero-configuration stack for TDengine + Grafana by [docker-compo
3. Start TDengine and Grafana services: `docker-compose up -d`.
Open Grafana <http://localhost:3000>, and you can add dashboard with TDengine now.
Open Grafana (http://localhost:3000), and you can now add dashboards with TDengine.
</TabItem>
</Tabs>
@ -202,7 +202,7 @@ As shown above, select the `TDengine` data source in the `Query` and enter the c
:::note
Since the REST connection because is stateless. Grafana plugin can use <db_name>.<table_name> in the SQL command to specify the database name.
Since the REST connection is stateless, the Grafana plugin can use &lt;db_name&gt;.&lt;table_name&gt; in the SQL command to specify the database name.
:::

View File

@ -345,7 +345,7 @@ The following configuration items apply to TDengine Sink Connector and TDengine
### TDengine Sink Connector specific configuration
1. `connection.database`: The name of the target database. If the specified database does not exist, it will be created automatically. The time precision used when the database is created automatically is nanoseconds. The default value is null. When it is NULL, refer to the description of the `connection.database.prefix` parameter for the naming rules of the target database
2. `connection.database.prefix`: When `connection.database` is null, the prefix of the target database. Can contain placeholder '${topic}'. For example, kafka_${topic}, for topic 'orders' will be written to database 'kafka_orders'. Default null. When null, the name of the target database is the same as the name of the topic.
2. `connection.database.prefix`: When `connection.database` is null, the prefix of the target database. Can contain placeholder '$&lcub;topic&rcub;'. For example, kafka_$&lcub;topic&rcub;, for topic 'orders' will be written to database 'kafka_orders'. Default null. When null, the name of the target database is the same as the name of the topic.
3. `batch.size`: The number of records written in each batch. When the data received by the sink connector at one time is larger than this value, it will be written in multiple batches.
4. `max.retries`: The maximum number of retries when an error occurs. Defaults to 1.
5. `retry.backoff.ms`: The time interval for retry when sending an error. The unit is milliseconds. The default is 3000.
@ -370,12 +370,12 @@ The following configuration items apply to TDengine Sink Connector and TDengine
## Other notes
1. To use Kafka Connect, refer to <https://kafka.apache.org/documentation/#connect>.
1. To use Kafka Connect, refer to [https://kafka.apache.org/documentation/#connect](https://kafka.apache.org/documentation/#connect).
## Feedback
<https://github.com/taosdata/kafka-connect-tdengine/issues>
[https://github.com/taosdata/kafka-connect-tdengine/issues](https://github.com/taosdata/kafka-connect-tdengine/issues)
## Reference
1. For more information, see <https://kafka.apache.org/documentation/>
1. For more information, see [https://kafka.apache.org/documentation/](https://kafka.apache.org/documentation/).

View File

@ -491,6 +491,8 @@ TO_CHAR(ts, format_str_literal)
**Description**: Converts a timestamp value to a string in the specified format
**Version**: Since ver-3.2.2.0
**Return value type**: VARCHAR
**Applicable column types**: TIMESTAMP
@ -550,6 +552,8 @@ TO_TIMESTAMP(ts_str_literal, format_str_literal)
**Description**: Converts a string to a timestamp according to the specified format.
**Version**: Since ver-3.2.2.0
**Return value type**: TIMESTAMP
**Applicable column types**: VARCHAR

View File

@ -34,7 +34,7 @@ subquery: SELECT select_list
stb_name is the name of the supertable that stores the computation results. If this supertable does not exist, it is created automatically; if it already exists, the column schema is checked. See: Writing to an Existing Supertable.
The TAGS clause defines the rules for creating tags in stream processing, so that custom tag values can be generated for the subtable corresponding to each partition. See: Custom TAG.
```sql
create_definition:
col_name column_definition
@ -77,7 +77,7 @@ SELECT _wstart, count(*), avg(voltage) FROM meters PARTITION BY tbname INTERVAL(
CREATE STREAM avg_vol_s INTO avg_vol SUBTABLE(CONCAT('new-', tname)) AS SELECT _wstart, count(*), avg(voltage) FROM meters PARTITION BY tbname tname INTERVAL(1m);
```
In the PARTITION clause, an alias tname is defined for tbname. Aliases in the PARTITION clause can be used in expression evaluation in the SUBTABLE clause; in the example above, each subtable newly created by the stream is named by prefixing the original table name with 'new-'.
In the PARTITION clause, an alias tname is defined for tbname. Aliases in the PARTITION clause can be used in expression evaluation in the SUBTABLE clause; in the example above, each subtable newly created by the stream is named by prefixing the original table name with 'new-' (starting from 3.2.3.0, _groupId is appended to the specified subtable name so that SUBTABLE expressions that cannot distinguish subtables do not mistakenly write multiple timelines into one subtable).
Note that if the subtable name exceeds the TDengine length limit, it is truncated. If the generated subtable name already exists under another supertable, then because subtable names in TDengine are unique, the creation of the corresponding new subtable and the writing of its data will fail.

View File

@ -215,9 +215,10 @@ taos -C
| Attribute | Description |
| -------- | ----------------------------------------------------------- |
| Applicable | Client only |
| Meaning | Whether the column name returned by Last, First, and LastRow function queries contains the function name. |
| Value Range | 0 means the function name is included; 1 means it is not. |
| Meaning | When the Last, First, or LastRow function is queried and no alias is specified, the alias is automatically set to the column name (without the function name), so an order by clause that refers to the column name automatically refers to the function applied to that column |
| Value Range | 1 means the alias is automatically set to the column name (without the function name); 0 means no alias is set automatically. |
| Default Value | 0 |
| Notes | When several of the above functions act on the same column at the same time and no alias is specified, if the order by clause refers to that column name, a column-selection conflict arises because multiple columns share the same alias |
### countAlwaysReturnValue

View File

@ -95,11 +95,12 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
Note that tag_key1 and tag_key2 here are not in the original order in which the user entered the tags, but are the result of sorting the tag names in ascending string order. Therefore, tag_key1 is not the first tag entered in the line protocol.
After sorting, the MD5 hash value "md5_val" of the string is calculated. The result is then combined with the string to generate the table name: "t_md5_val", where "t_" is a fixed prefix shared by every table automatically generated through this mapping relationship.
:::tip
If you do not want to use the automatically generated table name, there are two ways to specify subtable names; the first has higher priority:
Specify it by configuring the smlAutoChildTableNameDelimiter parameter in taos.cfg (any character except `@ # space carriage-return newline tab`).
For example: with smlAutoChildTableNameDelimiter=- configured, inserting st,t0=cpu1,t1=4 c1=3 1626006833639000000 creates a table named cpu1-4.
Specify it by configuring the smlChildTableName parameter in taos.cfg.
For example: with smlChildTableName=tname configured, inserting st,tname=cpu1,t1=4 c1=3 1626006833639000000 creates a table named cpu1. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used when the table is auto-created and the other rows are ignored
If you do not want to use the automatically generated table name, there are two ways to specify subtable names (the first has higher priority).
1. Specify it by configuring the smlAutoChildTableNameDelimiter parameter in taos.cfg (any character except `@ # space carriage-return newline tab`).
1. For example: with smlAutoChildTableNameDelimiter=- configured, inserting st,t0=cpu1,t1=4 c1=3 1626006833639000000 creates a table named cpu1-4.
2. Specify it by configuring the smlChildTableName parameter in taos.cfg.
1. For example: with smlChildTableName=tname configured, inserting st,tname=cpu1,t1=4 c1=3 1626006833639000000 creates a table named cpu1. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used when the table is auto-created and the other rows are ignored.
2. If the supertable obtained by parsing the line protocol does not exist, it is created (manually creating supertables is not recommended, as data insertion may otherwise behave abnormally).
3. If the subtable obtained by parsing the line protocol does not exist, schemaless writing creates the subtable using the subtable name determined in step 1 or 2.

View File

@ -48,6 +48,7 @@ typedef enum {
TSDB_GRANT_CPU_CORES,
TSDB_GRANT_STABLE,
TSDB_GRANT_TABLE,
TSDB_GRANT_SUBSCRIBE,
} EGrantType;
int32_t grantCheck(EGrantType grant);

View File

@ -51,6 +51,7 @@ bool isEpsetEqual(const SEpSet* s1, const SEpSet* s2);
void epsetAssign(SEpSet* dst, const SEpSet* pSrc);
void updateEpSet_s(SCorEpSet* pEpSet, SEpSet* pNewEpSet);
SEpSet getEpSet_s(SCorEpSet* pEpSet);
void epsetSort(SEpSet* pEpSet);
#ifdef __cplusplus
}

View File

@ -1975,6 +1975,14 @@ typedef struct {
char data[];
} SRetrieveTableRsp;
typedef struct {
int64_t version;
int64_t numOfRows;
int8_t compressed;
int8_t precision;
char data[];
} SRetrieveTableRspForTmq;
typedef struct {
int64_t handle;
int64_t useconds;
@ -3746,7 +3754,12 @@ typedef struct {
} SMqHbReq;
typedef struct {
int8_t reserved;
char topic[TSDB_TOPIC_FNAME_LEN];
int8_t noPrivilege;
} STopicPrivilege;
typedef struct {
SArray* topicPrivileges; // SArray<STopicPrivilege>
} SMqHbRsp;
typedef struct {
@ -3765,18 +3778,6 @@ typedef struct {
SVCreateTbReq cTbReq;
} SVSubmitBlk;
typedef struct {
int32_t flags;
int32_t nBlocks;
union {
SArray* pArray;
SVSubmitBlk* pBlocks;
};
} SVSubmitReq;
int32_t tEncodeSVSubmitReq(SEncoder* pCoder, const SVSubmitReq* pReq);
int32_t tDecodeSVSubmitReq(SDecoder* pCoder, SVSubmitReq* pReq);
typedef struct {
SMsgHead header;
uint64_t sId;
@ -3885,6 +3886,10 @@ int32_t tSerializeSMqHbReq(void* buf, int32_t bufLen, SMqHbReq* pReq);
int32_t tDeserializeSMqHbReq(void* buf, int32_t bufLen, SMqHbReq* pReq);
int32_t tDeatroySMqHbReq(SMqHbReq* pReq);
int32_t tSerializeSMqHbRsp(void* buf, int32_t bufLen, SMqHbRsp* pRsp);
int32_t tDeserializeSMqHbRsp(void* buf, int32_t bufLen, SMqHbRsp* pRsp);
int32_t tDeatroySMqHbRsp(SMqHbRsp* pRsp);
int32_t tSerializeSMqSeekReq(void* buf, int32_t bufLen, SMqSeekReq* pReq);
int32_t tDeserializeSMqSeekReq(void* buf, int32_t bufLen, SMqSeekReq* pReq);

View File

@ -249,6 +249,10 @@ typedef struct SPoint {
int32_t taosGetLinearInterpolationVal(SPoint *point, int32_t outputType, SPoint *point1, SPoint *point2,
int32_t inputType);
#define LEASTSQUARES_DOUBLE_ITEM_LENGTH 25
#define LEASTSQUARES_BUFF_LENGTH 128
#define DOUBLE_PRECISION_DIGITS "16e"
#ifdef __cplusplus
}
#endif

View File

@ -156,6 +156,7 @@ typedef struct SProjectLogicNode {
SNodeList* pProjections;
char stmtName[TSDB_TABLE_NAME_LEN];
bool ignoreGroupId;
bool inputIgnoreGroup;
} SProjectLogicNode;
typedef struct SIndefRowsFuncLogicNode {
@ -449,6 +450,7 @@ typedef struct SProjectPhysiNode {
SNodeList* pProjections;
bool mergeDataBlock;
bool ignoreGroupId;
bool inputIgnoreGroup;
} SProjectPhysiNode;
typedef struct SIndefRowsFuncPhysiNode {

View File

@ -150,7 +150,7 @@ int32_t smlBindData(SQuery* handle, bool dataFormat, SArray* tags, SArray* colsS
STableMeta* pTableMeta, char* tableName, const char* sTableName, int32_t sTableNameLen, int32_t ttl,
char* msgBuf, int32_t msgBufLen);
int32_t smlBuildOutput(SQuery* handle, SHashObj* pVgHash);
int rawBlockBindData(SQuery *query, STableMeta* pTableMeta, void* data, SVCreateTbReq* pCreateTb, TAOS_FIELD *fields, int numFields, bool needChangeLength);
int rawBlockBindData(SQuery *query, STableMeta* pTableMeta, void* data, SVCreateTbReq** pCreateTb, TAOS_FIELD *fields, int numFields, bool needChangeLength);
int32_t rewriteToVnodeModifyOpStmt(SQuery* pQuery, SArray* pBufArray);
SArray* serializeVgroupsCreateTableBatch(SHashObj* pVgroupHashmap);

View File

@ -126,6 +126,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_IP_NOT_IN_WHITE_LIST TAOS_DEF_ERROR_CODE(0, 0x0134)
#define TSDB_CODE_FAILED_TO_CONNECT_S3 TAOS_DEF_ERROR_CODE(0, 0x0135)
#define TSDB_CODE_MSG_PREPROCESSED TAOS_DEF_ERROR_CODE(0, 0x0136) // internal
//client
#define TSDB_CODE_TSC_INVALID_OPERATION TAOS_DEF_ERROR_CODE(0, 0x0200)
@ -521,6 +522,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_QRY_INVALID_INPUT TAOS_DEF_ERROR_CODE(0, 0x070F)
// #define TSDB_CODE_QRY_INVALID_SCHEMA_VERSION TAOS_DEF_ERROR_CODE(0, 0x0710) // 2.x
// #define TSDB_CODE_QRY_RESULT_TOO_LARGE TAOS_DEF_ERROR_CODE(0, 0x0711) // 2.x
#define TSDB_CODE_QRY_INVALID_WINDOW_CONDITION TAOS_DEF_ERROR_CODE(0, 0x0712)
#define TSDB_CODE_QRY_SCH_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x0720)
#define TSDB_CODE_QRY_TASK_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x0721)
#define TSDB_CODE_QRY_TASK_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0722)
@ -744,6 +746,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_PAR_INVALID_VIEW_QUERY TAOS_DEF_ERROR_CODE(0, 0x266C)
#define TSDB_CODE_PAR_COL_QUERY_MISMATCH TAOS_DEF_ERROR_CODE(0, 0x266D)
#define TSDB_CODE_PAR_VIEW_CONFLICT_WITH_TABLE TAOS_DEF_ERROR_CODE(0, 0x266E)
#define TSDB_CODE_PAR_ORDERBY_AMBIGUOUS TAOS_DEF_ERROR_CODE(0, 0x266F)
#define TSDB_CODE_PAR_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x26FF)
//planner

View File

@ -155,6 +155,7 @@ typedef struct STscObj {
int8_t biMode;
int32_t acctId;
uint32_t connId;
int32_t appHbMgrIdx;
int64_t id; // ref ID returned by taosAddRef
TdThreadMutex mutex; // used to protect the operation on db
int32_t numOfReqs; // number of sqlObj bound to this connection
@ -298,6 +299,8 @@ void doSetOneRowPtr(SReqResultInfo* pResultInfo);
void setResPrecision(SReqResultInfo* pResInfo, int32_t precision);
int32_t setQueryResultFromRsp(SReqResultInfo* pResultInfo, const SRetrieveTableRsp* pRsp, bool convertUcs4,
bool freeAfterUse);
int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32_t numOfCols, int32_t numOfRows,
bool convertUcs4);
void setResSchemaInfo(SReqResultInfo* pResInfo, const SSchema* pSchema, int32_t numOfCols);
void doFreeReqResultInfo(SReqResultInfo* pResInfo);
int32_t transferTableNameList(const char* tbList, int32_t acctId, char* dbName, SArray** pReq);

View File

@ -283,6 +283,7 @@ void *createTscObj(const char *user, const char *auth, const char *db, int32_t c
pObj->connType = connType;
pObj->pAppInfo = pAppInfo;
pObj->appHbMgrIdx = pAppInfo->pAppHbMgr->idx;
tstrncpy(pObj->user, user, sizeof(pObj->user));
memcpy(pObj->pass, auth, TSDB_PASSWORD_LEN);

View File

@ -30,7 +30,7 @@ typedef struct {
};
} SHbParam;
static SClientHbMgr clientHbMgr = {0};
SClientHbMgr clientHbMgr = {0};
static int32_t hbCreateThread();
static void hbStopThread();
@ -1294,9 +1294,8 @@ void hbMgrCleanUp() {
taosThreadMutexLock(&clientHbMgr.lock);
appHbMgrCleanup();
taosArrayDestroy(clientHbMgr.appHbMgrs);
clientHbMgr.appHbMgrs = taosArrayDestroy(clientHbMgr.appHbMgrs);
taosThreadMutexUnlock(&clientHbMgr.lock);
clientHbMgr.appHbMgrs = NULL;
}
int hbRegisterConnImpl(SAppHbMgr *pAppHbMgr, SClientHbKey connKey, int64_t clusterId) {
@ -1335,20 +1334,19 @@ int hbRegisterConn(SAppHbMgr *pAppHbMgr, int64_t tscRefId, int64_t clusterId, in
}
void hbDeregisterConn(STscObj *pTscObj, SClientHbKey connKey) {
SAppHbMgr *pAppHbMgr = pTscObj->pAppInfo->pAppHbMgr;
taosThreadMutexLock(&clientHbMgr.lock);
SAppHbMgr *pAppHbMgr = taosArrayGetP(clientHbMgr.appHbMgrs, pTscObj->appHbMgrIdx);
if (pAppHbMgr) {
SClientHbReq *pReq = taosHashAcquire(pAppHbMgr->activeInfo, &connKey, sizeof(SClientHbKey));
if (pReq) {
tFreeClientHbReq(pReq);
taosHashRemove(pAppHbMgr->activeInfo, &connKey, sizeof(SClientHbKey));
taosHashRelease(pAppHbMgr->activeInfo, pReq);
}
if (NULL == pReq) {
return;
}
atomic_sub_fetch_32(&pAppHbMgr->connKeyCnt, 1);
}
}
taosThreadMutexUnlock(&clientHbMgr.lock);
}
// set heart beat thread quit mode , if quicByKill 1 then kill thread else quit from inner
void taos_set_hb_quit(int8_t quitByKill) {

View File

@ -1795,6 +1795,7 @@ int32_t getVersion1BlockMetaSize(const char* p, int32_t numOfCols) {
static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, int32_t numOfRows) {
char* p = (char*)pResultInfo->pData;
int32_t blockVersion = *(int32_t*)p;
// | version | total length | total rows | total columns | flag seg| block group id | column schema | each column
// length |
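// A rough byte map of that header (a sketch, assuming the usual 4-byte fields and an
// 8-byte group id; offsets are illustrative, not authoritative):
//   [0]  int32 version       1 = column lengths stored big-endian, 2 = host byte order
//   [4]  int32 total length
//   [8]  int32 total rows
//   [12] int32 total columns
//   [16] int32 flag segment
//   [20] int64 block group id
//   ...  column schema entries, then the per-column length array, then column data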
@ -1810,7 +1811,7 @@ static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, i
char* pStart = p + len;
for (int32_t i = 0; i < numOfCols; ++i) {
int32_t colLen = htonl(colLength[i]);
int32_t colLen = (blockVersion == 1) ? htonl(colLength[i]) : colLength[i];
if (pResultInfo->fields[i].type == TSDB_DATA_TYPE_JSON) {
int32_t* offset = (int32_t*)pStart;
@ -1873,6 +1874,7 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int
tscDebug("start to convert form json format string");
char* p = (char*)pResultInfo->pData;
int32_t blockVersion = *(int32_t*)p;
int32_t dataLen = estimateJsonLen(pResultInfo, numOfCols, numOfRows);
if (dataLen <= 0) {
return TSDB_CODE_TSC_INTERNAL_ERROR;
@ -1908,8 +1910,8 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int
char* pStart = p;
char* pStart1 = p1;
for (int32_t i = 0; i < numOfCols; ++i) {
int32_t colLen = htonl(colLength[i]);
int32_t colLen1 = htonl(colLength1[i]);
int32_t colLen = (blockVersion == 1) ? htonl(colLength[i]) : colLength[i];
int32_t colLen1 = (blockVersion == 1) ? htonl(colLength1[i]) : colLength1[i];
if (ASSERT(colLen < dataLen)) {
tscError("doConvertJson error: colLen:%d >= dataLen:%d", colLen, dataLen);
return TSDB_CODE_TSC_INTERNAL_ERROR;
@ -1968,7 +1970,7 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int
}
colLen1 = len;
totalLen += colLen1;
colLength1[i] = htonl(len);
colLength1[i] = (blockVersion == 1) ? htonl(len) : len;
} else if (IS_VAR_DATA_TYPE(pResultInfo->fields[i].type)) {
len = numOfRows * sizeof(int32_t);
memcpy(pStart1, pStart, len);
@ -2057,7 +2059,9 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32
char* pStart = p;
for (int32_t i = 0; i < numOfCols; ++i) {
if(blockVersion == 1){
colLength[i] = htonl(colLength[i]);
}
if (colLength[i] >= dataLen) {
tscError("invalid colLength %d, dataLen %d", colLength[i], dataLen);
return TSDB_CODE_TSC_INTERNAL_ERROR;

View File

@ -26,6 +26,8 @@
#include "tname.h"
#include "tversion.h"
extern SClientHbMgr clientHbMgr;
static void setErrno(SRequestObj* pRequest, int32_t code) {
pRequest->code = code;
terrno = code;
@ -63,8 +65,9 @@ int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) {
STscObj* pTscObj = pRequest->pTscObj;
if (NULL == pTscObj->pAppInfo || NULL == pTscObj->pAppInfo->pAppHbMgr) {
setErrno(pRequest, TSDB_CODE_TSC_DISCONNECTED);
if (NULL == pTscObj->pAppInfo) {
code = TSDB_CODE_TSC_DISCONNECTED;
setErrno(pRequest, code);
tsem_post(&pRequest->body.rspSem);
goto End;
}
@ -95,7 +98,8 @@ int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) {
}
if (connectRsp.epSet.numOfEps == 0) {
setErrno(pRequest, TSDB_CODE_APP_ERROR);
code = TSDB_CODE_APP_ERROR;
setErrno(pRequest, code);
tsem_post(&pRequest->body.rspSem);
goto End;
}
@ -142,7 +146,18 @@ int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) {
pTscObj->authVer = connectRsp.authVer;
pTscObj->whiteListInfo.ver = connectRsp.whiteListVer;
hbRegisterConn(pTscObj->pAppInfo->pAppHbMgr, pTscObj->id, connectRsp.clusterId, connectRsp.connType);
taosThreadMutexLock(&clientHbMgr.lock);
SAppHbMgr* pAppHbMgr = taosArrayGetP(clientHbMgr.appHbMgrs, pTscObj->appHbMgrIdx);
if (pAppHbMgr) {
hbRegisterConn(pAppHbMgr, pTscObj->id, connectRsp.clusterId, connectRsp.connType);
} else {
taosThreadMutexUnlock(&clientHbMgr.lock);
code = TSDB_CODE_TSC_DISCONNECTED;
setErrno(pRequest, code);
tsem_post(&pRequest->body.rspSem);
goto End;
}
taosThreadMutexUnlock(&clientHbMgr.lock);
tscDebug("0x%" PRIx64 " clusterId:%" PRId64 ", totalConn:%" PRId64, pRequest->requestId, connectRsp.clusterId,
pTscObj->pAppInfo->numOfConns);

View File

@ -955,7 +955,6 @@ static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) {
if (code != TSDB_CODE_SUCCESS) {
goto end;
}
taosArrayPush(pRequest->tableList, &pName);
pCreateReq->flags |= TD_CREATE_IF_NOT_EXISTS;
// change tag cid to new cid
@ -966,6 +965,12 @@ static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) {
// pCreateReq->ctb.suid = processSuid(pCreateReq->ctb.suid, pRequest->pDb);
toName(pTscObj->acctId, pRequest->pDb, pCreateReq->ctb.stbName, &sName);
code = catalogGetTableMeta(pCatalog, &conn, &sName, &pTableMeta);
if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST) {
code = TSDB_CODE_SUCCESS;
taosMemoryFreeClear(pTableMeta);
continue;
}
if (code != TSDB_CODE_SUCCESS) {
goto end;
}
@ -983,6 +988,7 @@ static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) {
}
taosMemoryFreeClear(pTableMeta);
}
taosArrayPush(pRequest->tableList, &pName);
SVgroupCreateTableBatch* pTableBatch = taosHashGet(pVgroupHashmap, &pInfo.vgId, sizeof(pInfo.vgId));
if (pTableBatch == NULL) {
@ -999,6 +1005,9 @@ static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) {
}
}
if (taosHashGetSize(pVgroupHashmap) == 0) {
goto end;
}
SArray* pBufArray = serializeVgroupsCreateTableBatch(pVgroupHashmap);
if (NULL == pBufArray) {
code = TSDB_CODE_OUT_OF_MEMORY;
@ -1553,6 +1562,18 @@ end:
return code;
}
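// Compatibility note (an assumption read off the checks below): the leading int64 of the
// retrieve payload doubles as a discriminator. A legacy SRetrieveTableRsp begins with
// useconds, which is always written as 0, while the newer SRetrieveTableRspForTmq begins
// with version, which is written as 1, so those 8 bytes identify the struct layout.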
static void* getRawDataFromRes(void *pRetrieve){
void* rawData = NULL;
// deal with compatibility
if(*(int64_t*)pRetrieve == 0){
rawData = ((SRetrieveTableRsp*)pRetrieve)->data;
}else if(*(int64_t*)pRetrieve == 1){
rawData = ((SRetrieveTableRspForTmq*)pRetrieve)->data;
}
ASSERT(rawData != NULL);
return rawData;
}
static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen) {
if(taos == NULL || data == NULL){
terrno = TSDB_CODE_INVALID_PARA;
@ -1607,7 +1628,7 @@ static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen) {
}
pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
while (++rspObj.resIter < rspObj.rsp.blockNum) {
SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)taosArrayGetP(rspObj.rsp.blockData, rspObj.resIter);
void* pRetrieve = taosArrayGetP(rspObj.rsp.blockData, rspObj.resIter);
if (!rspObj.rsp.withSchema) {
goto end;
}
@ -1653,7 +1674,8 @@ static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen) {
fields[i].bytes = pSW->pSchema[i].bytes;
tstrncpy(fields[i].name, pSW->pSchema[i].name, tListLen(pSW->pSchema[i].name));
}
code = rawBlockBindData(pQuery, pTableMeta, pRetrieve->data, NULL, fields, pSW->nCols, true);
void* rawData = getRawDataFromRes(pRetrieve);
code = rawBlockBindData(pQuery, pTableMeta, rawData, NULL, fields, pSW->nCols, true);
taosMemoryFree(fields);
if (code != TSDB_CODE_SUCCESS) {
goto end;
@ -1737,7 +1759,7 @@ static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen)
uDebug(LOG_ID_TAG" write raw metadata block num:%d", LOG_ID_VALUE, rspObj.rsp.blockNum);
while (++rspObj.resIter < rspObj.rsp.blockNum) {
SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)taosArrayGetP(rspObj.rsp.blockData, rspObj.resIter);
void* pRetrieve = taosArrayGetP(rspObj.rsp.blockData, rspObj.resIter);
if (!rspObj.rsp.withSchema) {
goto end;
}
@ -1824,12 +1846,12 @@ static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen)
fields[i].bytes = pSW->pSchema[i].bytes;
tstrncpy(fields[i].name, pSW->pSchema[i].name, tListLen(pSW->pSchema[i].name));
}
code = rawBlockBindData(pQuery, pTableMeta, pRetrieve->data, pCreateReqDst, fields, pSW->nCols, true);
void* rawData = getRawDataFromRes(pRetrieve);
code = rawBlockBindData(pQuery, pTableMeta, rawData, &pCreateReqDst, fields, pSW->nCols, true);
taosMemoryFree(fields);
if (code != TSDB_CODE_SUCCESS) {
goto end;
}
pCreateReqDst = NULL;
taosMemoryFreeClear(pTableMeta);
}

View File

@ -406,10 +406,6 @@ int32_t stmtGetFromCache(STscStmt* pStmt) {
if (NULL == pStmt->sql.pTableCache || taosHashGetSize(pStmt->sql.pTableCache) <= 0) {
if (pStmt->bInfo.inExecCache) {
if (ASSERT(taosHashGetSize(pStmt->exec.pBlockHash) == 1)) {
tscError("stmtGetFromCache error");
return TSDB_CODE_TSC_STMT_CACHE_ERROR;
}
pStmt->bInfo.needParse = false;
tscDebug("reuse stmt block for tb %s in execBlock", pStmt->bInfo.tbFName);
return TSDB_CODE_SUCCESS;

View File

@ -155,6 +155,7 @@ typedef struct {
char db[TSDB_DB_FNAME_LEN];
SArray* vgs; // SArray<SMqClientVg>
SSchemaWrapper schema;
int8_t noPrivilege;
} SMqClientTopic;
typedef struct {
@ -739,6 +740,29 @@ void tmqAssignDelayedCommitTask(void* param, void* tmrId) {
int32_t tmqHbCb(void* param, SDataBuf* pMsg, int32_t code) {
if (pMsg) {
SMqHbRsp rsp = {0};
tDeserializeSMqHbRsp(pMsg->pData, pMsg->len, &rsp);
int64_t refId = *(int64_t*)param;
tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, refId);
if (tmq != NULL) {
taosWLockLatch(&tmq->lock);
for(int32_t i = 0; i < taosArrayGetSize(rsp.topicPrivileges); i++){
STopicPrivilege* privilege = taosArrayGet(rsp.topicPrivileges, i);
if(privilege->noPrivilege == 1){
int32_t topicNumCur = taosArrayGetSize(tmq->clientTopics);
for (int32_t j = 0; j < topicNumCur; j++) {
SMqClientTopic* pTopicCur = taosArrayGet(tmq->clientTopics, j);
if(strcmp(pTopicCur->topicName, privilege->topic) == 0){
tscInfo("consumer:0x%" PRIx64 ", has no privilege, topic:%s", tmq->consumerId, privilege->topic);
pTopicCur->noPrivilege = 1;
}
}
}
}
taosWUnLockLatch(&tmq->lock);
}
tDeatroySMqHbRsp(&rsp);
taosMemoryFree(pMsg->pData);
taosMemoryFree(pMsg->pEpSet);
}
@ -809,7 +833,9 @@ void tmqSendHbReq(void* param, void* tmrId) {
sendInfo->requestId = generateRequestId();
sendInfo->requestObjRefId = 0;
sendInfo->param = NULL;
sendInfo->paramFreeFp = taosMemoryFree;
sendInfo->param = taosMemoryMalloc(sizeof(int64_t));
*(int64_t *)sendInfo->param = refId;
sendInfo->fp = tmqHbCb;
sendInfo->msgType = TDMT_MND_TMQ_HB;
@ -1577,16 +1603,24 @@ SMqRspObj* tmqBuildRspFromWrapper(SMqPollRspWrapper* pWrapper, SMqClientVg* pVg,
pRspObj->resInfo.totalRows = 0;
pRspObj->resInfo.precision = TSDB_TIME_PRECISION_MILLI;
if (!pWrapper->dataRsp.withSchema) {
setResSchemaInfo(&pRspObj->resInfo, pWrapper->topicHandle->schema.pSchema, pWrapper->topicHandle->schema.nCols);
bool needTransformSchema = !pRspObj->rsp.withSchema;
if (!pRspObj->rsp.withSchema) { // withSchema is false when subscribing to a subquery, true when subscribing to a db or stable
pRspObj->rsp.withSchema = true;
pRspObj->rsp.blockSchema = taosArrayInit(pRspObj->rsp.blockNum, sizeof(void*));
}
// extract the rows in this data packet
for (int32_t i = 0; i < pRspObj->rsp.blockNum; ++i) {
SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)taosArrayGetP(pRspObj->rsp.blockData, i);
SRetrieveTableRspForTmq* pRetrieve = (SRetrieveTableRspForTmq*)taosArrayGetP(pRspObj->rsp.blockData, i);
int64_t rows = htobe64(pRetrieve->numOfRows);
pVg->numOfRows += rows;
(*numOfRows) += rows;
if (needTransformSchema) { // withSchema is false when subscribing to a subquery, true when subscribing to a db or stable
SSchemaWrapper *schema = tCloneSSchemaWrapper(&pWrapper->topicHandle->schema);
if(schema){
taosArrayPush(pRspObj->rsp.blockSchema, &schema);
}
}
}
return pRspObj;
@ -1603,13 +1637,10 @@ SMqTaosxRspObj* tmqBuildTaosxRspFromWrapper(SMqPollRspWrapper* pWrapper, SMqClie
pRspObj->resInfo.totalRows = 0;
pRspObj->resInfo.precision = TSDB_TIME_PRECISION_MILLI;
if (!pWrapper->taosxRsp.withSchema) {
setResSchemaInfo(&pRspObj->resInfo, pWrapper->topicHandle->schema.pSchema, pWrapper->topicHandle->schema.nCols);
}
// extract the rows in this data packet
for (int32_t i = 0; i < pRspObj->rsp.blockNum; ++i) {
SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)taosArrayGetP(pRspObj->rsp.blockData, i);
SRetrieveTableRspForTmq* pRetrieve = (SRetrieveTableRspForTmq*)taosArrayGetP(pRspObj->rsp.blockData, i);
int64_t rows = htobe64(pRetrieve->numOfRows);
pVg->numOfRows += rows;
(*numOfRows) += rows;
@ -1700,7 +1731,10 @@ static int32_t tmqPollImpl(tmq_t* tmq, int64_t timeout) {
for (int i = 0; i < numOfTopics; i++) {
SMqClientTopic* pTopic = taosArrayGet(tmq->clientTopics, i);
int32_t numOfVg = taosArrayGetSize(pTopic->vgs);
if(pTopic->noPrivilege){
tscDebug("consumer:0x%" PRIx64 " has no privilegr for topic:%s", tmq->consumerId, pTopic->topicName);
continue;
}
for (int j = 0; j < numOfVg; j++) {
SMqClientVg* pVg = taosArrayGet(pTopic->vgs, j);
if (taosGetTimestampMs() - pVg->emptyBlockReceiveTs < EMPTY_BLOCK_POLL_IDLE_DURATION) { // less than 10ms
@ -2548,7 +2582,7 @@ SReqResultInfo* tmqGetNextResInfo(TAOS_RES* res, bool convertUcs4) {
pRspObj->resIter++;
if (pRspObj->resIter < pRspObj->rsp.blockNum) {
SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)taosArrayGetP(pRspObj->rsp.blockData, pRspObj->resIter);
SRetrieveTableRspForTmq* pRetrieveTmq = (SRetrieveTableRspForTmq*)taosArrayGetP(pRspObj->rsp.blockData, pRspObj->resIter);
if (pRspObj->rsp.withSchema) {
SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(pRspObj->rsp.blockSchema, pRspObj->resIter);
setResSchemaInfo(&pRspObj->resInfo, pSW->pSchema, pSW->nCols);
@ -2559,7 +2593,16 @@ SReqResultInfo* tmqGetNextResInfo(TAOS_RES* res, bool convertUcs4) {
taosMemoryFreeClear(pRspObj->resInfo.convertJson);
}
setQueryResultFromRsp(&pRspObj->resInfo, pRetrieve, convertUcs4, false);
pRspObj->resInfo.pData = (void*)pRetrieveTmq->data;
pRspObj->resInfo.numOfRows = htobe64(pRetrieveTmq->numOfRows);
pRspObj->resInfo.current = 0;
pRspObj->resInfo.precision = pRetrieveTmq->precision;
// TODO handle the compressed case
pRspObj->resInfo.totalRows += pRspObj->resInfo.numOfRows;
setResultDataPtr(&pRspObj->resInfo, pRspObj->resInfo.fields, pRspObj->resInfo.numOfCols, pRspObj->resInfo.numOfRows,
convertUcs4);
return &pRspObj->resInfo;
}

View File

@ -2196,7 +2196,7 @@ int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols) {
// todo extract method
int32_t* version = (int32_t*)data;
*version = 1;
*version = 2;
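// version 2 means the per-column lengths are written in host byte order; the htonl()
// below is dropped, and decoders byte-swap the lengths only when version == 1.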
data += sizeof(int32_t);
int32_t* actualLen = (int32_t*)data;
@ -2277,7 +2277,7 @@ int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols) {
data += colSizes[col];
}
colSizes[col] = htonl(colSizes[col]);
// colSizes[col] = htonl(colSizes[col]);
// uError("blockEncode col bytes:%d, type:%d, size:%d, htonl size:%d", pColRes->info.bytes, pColRes->info.type,
// htonl(colSizes[col]), colSizes[col]);
}
@ -2293,7 +2293,6 @@ const char* blockDecode(SSDataBlock* pBlock, const char* pData) {
int32_t version = *(int32_t*)pStart;
pStart += sizeof(int32_t);
ASSERT(version == 1);
// total length sizeof(int32_t)
int32_t dataLen = *(int32_t*)pStart;
@ -2339,7 +2338,9 @@ const char* blockDecode(SSDataBlock* pBlock, const char* pData) {
pStart += sizeof(int32_t) * numOfCols;
for (int32_t i = 0; i < numOfCols; ++i) {
if(version == 1){
colLen[i] = htonl(colLen[i]);
}
ASSERT(colLen[i] >= 0);
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, i);

View File

@ -24,6 +24,7 @@ static int32_t (*tColDataAppendValueImpl[8][3])(SColData *pColData, uint8_t *pDa
static int32_t (*tColDataUpdateValueImpl[8][3])(SColData *pColData, uint8_t *pData, uint32_t nData, bool forward);
// SBuffer ================================
#ifdef BUILD_NO_CALL
void tBufferDestroy(SBuffer *pBuffer) {
tFree(pBuffer->pBuf);
pBuffer->pBuf = NULL;
@ -55,7 +56,7 @@ int32_t tBufferReserve(SBuffer *pBuffer, int64_t nData, void **ppData) {
return code;
}
#endif
// ================================
static int32_t tGetTagVal(uint8_t *p, STagVal *pTagVal, int8_t isJson);
@ -1148,6 +1149,7 @@ static int tTagValJsonCmprFn(const void *p1, const void *p2) {
return strcmp(((STagVal *)p1)[0].pKey, ((STagVal *)p2)[0].pKey);
}
#ifdef TD_DEBUG_PRINT_TAG
static void debugPrintTagVal(int8_t type, const void *val, int32_t vlen, const char *tag, int32_t ln) {
switch (type) {
case TSDB_DATA_TYPE_VARBINARY:
@ -1239,6 +1241,7 @@ void debugPrintSTag(STag *pTag, const char *tag, int32_t ln) {
}
printf("\n");
}
#endif
static int32_t tPutTagVal(uint8_t *p, STagVal *pTagVal, int8_t isJson) {
int32_t n = 0;
@ -2576,6 +2579,7 @@ _exit:
return code;
}
#ifdef BUILD_NO_CALL
static int32_t tColDataSwapValue(SColData *pColData, int32_t i, int32_t j) {
int32_t code = 0;
@ -2658,6 +2662,7 @@ static void tColDataSwap(SColData *pColData, int32_t i, int32_t j) {
break;
}
}
#endif
static int32_t tColDataCopyRowCell(SColData *pFromColData, int32_t iFromRow, SColData *pToColData, int32_t iToRow) {
int32_t code = TSDB_CODE_SUCCESS;

View File

@ -15,11 +15,8 @@
#define _DEFAULT_SOURCE
#include "tmisce.h"
#include "tjson.h"
#include "tglobal.h"
#include "tlog.h"
#include "tname.h"
#include "tjson.h"
int32_t taosGetFqdnPortFromEp(const char* ep, SEp* pEp) {
pEp->port = 0;
memset(pEp->fqdn, 0, TSDB_FQDN_LEN);
@ -73,6 +70,47 @@ void epsetAssign(SEpSet* pDst, const SEpSet* pSrc) {
tstrncpy(pDst->eps[i].fqdn, pSrc->eps[i].fqdn, tListLen(pSrc->eps[i].fqdn));
}
}
void epAssign(SEp* pDst, SEp* pSrc) {
if (pSrc == NULL || pDst == NULL) {
return;
}
memset(pDst->fqdn, 0, tListLen(pSrc->fqdn));
tstrncpy(pDst->fqdn, pSrc->fqdn, tListLen(pSrc->fqdn));
pDst->port = pSrc->port;
}
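// epsetSort below is a small in-place bubble sort of the endpoint list by (fqdn, port).
// It first remembers the endpoint currently marked inUse, sorts, and then re-points
// inUse at that endpoint's new slot, so sorting never switches the active server.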
void epsetSort(SEpSet* pDst) {
if (pDst->numOfEps <= 1) {
return;
}
bool validIdx = false;
SEp ep = {0};
if (pDst->inUse >= 0 && pDst->inUse < pDst->numOfEps) {
validIdx = true;
epAssign(&ep, &pDst->eps[pDst->inUse]);
}
for (int i = 0; i < pDst->numOfEps - 1; i++) {
for (int j = 0; j < pDst->numOfEps - 1 - i; j++) {
SEp* f = &pDst->eps[j];
SEp* s = &pDst->eps[j + 1];
int cmp = strncmp(f->fqdn, s->fqdn, sizeof(f->fqdn));
if (cmp > 0 || (cmp == 0 && f->port > s->port)) {
SEp tmp = {0};  // distinct name so the outer 'ep' that tracks the inUse endpoint is not shadowed
epAssign(&tmp, f);
epAssign(f, s);
epAssign(s, &tmp);
}
}
}
if (validIdx == true)
for (int i = 0; i < pDst->numOfEps; i++) {
int cmp = strncmp(ep.fqdn, pDst->eps[i].fqdn, sizeof(ep.fqdn));
if (cmp == 0 && ep.port == pDst->eps[i].port) {
pDst->inUse = i;
break;
}
}
}
void updateEpSet_s(SCorEpSet* pEpSet, SEpSet* pNewEpSet) {
taosCorBeginWrite(&pEpSet->version);

View File

@ -6139,6 +6139,55 @@ int32_t tDeserializeSMqAskEpReq(void *buf, int32_t bufLen, SMqAskEpReq *pReq) {
return 0;
}
int32_t tDeatroySMqHbRsp(SMqHbRsp *pRsp) {
taosArrayDestroy(pRsp->topicPrivileges);
return 0;
}
int32_t tSerializeSMqHbRsp(void *buf, int32_t bufLen, SMqHbRsp *pRsp) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
if (tStartEncode(&encoder) < 0) return -1;
int32_t sz = taosArrayGetSize(pRsp->topicPrivileges);
if (tEncodeI32(&encoder, sz) < 0) return -1;
for (int32_t i = 0; i < sz; ++i) {
STopicPrivilege *privilege = (STopicPrivilege *)taosArrayGet(pRsp->topicPrivileges, i);
if (tEncodeCStr(&encoder, privilege->topic) < 0) return -1;
if (tEncodeI8(&encoder, privilege->noPrivilege) < 0) return -1;
}
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
tEncoderClear(&encoder);
return tlen;
}
int32_t tDeserializeSMqHbRsp(void *buf, int32_t bufLen, SMqHbRsp *pRsp) {
SDecoder decoder = {0};
tDecoderInit(&decoder, (char *)buf, bufLen);
if (tStartDecode(&decoder) < 0) return -1;
int32_t sz = 0;
if (tDecodeI32(&decoder, &sz) < 0) return -1;
if (sz > 0) {
pRsp->topicPrivileges = taosArrayInit(sz, sizeof(STopicPrivilege));
if (NULL == pRsp->topicPrivileges) return -1;
for (int32_t i = 0; i < sz; ++i) {
STopicPrivilege *data = taosArrayReserve(pRsp->topicPrivileges, 1);
if (tDecodeCStrTo(&decoder, data->topic) < 0) return -1;
if (tDecodeI8(&decoder, &data->noPrivilege) < 0) return -1;
}
}
tEndDecode(&decoder);
tDecoderClear(&decoder);
return 0;
}
int32_t tDeatroySMqHbReq(SMqHbReq *pReq) {
for (int i = 0; i < taosArrayGetSize(pReq->topics); i++) {
TopicOffsetRows *vgs = taosArrayGet(pReq->topics, i);
@ -6194,7 +6243,7 @@ int32_t tDeserializeSMqHbReq(void *buf, int32_t bufLen, SMqHbReq *pReq) {
if (NULL == pReq->topics) return -1;
for (int32_t i = 0; i < sz; ++i) {
TopicOffsetRows *data = taosArrayReserve(pReq->topics, 1);
tDecodeCStrTo(&decoder, data->topicName);
if (tDecodeCStrTo(&decoder, data->topicName) < 0) return -1;
int32_t szVgs = 0;
if (tDecodeI32(&decoder, &szVgs) < 0) return -1;
if (szVgs > 0) {
@ -7753,36 +7802,6 @@ static int32_t tDecodeSVSubmitBlk(SDecoder *pCoder, SVSubmitBlk *pBlock, int32_t
return 0;
}
int32_t tEncodeSVSubmitReq(SEncoder *pCoder, const SVSubmitReq *pReq) {
int32_t nBlocks = taosArrayGetSize(pReq->pArray);
if (tStartEncode(pCoder) < 0) return -1;
if (tEncodeI32v(pCoder, pReq->flags) < 0) return -1;
if (tEncodeI32v(pCoder, nBlocks) < 0) return -1;
for (int32_t iBlock = 0; iBlock < nBlocks; iBlock++) {
if (tEncodeSVSubmitBlk(pCoder, (SVSubmitBlk *)taosArrayGet(pReq->pArray, iBlock), pReq->flags) < 0) return -1;
}
tEndEncode(pCoder);
return 0;
}
int32_t tDecodeSVSubmitReq(SDecoder *pCoder, SVSubmitReq *pReq) {
if (tStartDecode(pCoder) < 0) return -1;
if (tDecodeI32v(pCoder, &pReq->flags) < 0) return -1;
if (tDecodeI32v(pCoder, &pReq->nBlocks) < 0) return -1;
pReq->pBlocks = tDecoderMalloc(pCoder, sizeof(SVSubmitBlk) * pReq->nBlocks);
if (pReq->pBlocks == NULL) return -1;
for (int32_t iBlock = 0; iBlock < pReq->nBlocks; iBlock++) {
if (tDecodeSVSubmitBlk(pCoder, pReq->pBlocks + iBlock, pReq->flags) < 0) return -1;
}
tEndDecode(pCoder);
return 0;
}
static int32_t tEncodeSSubmitBlkRsp(SEncoder *pEncoder, const SSubmitBlkRsp *pBlock) {
if (tStartEncode(pEncoder) < 0) return -1;

View File

@ -12,9 +12,10 @@
#include "tcommon.h"
#include "tdatablock.h"
#include "tdef.h"
#include "tvariant.h"
#include "tmisce.h"
#include "ttime.h"
#include "ttokendef.h"
#include "tvariant.h"
namespace {
//
@ -25,7 +26,6 @@ int main(int argc, char** argv) {
return RUN_ALL_TESTS();
}
TEST(testCase, toUIntegerEx_test) {
uint64_t val = 0;
@ -166,7 +166,7 @@ TEST(testCase, toIntegerEx_test) {
s = "-9223372036854775808";
ret = toIntegerEx(s, strlen(s), TK_NK_INTEGER, &val);
ASSERT_EQ(ret, 0);
ASSERT_EQ(val, -9223372036854775808);
// ASSERT_EQ(val, -9223372036854775808);
// out of range
s = "9323372036854775807";
@ -223,7 +223,7 @@ TEST(testCase, toInteger_test) {
s = "-9223372036854775808";
ret = toInteger(s, strlen(s), 10, &val);
ASSERT_EQ(ret, 0);
ASSERT_EQ(val, -9223372036854775808);
// ASSERT_EQ(val, -9223372036854775808);
// out of range
s = "9323372036854775807";
@ -418,7 +418,8 @@ void check_tm(const STm* tm, int32_t y, int32_t mon, int32_t d, int32_t h, int32
ASSERT_EQ(tm->fsec, fsec);
}
void test_timestamp_tm_conversion(int64_t ts, int32_t precision, int32_t y, int32_t mon, int32_t d, int32_t h, int32_t m, int32_t s, int64_t fsec) {
void test_timestamp_tm_conversion(int64_t ts, int32_t precision, int32_t y, int32_t mon, int32_t d, int32_t h,
int32_t m, int32_t s, int64_t fsec) {
int64_t ts_tmp;
char buf[128] = {0};
struct STm tm;
@ -498,7 +499,8 @@ TEST(timeTest, ts2char) {
"15:15:03:03:03:03:28:28:05:05:123:123:123000:123000:123000000:123000000:PM:PM:pm:pm");
// double quotes normal output
test_ts2char(ts, "\\\"HH24:hh24:HH12:hh12:HH:hh:MI:mi:SS:ss:MS:ms:US:us:NS:ns:PM:AM:pm:am\\\"", TSDB_TIME_PRECISION_MILLI,
test_ts2char(ts, "\\\"HH24:hh24:HH12:hh12:HH:hh:MI:mi:SS:ss:MS:ms:US:us:NS:ns:PM:AM:pm:am\\\"",
TSDB_TIME_PRECISION_MILLI,
"\"15:15:03:03:03:03:28:28:05:05:123:123:123000:123000:123000000:123000000:PM:PM:pm:pm\"");
test_ts2char(ts, "\\\"HH24:hh24:HH12:hh12:HH:hh:MI:mi:SS:ss:MS:ms:US:us:NS:ns:PM:AM:pm:am", TSDB_TIME_PRECISION_MILLI,
"\"15:15:03:03:03:03:28:28:05:05:123:123:123000:123000:123000000:123000000:PM:PM:pm:pm");
@ -506,14 +508,18 @@ TEST(timeTest, ts2char) {
test_ts2char(ts, "\"HH24:hh24:HH12:hh12:HH:hh:MI:mi:SS:ss:MS:ms:US:us:NS:ns:PM:AM:pm:am", TSDB_TIME_PRECISION_MILLI,
"HH24:hh24:HH12:hh12:HH:hh:MI:mi:SS:ss:MS:ms:US:us:NS:ns:PM:AM:pm:am");
test_ts2char(ts, "yyyy-mm-dd hh24:mi:ss.nsamaaa", TSDB_TIME_PRECISION_MILLI, "2023-10-13 15:28:05.123000000pmaaa");
test_ts2char(ts, "aaa--yyyy-mm-dd hh24:mi:ss.nsamaaa", TSDB_TIME_PRECISION_MILLI, "aaa--2023-10-13 15:28:05.123000000pmaaa");
test_ts2char(ts, "add--yyyy-mm-dd hh24:mi:ss.nsamaaa", TSDB_TIME_PRECISION_MILLI, "a13--2023-10-13 15:28:05.123000000pmaaa");
test_ts2char(ts, "aaa--yyyy-mm-dd hh24:mi:ss.nsamaaa", TSDB_TIME_PRECISION_MILLI,
"aaa--2023-10-13 15:28:05.123000000pmaaa");
test_ts2char(ts, "add--yyyy-mm-dd hh24:mi:ss.nsamaaa", TSDB_TIME_PRECISION_MILLI,
"a13--2023-10-13 15:28:05.123000000pmaaa");
ts = 1693946405000;
test_ts2char(ts, "Day, Month dd, YYYY hh24:mi:ss AM TZH:tzh", TSDB_TIME_PRECISION_MILLI, "Wednesday, September 06, 2023 04:40:05 AM +08:+08");
test_ts2char(ts, "Day, Month dd, YYYY hh24:mi:ss AM TZH:tzh", TSDB_TIME_PRECISION_MILLI,
"Wednesday, September 06, 2023 04:40:05 AM +08:+08");
ts = -62198784343000; // milliseconds before epoch, Friday, January 1, -0001 12:00:00 AM GMT+08:06
test_ts2char(ts, "Day, Month dd, YYYY hh12:mi:ss AM", TSDB_TIME_PRECISION_MILLI, "Friday , January 01, -001 12:00:00 AM");
test_ts2char(ts, "Day, Month dd, YYYY hh12:mi:ss AM", TSDB_TIME_PRECISION_MILLI,
"Friday , January 01, -001 12:00:00 AM");
}
TEST(timeTest, char2ts) {
@ -635,8 +641,55 @@ TEST(timeTest, char2ts) {
ASSERT_EQ(0, TEST_char2ts("yyyy年 MM/ddTZH", &ts, TSDB_TIME_PRECISION_MICRO, "1970年 1/1+0"));
ASSERT_EQ(ts, 0);
ASSERT_EQ(0, TEST_char2ts("yyyy年 a a a MM/ddTZH", &ts, TSDB_TIME_PRECISION_MICRO, "1970年 a a a 1/1+0"));
ASSERT_EQ(0, TEST_char2ts("yyyy年 a a a a a a a a a a a a a a a MM/ddTZH", &ts, TSDB_TIME_PRECISION_MICRO, "1970年 a "));
ASSERT_EQ(0, TEST_char2ts("yyyy年 a a a a a a a a a a a a a a a MM/ddTZH", &ts, TSDB_TIME_PRECISION_MICRO,
"1970年 a "));
ASSERT_EQ(-3, TEST_char2ts("yyyy-mm-DDD", &ts, TSDB_TIME_PRECISION_MILLI, "1970-01-001"));
}
TEST(timeTest, epSet) {
{
SEpSet ep = {0};
addEpIntoEpSet(&ep, "local", 14);
addEpIntoEpSet(&ep, "aocal", 13);
addEpIntoEpSet(&ep, "abcal", 12);
addEpIntoEpSet(&ep, "abcaleb", 11);
epsetSort(&ep);
ASSERT_EQ(strcmp(ep.eps[0].fqdn, "abcal"), 0);
ASSERT_EQ(ep.eps[0].port, 12);
ASSERT_EQ(strcmp(ep.eps[1].fqdn, "abcaleb"), 0);
ASSERT_EQ(ep.eps[1].port, 11);
ASSERT_EQ(strcmp(ep.eps[2].fqdn, "aocal"), 0);
ASSERT_EQ(ep.eps[2].port, 13);
ASSERT_EQ(strcmp(ep.eps[3].fqdn, "local"), 0);
ASSERT_EQ(ep.eps[3].port, 14);
}
{
SEpSet ep = {0};
addEpIntoEpSet(&ep, "local", 14);
addEpIntoEpSet(&ep, "local", 13);
addEpIntoEpSet(&ep, "local", 12);
addEpIntoEpSet(&ep, "local", 11);
epsetSort(&ep);
ASSERT_EQ(strcmp(ep.eps[0].fqdn, "local"), 0);
ASSERT_EQ(ep.eps[0].port, 11);
ASSERT_EQ(strcmp(ep.eps[1].fqdn, "local"), 0);
ASSERT_EQ(ep.eps[1].port, 12);
ASSERT_EQ(strcmp(ep.eps[2].fqdn, "local"), 0);
ASSERT_EQ(ep.eps[2].port, 13);
ASSERT_EQ(strcmp(ep.eps[3].fqdn, "local"), 0);
ASSERT_EQ(ep.eps[3].port, 14);
}
{
SEpSet ep = {0};
addEpIntoEpSet(&ep, "local", 14);
epsetSort(&ep);
ASSERT_EQ(ep.numOfEps, 1);
}
}
#pragma GCC diagnostic pop

View File

@ -169,11 +169,29 @@ static int32_t dmParseArgs(int32_t argc, char const *argv[]) {
return -1;
}
} else if (strcmp(argv[i], "-a") == 0) {
tstrncpy(global.apolloUrl, argv[++i], PATH_MAX);
if(i < argc - 1) {
if (strlen(argv[++i]) >= PATH_MAX) {
printf("apollo url overflow");
return -1;
}
tstrncpy(global.apolloUrl, argv[i], PATH_MAX);
} else {
printf("'-a' requires a parameter\n");
return -1;
}
} else if (strcmp(argv[i], "-s") == 0) {
global.dumpSdb = true;
} else if (strcmp(argv[i], "-E") == 0) {
tstrncpy(global.envFile, argv[++i], PATH_MAX);
if(i < argc - 1) {
if (strlen(argv[++i]) >= PATH_MAX) {
printf("env file path overflow");
return -1;
}
tstrncpy(global.envFile, argv[i], PATH_MAX);
} else {
printf("'-E' requires a parameter\n");
return -1;
}
} else if (strcmp(argv[i], "-k") == 0) {
global.generateGrant = true;
} else if (strcmp(argv[i], "-C") == 0) {

View File

@ -350,7 +350,7 @@ static bool rpcRfp(int32_t code, tmsg_t msgType) {
code == TSDB_CODE_SYN_RESTORING || code == TSDB_CODE_VND_STOPPED || code == TSDB_CODE_APP_IS_STARTING ||
code == TSDB_CODE_APP_IS_STOPPING) {
if (msgType == TDMT_SCH_QUERY || msgType == TDMT_SCH_MERGE_QUERY || msgType == TDMT_SCH_FETCH ||
msgType == TDMT_SCH_MERGE_FETCH || msgType == TDMT_SCH_TASK_NOTIFY) {
msgType == TDMT_SCH_MERGE_FETCH || msgType == TDMT_SCH_TASK_NOTIFY || msgType == TDMT_VND_DROP_TTL_TABLE) {
return false;
}
return true;

View File

@ -289,6 +289,7 @@ static void dmResetEps(SDnodeData *pData, SArray *dnodeEps) {
pData->mnodeEps.eps[mIndex] = pDnodeEp->ep;
mIndex++;
}
epsetSort(&pData->mnodeEps);
for (int32_t i = 0; i < numOfEps; i++) {
SDnodeEp *pDnodeEp = taosArrayGet(dnodeEps, i);

View File

@ -30,7 +30,6 @@ int32_t mndCheckDbPrivilege(SMnode *pMnode, const char *user, EOperType operType
int32_t mndCheckDbPrivilegeByName(SMnode *pMnode, const char *user, EOperType operType, const char *dbname);
int32_t mndCheckViewPrivilege(SMnode *pMnode, const char *user, EOperType operType, const char *pViewFName);
int32_t mndCheckTopicPrivilege(SMnode *pMnode, const char *user, EOperType operType, SMqTopicObj *pTopic);
int32_t mndCheckTopicPrivilegeByName(SMnode *pMnode, const char *user, EOperType operType, const char *topicName);
int32_t mndCheckShowPrivilege(SMnode *pMnode, const char *user, EShowType showType, const char *dbname);
int32_t mndCheckAlterUserPrivilege(SUserObj *pOperUser, SUserObj *pUser, SAlterUserReq *pAlter);
int32_t mndSetUserAuthRsp(SMnode *pMnode, SUserObj *pUser, SGetUserAuthRsp *pRsp);

View File

@ -101,8 +101,9 @@ static int32_t validateTopics(STrans *pTrans, const SArray *pTopicList, SMnode *
goto FAILED;
}
if (mndCheckTopicPrivilege(pMnode, pUser, MND_OPER_SUBSCRIBE, pTopic) != 0) {
code = -1;
if (mndCheckTopicPrivilege(pMnode, pUser, MND_OPER_SUBSCRIBE, pTopic) != 0 || grantCheck(TSDB_GRANT_SUBSCRIBE) < 0) {
code = TSDB_CODE_MND_NO_RIGHTS;
terrno = TSDB_CODE_MND_NO_RIGHTS;
goto FAILED;
}
@ -220,22 +221,53 @@ FAIL:
return -1;
}
static int32_t checkPrivilege(SMnode *pMnode, SMqConsumerObj *pConsumer, SMqHbRsp *rsp, char* user){
rsp->topicPrivileges = taosArrayInit(taosArrayGetSize(pConsumer->currentTopics), sizeof(STopicPrivilege));
if(rsp->topicPrivileges == NULL){
terrno = TSDB_CODE_OUT_OF_MEMORY;
return terrno;
}
for(int32_t i = 0; i < taosArrayGetSize(pConsumer->currentTopics); i++){
char *topic = taosArrayGetP(pConsumer->currentTopics, i);
SMqTopicObj* pTopic = mndAcquireTopic(pMnode, topic);
if (pTopic == NULL) { // terrno has been set by callee function
continue;
}
STopicPrivilege *data = taosArrayReserve(rsp->topicPrivileges, 1);
strcpy(data->topic, topic);
if (mndCheckTopicPrivilege(pMnode, user, MND_OPER_SUBSCRIBE, pTopic) != 0 || grantCheck(TSDB_GRANT_SUBSCRIBE) < 0) {
data->noPrivilege = 1;
} else{
data->noPrivilege = 0;
}
mndReleaseTopic(pMnode, pTopic);
}
return 0;
}
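// Net effect of this heartbeat path: the mnode re-checks subscribe privilege and the
// subscribe grant for every topic the consumer holds, ships a per-topic noPrivilege
// flag back in SMqHbRsp, and the client (tmqHbCb / tmqPollImpl above) then marks and
// skips the flagged topics when polling.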
static int32_t mndProcessMqHbReq(SRpcMsg *pMsg) {
int32_t code = 0;
SMnode *pMnode = pMsg->info.node;
SMqHbReq req = {0};
SMqHbRsp rsp = {0};
SMqConsumerObj *pConsumer = NULL;
if ((code = tDeserializeSMqHbReq(pMsg->pCont, pMsg->contLen, &req)) < 0) {
if (tDeserializeSMqHbReq(pMsg->pCont, pMsg->contLen, &req) < 0) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
code = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
int64_t consumerId = req.consumerId;
SMqConsumerObj *pConsumer = mndAcquireConsumer(pMnode, consumerId);
pConsumer = mndAcquireConsumer(pMnode, consumerId);
if (pConsumer == NULL) {
mError("consumer:0x%" PRIx64 " not exist", consumerId);
terrno = TSDB_CODE_MND_CONSUMER_NOT_EXIST;
code = -1;
code = TSDB_CODE_MND_CONSUMER_NOT_EXIST;
goto end;
}
code = checkPrivilege(pMnode, pConsumer, &rsp, pMsg->info.conn.user);
if(code != 0){
goto end;
}
@ -280,9 +312,22 @@ static int32_t mndProcessMqHbReq(SRpcMsg *pMsg) {
mndReleaseSubscribe(pMnode, pSub);
}
mndReleaseConsumer(pMnode, pConsumer);
// encode rsp
int32_t tlen = tSerializeSMqHbRsp(NULL, 0, &rsp);
void *buf = rpcMallocCont(tlen);
if (buf == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
code = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
tSerializeSMqHbRsp(buf, tlen, &rsp);
pMsg->info.rsp = buf;
pMsg->info.rspLen = tlen;
end:
tDeatroySMqHbRsp(&rsp);
mndReleaseConsumer(pMnode, pConsumer);
tDeatroySMqHbReq(&req);
return code;
}
@ -500,6 +545,11 @@ int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) {
SMnode *pMnode = pMsg->info.node;
char *msgStr = pMsg->pCont;
if(grantCheck(TSDB_GRANT_SUBSCRIBE) < 0){
terrno = TSDB_CODE_GRANT_EXPIRED;
return -1;
}
SCMSubscribeReq subscribe = {0};
tDeserializeSCMSubscribeReq(msgStr, &subscribe);
@ -525,7 +575,7 @@ int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) {
}
// check topic existence
pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, pMsg, "subscribe");
pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_TOPIC_INSIDE, pMsg, "subscribe");
if (pTrans == NULL) {
goto _over;
}

View File

@ -545,6 +545,7 @@ void dumpHeader(SSdb *pSdb, SJson *json) {
SJson *maxIdsJson = tjsonCreateObject();
tjsonAddItemToObject(json, "maxIds", maxIdsJson);
for (int32_t i = 0; i < SDB_MAX; ++i) {
if(i == 5) continue;
int64_t maxId = 0;
if (i < SDB_MAX) {
maxId = pSdb->maxId[i];

View File

@ -15,6 +15,7 @@
#define _DEFAULT_SOURCE
#include "mndMnode.h"
#include "audit.h"
#include "mndCluster.h"
#include "mndDnode.h"
#include "mndPrivilege.h"
@ -22,7 +23,6 @@
#include "mndSync.h"
#include "mndTrans.h"
#include "tmisce.h"
#include "audit.h"
#define MNODE_VER_NUMBER 2
#define MNODE_RESERVE_SIZE 64
@ -251,6 +251,7 @@ void mndGetMnodeEpSet(SMnode *pMnode, SEpSet *pEpSet) {
pEpSet->inUse = pEpSet->numOfEps;
} else {
pEpSet->inUse = (pEpSet->numOfEps + 1) % totalMnodes;
// pEpSet->inUse = 0;
}
}
if (pObj->pDnode != NULL) {
@ -266,6 +267,7 @@ void mndGetMnodeEpSet(SMnode *pMnode, SEpSet *pEpSet) {
if (pEpSet->inUse >= pEpSet->numOfEps) {
pEpSet->inUse = 0;
}
epsetSort(pEpSet);
}
static int32_t mndSetCreateMnodeRedoLogs(SMnode *pMnode, STrans *pTrans, SMnodeObj *pObj) {
@ -320,8 +322,8 @@ static int32_t mndBuildCreateMnodeRedoAction(STrans *pTrans, SDCreateMnodeReq *p
return 0;
}
static int32_t mndBuildAlterMnodeTypeRedoAction(STrans *pTrans,
SDAlterMnodeTypeReq *pAlterMnodeTypeReq, SEpSet *pAlterMnodeTypeEpSet) {
static int32_t mndBuildAlterMnodeTypeRedoAction(STrans *pTrans, SDAlterMnodeTypeReq *pAlterMnodeTypeReq,
SEpSet *pAlterMnodeTypeEpSet) {
int32_t contLen = tSerializeSDCreateMnodeReq(NULL, 0, pAlterMnodeTypeReq);
void *pReq = taosMemoryMalloc(contLen);
tSerializeSDCreateMnodeReq(pReq, contLen, pAlterMnodeTypeReq);
@ -401,8 +403,7 @@ static int32_t mndSetCreateMnodeRedoActions(SMnode *pMnode, STrans *pTrans, SDno
createReq.replicas[numOfReplicas].port = pMObj->pDnode->port;
memcpy(createReq.replicas[numOfReplicas].fqdn, pMObj->pDnode->fqdn, TSDB_FQDN_LEN);
numOfReplicas++;
}
else{
} else {
createReq.learnerReplicas[numOfLearnerReplicas].id = pMObj->id;
createReq.learnerReplicas[numOfLearnerReplicas].port = pMObj->pDnode->port;
memcpy(createReq.learnerReplicas[numOfLearnerReplicas].fqdn, pMObj->pDnode->fqdn, TSDB_FQDN_LEN);
@ -451,8 +452,7 @@ int32_t mndSetRestoreCreateMnodeRedoActions(SMnode *pMnode, STrans *pTrans, SDno
createReq.replicas[createReq.replica].port = pMObj->pDnode->port;
memcpy(createReq.replicas[createReq.replica].fqdn, pMObj->pDnode->fqdn, TSDB_FQDN_LEN);
createReq.replica++;
}
else{
} else {
createReq.learnerReplicas[createReq.learnerReplica].id = pMObj->id;
createReq.learnerReplicas[createReq.learnerReplica].port = pMObj->pDnode->port;
memcpy(createReq.learnerReplicas[createReq.learnerReplica].fqdn, pMObj->pDnode->fqdn, TSDB_FQDN_LEN);
@ -495,8 +495,7 @@ static int32_t mndSetAlterMnodeTypeRedoActions(SMnode *pMnode, STrans *pTrans, S
alterReq.replicas[alterReq.replica].port = pMObj->pDnode->port;
memcpy(alterReq.replicas[alterReq.replica].fqdn, pMObj->pDnode->fqdn, TSDB_FQDN_LEN);
alterReq.replica++;
}
else{
} else {
alterReq.learnerReplicas[alterReq.learnerReplica].id = pMObj->id;
alterReq.learnerReplicas[alterReq.learnerReplica].port = pMObj->pDnode->port;
memcpy(alterReq.learnerReplicas[alterReq.learnerReplica].fqdn, pMObj->pDnode->fqdn, TSDB_FQDN_LEN);
@ -544,8 +543,7 @@ int32_t mndSetRestoreAlterMnodeTypeRedoActions(SMnode *pMnode, STrans *pTrans, S
alterReq.replicas[alterReq.replica].port = pMObj->pDnode->port;
memcpy(alterReq.replicas[alterReq.replica].fqdn, pMObj->pDnode->fqdn, TSDB_FQDN_LEN);
alterReq.replica++;
}
else{
} else {
alterReq.learnerReplicas[alterReq.learnerReplica].id = pMObj->id;
alterReq.learnerReplicas[alterReq.learnerReplica].port = pMObj->pDnode->port;
memcpy(alterReq.learnerReplicas[alterReq.learnerReplica].fqdn, pMObj->pDnode->fqdn, TSDB_FQDN_LEN);
@ -959,7 +957,10 @@ static void mndReloadSyncConfig(SMnode *pMnode) {
void *pIter = NULL;
int32_t updatingMnodes = 0;
int32_t readyMnodes = 0;
SSyncCfg cfg = {.myIndex = -1, .lastIndex = 0,};
SSyncCfg cfg = {
.myIndex = -1,
.lastIndex = 0,
};
SyncIndex maxIndex = 0;
while (1) {
@ -1023,8 +1024,8 @@ static void mndReloadSyncConfig(SMnode *pMnode) {
}
if (pMnode->syncMgmt.sync > 0) {
mInfo("vgId:1, mnode sync reconfig, totalReplica:%d replica:%d myIndex:%d",
cfg.totalReplicaNum, cfg.replicaNum, cfg.myIndex);
mInfo("vgId:1, mnode sync reconfig, totalReplica:%d replica:%d myIndex:%d", cfg.totalReplicaNum, cfg.replicaNum,
cfg.myIndex);
for (int32_t i = 0; i < cfg.totalReplicaNum; ++i) {
SNodeInfo *pNode = &cfg.nodeInfo[i];

View File

@ -30,9 +30,6 @@ int32_t mndCheckDbPrivilegeByName(SMnode *pMnode, const char *user, EOperType op
}
int32_t mndCheckTopicPrivilege(SMnode *pMnode, const char *user, EOperType operType, SMqTopicObj *pTopic) { return 0; }
int32_t mndCheckTopicPrivilegeByName(SMnode *pMnode, const char *user, EOperType operType, const char *topicName) {
return 0;
}
int32_t mndSetUserWhiteListRsp(SMnode *pMnode, SUserObj *pUser, SGetUserWhiteListRsp *pWhiteListRsp) {

View File

@ -644,6 +644,11 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) {
int32_t sqlLen = 0;
terrno = TSDB_CODE_SUCCESS;
if(grantCheck(TSDB_GRANT_STREAMS) < 0){
terrno = TSDB_CODE_GRANT_STREAM_LIMITED;
return -1;
}
SCMCreateStreamReq createStreamReq = {0};
if (tDeserializeSCMCreateStreamReq(pReq->pCont, pReq->contLen, &createStreamReq) != 0) {
terrno = TSDB_CODE_INVALID_MSG;
@ -1624,21 +1629,26 @@ static int32_t mndProcessResumeStreamReq(SRpcMsg *pReq) {
SMnode *pMnode = pReq->info.node;
SStreamObj *pStream = NULL;
SMResumeStreamReq pauseReq = {0};
if (tDeserializeSMResumeStreamReq(pReq->pCont, pReq->contLen, &pauseReq) < 0) {
if(grantCheck(TSDB_GRANT_STREAMS) < 0){
terrno = TSDB_CODE_GRANT_EXPIRED;
return -1;
}
SMResumeStreamReq resumeReq = {0};
if (tDeserializeSMResumeStreamReq(pReq->pCont, pReq->contLen, &resumeReq) < 0) {
terrno = TSDB_CODE_INVALID_MSG;
return -1;
}
pStream = mndAcquireStream(pMnode, pauseReq.name);
pStream = mndAcquireStream(pMnode, resumeReq.name);
if (pStream == NULL) {
if (pauseReq.igNotExists) {
mInfo("stream:%s not exist, not resume stream", pauseReq.name);
if (resumeReq.igNotExists) {
mInfo("stream:%s not exist, not resume stream", resumeReq.name);
sdbRelease(pMnode->pSdb, pStream);
return 0;
} else {
mError("stream:%s not exist, failed to resume stream", pauseReq.name);
mError("stream:%s not exist, failed to resume stream", resumeReq.name);
terrno = TSDB_CODE_MND_STREAM_NOT_EXIST;
return -1;
}
@ -1663,7 +1673,7 @@ static int32_t mndProcessResumeStreamReq(SRpcMsg *pReq) {
STrans* pTrans = doCreateTrans(pMnode, pStream, pReq, MND_STREAM_RESUME_NAME, "resume the stream");
if (pTrans == NULL) {
mError("stream:%s, failed to resume stream since %s", pauseReq.name, terrstr());
mError("stream:%s, failed to resume stream since %s", resumeReq.name, terrstr());
sdbRelease(pMnode->pSdb, pStream);
return -1;
}
@ -1671,8 +1681,8 @@ static int32_t mndProcessResumeStreamReq(SRpcMsg *pReq) {
int32_t code = mndStreamRegisterTrans(pTrans, MND_STREAM_RESUME_NAME, pStream->uid);
// set the resume action
if (mndStreamSetResumeAction(pTrans, pMnode, pStream, pauseReq.igUntreated) < 0) {
mError("stream:%s, failed to drop task since %s", pauseReq.name, terrstr());
if (mndStreamSetResumeAction(pTrans, pMnode, pStream, resumeReq.igUntreated) < 0) {
mError("stream:%s, failed to drop task since %s", resumeReq.name, terrstr());
sdbRelease(pMnode->pSdb, pStream);
mndTransDrop(pTrans);
return -1;
@ -2130,7 +2140,6 @@ static void doAddTaskId(SArray* pList, int32_t taskId, int64_t uid, int32_t numO
int32_t mndProcessStreamReqCheckpoint(SRpcMsg *pReq) {
SMnode *pMnode = pReq->info.node;
SStreamTaskCheckpointReq req = {0};
SDecoder decoder = {0};

View File

@ -182,17 +182,55 @@ static int32_t mndDropOrphanTasks(SMnode* pMnode, SArray* pList) {
mndTransDrop(pTrans);
return -1;
}
mndTransDrop(pTrans);
return 0;
}
int32_t suspendAllStreams(SMnode *pMnode, SRpcHandleInfo* info){
SSdb *pSdb = pMnode->pSdb;
SStreamObj *pStream = NULL;
void* pIter = NULL;
while(1) {
pIter = sdbFetch(pSdb, SDB_STREAM, pIter, (void **)&pStream);
if (pIter == NULL) break;
if(pStream->status != STREAM_STATUS__PAUSE){
SMPauseStreamReq reqPause = {0};
strcpy(reqPause.name, pStream->name);
reqPause.igNotExists = 1;
int32_t contLen = tSerializeSMPauseStreamReq(NULL, 0, &reqPause);
void * pHead = rpcMallocCont(contLen);
tSerializeSMPauseStreamReq(pHead, contLen, &reqPause);
SRpcMsg rpcMsg = {
.msgType = TDMT_MND_PAUSE_STREAM,
.pCont = pHead,
.contLen = contLen,
.info = *info,
};
tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg);
mInfo("receive pause stream:%s, %s, %p, because grant expired", pStream->name, reqPause.name, reqPause.name);
}
sdbRelease(pSdb, pStream);
}
return 0;
}
int32_t mndProcessStreamHb(SRpcMsg *pReq) {
SMnode *pMnode = pReq->info.node;
SStreamHbMsg req = {0};
SArray *pFailedTasks = taosArrayInit(4, sizeof(SFailedCheckpointInfo));
SArray *pOrphanTasks = taosArrayInit(3, sizeof(SOrphanTask));
if(grantCheck(TSDB_GRANT_STREAMS) < 0){
if(suspendAllStreams(pMnode, &pReq->info) < 0){
return -1;
}
}
SDecoder decoder = {0};
tDecoderInit(&decoder, pReq->pCont, pReq->contLen);

View File

@ -1925,6 +1925,7 @@ static int32_t mndProcessAlterUserPrivilegesReq(SAlterUserReq *pAlterReq, SMnode
return -1;
}
taosHashPut(pNewUser->topics, pTopic->name, len, pTopic->name, TSDB_TOPIC_FNAME_LEN);
mndReleaseTopic(pMnode, pTopic);
}
if (ALTER_USER_DEL_SUBSCRIBE_TOPIC_PRIV(pAlterReq->alterType, pAlterReq->privileges)) {
@ -1935,6 +1936,7 @@ static int32_t mndProcessAlterUserPrivilegesReq(SAlterUserReq *pAlterReq, SMnode
return -1;
}
taosHashRemove(pNewUser->topics, pAlterReq->objname, len);
mndReleaseTopic(pMnode, pTopic);
}
return TSDB_CODE_SUCCESS;

View File

@ -877,6 +877,7 @@ SEpSet mndGetVgroupEpset(SMnode *pMnode, const SVgObj *pVgroup) {
addEpIntoEpSet(&epset, pDnode->fqdn, pDnode->port);
mndReleaseDnode(pMnode, pDnode);
}
epsetSort(&epset);
return epset;
}

View File

@ -64,6 +64,8 @@ const char *sdbTableName(ESdbType type) {
return "idx";
case SDB_VIEW:
return "view";
case SDB_STREAM_SEQ:
return "stream_seq";
case SDB_COMPACT:
return "compact";
case SDB_COMPACT_DETAIL:

View File

@ -16,21 +16,20 @@
#include "tq.h"
int32_t tqAddBlockDataToRsp(const SSDataBlock* pBlock, SMqDataRsp* pRsp, int32_t numOfCols, int8_t precision) {
int32_t dataStrLen = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(pBlock);
int32_t dataStrLen = sizeof(SRetrieveTableRspForTmq) + blockGetEncodeSize(pBlock);
void* buf = taosMemoryCalloc(1, dataStrLen);
if (buf == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)buf;
pRetrieve->useconds = 0;
SRetrieveTableRspForTmq* pRetrieve = (SRetrieveTableRspForTmq*)buf;
pRetrieve->version = 1;
pRetrieve->precision = precision;
pRetrieve->compressed = 0;
pRetrieve->completed = 1;
pRetrieve->numOfRows = htobe64((int64_t)pBlock->info.rows);
int32_t actualLen = blockEncode(pBlock, pRetrieve->data, numOfCols);
actualLen += sizeof(SRetrieveTableRsp);
actualLen += sizeof(SRetrieveTableRspForTmq);
taosArrayPush(pRsp->blockDataLen, &actualLen);
taosArrayPush(pRsp->blockData, &buf);

View File

@ -941,7 +941,7 @@ static int32_t tsdbCacheLoadFromRaw(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArr
}
if(lastrowTmpIndexArray != NULL) {
mergeLastCid(uid, pTsdb, &lastrowTmpColArray, pr, lastrowColIds, lastrowIndex, lastrowSlotIds);
mergeLastRowCid(uid, pTsdb, &lastrowTmpColArray, pr, lastrowColIds, lastrowIndex, lastrowSlotIds);  // the last-row path must call the last-row merge, not mergeLastCid
for(int i = 0; i < taosArrayGetSize(lastrowTmpColArray); i++) {
taosArrayInsert(pTmpColArray, *(int32_t*)taosArrayGet(lastrowTmpIndexArray, i), taosArrayGet(lastrowTmpColArray, i));
}

View File

@ -157,7 +157,8 @@ int vnodeShouldCommit(SVnode *pVnode, bool atExit) {
taosThreadMutexLock(&pVnode->mutex);
if (pVnode->inUse && diskAvail) {
needCommit = (pVnode->inUse->size > pVnode->inUse->node.size) ||
(atExit && (pVnode->inUse->size > 0 || pVnode->pMeta->changed));
(atExit && (pVnode->inUse->size > 0 || pVnode->pMeta->changed ||
pVnode->state.applied - pVnode->state.committed > 4096));
}
taosThreadMutexUnlock(&pVnode->mutex);
return needCommit;

View File

@ -183,6 +183,11 @@ static int32_t vnodePreProcessDropTtlMsg(SVnode *pVnode, SRpcMsg *pMsg) {
ttlReq.pTbUids = tbUids;
}
if (ttlReq.nUids == 0) {
code = TSDB_CODE_MSG_PREPROCESSED;
TSDB_CHECK_CODE(code, lino, _exit);
}
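// TSDB_CODE_MSG_PREPROCESSED short-circuits the write path: the propose loop skips the
// error log for it, and vnodeHandleProposeError (further down) answers the client with
// TSDB_CODE_SUCCESS instead of treating an empty ttl request as a failure.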
{ // prepare new content
int32_t reqLenNew = tSerializeSVDropTtlTableReq(NULL, 0, &ttlReq);
int32_t contLenNew = reqLenNew + sizeof(SMsgHead);
@ -207,7 +212,7 @@ static int32_t vnodePreProcessDropTtlMsg(SVnode *pVnode, SRpcMsg *pMsg) {
_exit:
taosArrayDestroy(tbUids);
if (code) {
if (code && code != TSDB_CODE_MSG_PREPROCESSED) {
vError("vgId:%d, %s:%d failed to preprocess drop ttl request since %s, msg type:%s", TD_VID(pVnode), __func__, lino,
tstrerror(code), TMSG_INFO(pMsg->msgType));
} else {
@ -464,7 +469,7 @@ int32_t vnodePreProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg) {
break;
}
if (code) {
if (code && code != TSDB_CODE_MSG_PREPROCESSED) {
vError("vgId:%d, failed to preprocess write request since %s, msg type:%s", TD_VID(pVnode), tstrerror(code),
TMSG_INFO(pMsg->msgType));
}

View File

@ -95,6 +95,11 @@ static void inline vnodeHandleWriteMsg(SVnode *pVnode, SRpcMsg *pMsg) {
static void vnodeHandleProposeError(SVnode *pVnode, SRpcMsg *pMsg, int32_t code) {
if (code == TSDB_CODE_SYN_NOT_LEADER || code == TSDB_CODE_SYN_RESTORING) {
vnodeRedirectRpcMsg(pVnode, pMsg, code);
} else if (code == TSDB_CODE_MSG_PREPROCESSED) {
SRpcMsg rsp = {.code = TSDB_CODE_SUCCESS, .info = pMsg->info};
if (rsp.info.handle != NULL) {
tmsgSendRsp(&rsp);
}
} else {
const STraceId *trace = &pMsg->info.traceId;
vGError("vgId:%d, msg:%p failed to propose since %s, code:0x%x", pVnode->config.vgId, pMsg, tstrerror(code), code);
@ -297,8 +302,10 @@ void vnodeProposeWriteMsg(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs)
code = vnodePreProcessWriteMsg(pVnode, pMsg);
if (code != 0) {
if (code != TSDB_CODE_MSG_PREPROCESSED) {
vGError("vgId:%d, msg:%p failed to pre-process since %s", vgId, pMsg, tstrerror(code));
if (terrno != 0) code = terrno;
}
vnodeHandleProposeError(pVnode, pMsg, code);
rpcFreeCont(pMsg->pCont);
taosFreeQitem(pMsg);

View File

@ -200,6 +200,7 @@ typedef struct SExchangeInfo {
uint64_t self;
SLimitInfo limitInfo;
int64_t openedTs; // start exec time stamp, todo: move to SLoadRemoteDataInfo
char* pTaskId;
} SExchangeInfo;
typedef struct SScanInfo {
@ -272,7 +273,8 @@ typedef struct STableScanInfo {
SSampleExecInfo sample; // sample execution info
int32_t tableStartIndex; // current group scan start
int32_t tableEndIndex; // current group scan end
int32_t currentGroupIndex; // current group index of groupOffset
int32_t currentGroupId;
int32_t currentTable;
int8_t scanMode;
int8_t assignBlockUid;
uint8_t countState; // empty table count state

View File

@ -204,6 +204,10 @@ void tsortSetAbortCheckFn(SSortHandle* pHandle, bool (*checkFn)(void* param), vo
*/
int32_t tsortCompAndBuildKeys(const SArray* pSortCols, char* keyBuf, int32_t* keyLen, const STupleHandle* pTuple);
/**
* @brief set the merge-limit-reached callback; once the merge limit is hit for a table, mergeLimitReached is invoked with that table's uid and the user-supplied param
*/
void tsortSetMergeLimitReachedFp(SSortHandle* pHandle, void (*mergeLimitReached)(uint64_t tableUid, void* param), void* param);
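// A sketch of the intended wiring (the real call site lives in the scan setup code and
// is not shown in this diff): the table merge scan would register its skip hook as
//   tsortSetMergeLimitReachedFp(pHandle, tableMergeScanDoSkipTable, pInfo);
// so that once the sorter has emitted mergeLimit rows for a table, that table's uid
// lands in pInfo->mSkipTables and further blocks from it are skipped.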
#ifdef __cplusplus
}
#endif

View File

@ -260,14 +260,17 @@ static int32_t initDataSource(int32_t numOfSources, SExchangeInfo* pInfo, const
return TSDB_CODE_SUCCESS;
}
int32_t len = strlen(id) + 1;
pInfo->pTaskId = taosMemoryCalloc(1, len);
strncpy(pInfo->pTaskId, id, len);
for (int32_t i = 0; i < numOfSources; ++i) {
SSourceDataInfo dataInfo = {0};
dataInfo.status = EX_SOURCE_DATA_NOT_READY;
dataInfo.taskId = id;
dataInfo.taskId = pInfo->pTaskId;
dataInfo.index = i;
SSourceDataInfo* pDs = taosArrayPush(pInfo->pSourceDataInfo, &dataInfo);
if (pDs == NULL) {
taosArrayDestroy(pInfo->pSourceDataInfo);
taosArrayDestroyEx(pInfo->pSourceDataInfo, freeSourceDataInfo);
return TSDB_CODE_OUT_OF_MEMORY;
}
}
@ -383,6 +386,8 @@ void doDestroyExchangeOperatorInfo(void* param) {
tSimpleHashCleanup(pExInfo->pHashSources);
tsem_destroy(&pExInfo->ready);
taosMemoryFreeClear(pExInfo->pTaskId);
taosMemoryFreeClear(param);
}
@ -782,7 +787,7 @@ int32_t addSingleExchangeSource(SOperatorInfo* pOperator, SExchangeOperatorBasic
if (pIdx->inUseIdx < 0) {
SSourceDataInfo dataInfo = {0};
dataInfo.status = EX_SOURCE_DATA_NOT_READY;
dataInfo.taskId = GET_TASKID(pOperator->pTaskInfo);
dataInfo.taskId = pExchangeInfo->pTaskId;
dataInfo.index = pIdx->srcIdx;
dataInfo.pSrcUidList = taosArrayDup(pBasicParam->uidList, NULL);
dataInfo.srcOpType = pBasicParam->srcOpType;

View File

@ -646,10 +646,6 @@ int32_t getColInfoResultForGroupby(void* pVnode, SNodeList* group, STableListInf
}
}
if (initRemainGroups) {
pTableListInfo->numOfOuputGroups = taosHashGetSize(pTableListInfo->remainGroups);
}
if (tsTagFilterCache) {
tableList = taosArrayDup(pTableListInfo->pTableList, NULL);
pAPI->metaFn.metaPutTbGroupToCache(pVnode, pTableListInfo->idInfo.suid, context.digest, tListLen(context.digest),
@ -1862,7 +1858,7 @@ STimeWindow getActiveTimeWindow(SDiskbasedBuf* pBuf, SResultRowInfo* pResultRowI
STimeWindow w = {0};
if (pResultRowInfo->cur.pageId == -1) { // the first window, from the previous stored value
getInitialStartTimeWindow(pInterval, ts, &w, (order == TSDB_ORDER_ASC));
w.ekey = taosTimeAdd(w.skey, pInterval->interval, pInterval->intervalUnit, pInterval->precision) - 1;
w.ekey = taosTimeGetIntervalEnd(w.skey, pInterval);
return w;
}
@ -2142,6 +2138,8 @@ int32_t buildGroupIdMapForAllTables(STableListInfo* pTableListInfo, SReadHandle*
pTableListInfo->numOfOuputGroups = numOfTables;
} else if (groupByTbname && pScanNode->groupOrderScan){
pTableListInfo->numOfOuputGroups = numOfTables;
} else if (groupByTbname && tsCountAlwaysReturnValue && ((STableScanPhysiNode*)pScanNode)->needCountEmptyTable) {
pTableListInfo->numOfOuputGroups = numOfTables;
} else {
pTableListInfo->numOfOuputGroups = 1;
}
@ -2159,6 +2157,8 @@ int32_t buildGroupIdMapForAllTables(STableListInfo* pTableListInfo, SReadHandle*
return code;
}
if (pScanNode->groupOrderScan) pTableListInfo->numOfOuputGroups = taosArrayGetSize(pTableListInfo->pTableList);
if (groupSort || pScanNode->groupOrderScan) {
code = sortTableGroup(pTableListInfo);
}

View File

@ -1213,7 +1213,7 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT
STableKeyInfo* pTableInfo = tableListGetInfo(pTableListInfo, 0);
uid = pTableInfo->uid;
ts = INT64_MIN;
pScanInfo->tableEndIndex = 0;
pScanInfo->currentTable = 0;
} else {
taosRUnLockLatch(&pTaskInfo->lock);
qError("no table in table list, %s", id);
@ -1227,16 +1227,16 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT
pInfo->pTableScanOp->resultInfo.totalRows = 0;
// start from current accessed position
// we cannot start from the pScanInfo->tableEndIndex, since the commit offset may cause the rollback of the start
// we cannot start from the pScanInfo->currentTable, since the commit offset may cause the rollback of the start
// position, let's find it from the beginning.
index = tableListFind(pTableListInfo, uid, 0);
taosRUnLockLatch(&pTaskInfo->lock);
if (index >= 0) {
pScanInfo->tableEndIndex = index;
pScanInfo->currentTable = index;
} else {
qError("vgId:%d uid:%" PRIu64 " not found in table list, total:%d, index:%d %s", pTaskInfo->id.vgId, uid,
numOfTables, pScanInfo->tableEndIndex, id);
numOfTables, pScanInfo->currentTable, id);
terrno = TSDB_CODE_PAR_INTERNAL_ERROR;
return -1;
}
@ -1259,12 +1259,12 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT
}
qDebug("tsdb reader created with offset(snapshot) uid:%" PRId64 " ts:%" PRId64 " table index:%d, total:%d, %s",
uid, pScanBaseInfo->cond.twindows.skey, pScanInfo->tableEndIndex, numOfTables, id);
uid, pScanBaseInfo->cond.twindows.skey, pScanInfo->currentTable, numOfTables, id);
} else {
pTaskInfo->storageAPI.tsdReader.tsdSetQueryTableList(pScanBaseInfo->dataReader, &keyInfo, 1);
pTaskInfo->storageAPI.tsdReader.tsdReaderResetStatus(pScanBaseInfo->dataReader, &pScanBaseInfo->cond);
qDebug("tsdb reader offset seek snapshot to uid:%" PRId64 " ts %" PRId64 " table index:%d numOfTable:%d, %s",
uid, pScanBaseInfo->cond.twindows.skey, pScanInfo->tableEndIndex, numOfTables, id);
uid, pScanBaseInfo->cond.twindows.skey, pScanInfo->currentTable, numOfTables, id);
}
// restore the key value

View File

@ -27,6 +27,7 @@ typedef struct SProjectOperatorInfo {
SLimitInfo limitInfo;
bool mergeDataBlocks;
SSDataBlock* pFinalRes;
bool inputIgnoreGroup;
} SProjectOperatorInfo;
typedef struct SIndefOperatorInfo {
@ -109,6 +110,7 @@ SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SProjectPhys
pInfo->pFinalRes = createOneDataBlock(pResBlock, false);
pInfo->binfo.inputTsOrder = pProjPhyNode->node.inputTsOrder;
pInfo->binfo.outputTsOrder = pProjPhyNode->node.outputTsOrder;
pInfo->inputIgnoreGroup = pProjPhyNode->inputIgnoreGroup;
if (pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM || pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE) {
pInfo->mergeDataBlocks = false;
@ -300,6 +302,10 @@ SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) {
return pBlock;
}
if (pProjectInfo->inputIgnoreGroup) {
pBlock->info.id.groupId = 0;
}
int32_t status = discardGroupDataBlock(pBlock, pLimitInfo);
if (status == PROJECT_RETRIEVE_CONTINUE) {
continue;

View File

@ -657,33 +657,17 @@ void setTbNameColData(const SSDataBlock* pBlock, SColumnInfoData* pColInfoData,
static void initNextGroupScan(STableScanInfo* pInfo, STableKeyInfo** pKeyInfo, int32_t* size) {
pInfo->tableStartIndex = pInfo->tableEndIndex + 1;
tableListGetGroupList(pInfo->base.pTableListInfo, pInfo->currentGroupId, pKeyInfo, size);
STableListInfo* pTableListInfo = pInfo->base.pTableListInfo;
int32_t numOfTables = tableListGetSize(pTableListInfo);
STableKeyInfo* pStart = (STableKeyInfo*)tableListGetInfo(pTableListInfo, pInfo->tableStartIndex);
pInfo->tableStartIndex = TARRAY_ELEM_IDX(pInfo->base.pTableListInfo->pTableList, *pKeyInfo);
if (pTableListInfo->oneTableForEachGroup) {
pInfo->tableEndIndex = pInfo->tableStartIndex;
} else if (pTableListInfo->groupOffset) {
pInfo->currentGroupIndex++;
if (pInfo->currentGroupIndex + 1 < pTableListInfo->numOfOuputGroups) {
pInfo->tableEndIndex = pTableListInfo->groupOffset[pInfo->currentGroupIndex + 1] - 1;
} else {
pInfo->tableEndIndex = numOfTables - 1;
}
} else {
pInfo->tableEndIndex = numOfTables - 1;
}
pInfo->tableEndIndex = (pInfo->tableStartIndex + (*size) - 1);
if (!pInfo->needCountEmptyTable) {
pInfo->countState = TABLE_COUNT_STATE_END;
} else {
pInfo->countState = TABLE_COUNT_STATE_SCAN;
}
*pKeyInfo = pStart;
*size = pInfo->tableEndIndex - pInfo->tableStartIndex + 1;
}
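// After this rewrite the group's span comes straight from the group list lookup:
// tableListGetGroupList returns the group's first key and its size, so the start/end
// indexes are derived directly instead of via the per-layout branches removed above.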
void markGroupProcessed(STableScanInfo* pInfo, uint64_t groupId) {
@ -939,7 +923,7 @@ static SSDataBlock* startNextGroupScan(SOperatorInfo* pOperator) {
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
SStorageAPI* pAPI = &pTaskInfo->storageAPI;
int32_t numOfTables = tableListGetSize(pInfo->base.pTableListInfo);
if (pInfo->tableEndIndex + 1 >= numOfTables) {
if ((++pInfo->currentGroupId) >= tableListGetOutputGroups(pInfo->base.pTableListInfo)) {
setOperatorCompleted(pOperator);
if (pOperator->dynamicTask) {
taosArrayClear(pInfo->base.pTableListInfo->pTableList);
@ -978,13 +962,14 @@ static SSDataBlock* groupSeqTableScan(SOperatorInfo* pOperator) {
int32_t num = 0;
STableKeyInfo* pList = NULL;
if (pInfo->tableEndIndex == -1) {
if (pInfo->currentGroupId == -1) {
int32_t numOfTables = tableListGetSize(pInfo->base.pTableListInfo);
if (pInfo->tableEndIndex + 1 == numOfTables) {
if ((++pInfo->currentGroupId) >= tableListGetOutputGroups(pInfo->base.pTableListInfo)) {
setOperatorCompleted(pOperator);
return NULL;
}
initNextGroupScan(pInfo, &pList, &num);
ASSERT(pInfo->base.dataReader == NULL);
@ -1034,7 +1019,7 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) {
T_LONG_JMP(pTaskInfo->env, code);
}
if (pOperator->status == OP_EXEC_DONE) {
pInfo->tableEndIndex = -1;
pInfo->currentGroupId = -1;
pOperator->status = OP_OPENED;
SSDataBlock* result = NULL;
while (true) {
@ -1059,23 +1044,23 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) {
}
// if no data, switch to next table and continue scan
pInfo->tableEndIndex++;
pInfo->currentTable++;
taosRLockLatch(&pTaskInfo->lock);
numOfTables = tableListGetSize(pInfo->base.pTableListInfo);
if (pInfo->tableEndIndex >= numOfTables) {
if (pInfo->currentTable >= numOfTables) {
qDebug("all table checked in table list, total:%d, return NULL, %s", numOfTables, GET_TASKID(pTaskInfo));
taosRUnLockLatch(&pTaskInfo->lock);
return NULL;
}
tInfo = *(STableKeyInfo*)tableListGetInfo(pInfo->base.pTableListInfo, pInfo->tableEndIndex);
tInfo = *(STableKeyInfo*)tableListGetInfo(pInfo->base.pTableListInfo, pInfo->currentTable);
taosRUnLockLatch(&pTaskInfo->lock);
pAPI->tsdReader.tsdSetQueryTableList(pInfo->base.dataReader, &tInfo, 1);
qDebug("set uid:%" PRIu64 " into scanner, total tables:%d, index:%d/%d %s", tInfo.uid, numOfTables,
pInfo->tableEndIndex, numOfTables, GET_TASKID(pTaskInfo));
pInfo->currentTable, numOfTables, GET_TASKID(pTaskInfo));
pAPI->tsdReader.tsdReaderResetStatus(pInfo->base.dataReader, &pInfo->base.cond);
pInfo->scanTimes = 0;
@ -1168,8 +1153,9 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode,
goto _error;
}
pInfo->currentGroupId = -1;
pInfo->tableEndIndex = -1;
pInfo->currentGroupIndex = -1;
pInfo->assignBlockUid = pTableScanNode->assignBlockUid;
pInfo->hasGroupByTag = pTableScanNode->pGroupTags ? true : false;
@ -1264,6 +1250,7 @@ void resetTableScanInfo(STableScanInfo* pTableScanInfo, STimeWindow* pWin, uint6
pTableScanInfo->base.cond.startVersion = 0;
pTableScanInfo->base.cond.endVersion = ver;
pTableScanInfo->scanTimes = 0;
pTableScanInfo->currentGroupId = -1;
pTableScanInfo->tableEndIndex = -1;
pTableScanInfo->base.readerAPI.tsdReaderClose(pTableScanInfo->base.dataReader);
pTableScanInfo->base.dataReader = NULL;
@ -2167,7 +2154,7 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
pInfo->pTableScanOp->status = OP_OPENED;
pTSInfo->scanTimes = 0;
pTSInfo->tableEndIndex = -1;
pTSInfo->currentGroupId = -1;
}
if (pStreamInfo->recoverStep == STREAM_RECOVER_STEP__SCAN1) {
@ -3324,26 +3311,16 @@ _error:
return NULL;
}
static int32_t tableMergeScanDoSkipTable(STableMergeScanInfo* pInfo, SSDataBlock* pBlock) {
int64_t nRows = 0;
void* pNum = tSimpleHashGet(pInfo->mTableNumRows, &pBlock->info.id.uid, sizeof(pBlock->info.id.uid));
if (pNum == NULL) {
nRows = pBlock->info.rows;
tSimpleHashPut(pInfo->mTableNumRows, &pBlock->info.id.uid, sizeof(pBlock->info.id.uid), &nRows, sizeof(nRows));
} else {
*(int64_t*)pNum = *(int64_t*)pNum + pBlock->info.rows;
nRows = *(int64_t*)pNum;
}
if (nRows >= pInfo->mergeLimit) {
static void tableMergeScanDoSkipTable(uint64_t uid, void* pTableMergeScanInfo) {
STableMergeScanInfo* pInfo = pTableMergeScanInfo;
if (pInfo->mSkipTables == NULL) {
pInfo->mSkipTables = taosHashInit(pInfo->tableEndIndex - pInfo->tableStartIndex + 1,
taosGetDefaultHashFunction(TSDB_DATA_TYPE_UBIGINT), false, HASH_NO_LOCK);
}
int bSkip = 1;
taosHashPut(pInfo->mSkipTables, &pBlock->info.id.uid, sizeof(pBlock->info.id.uid), &bSkip, sizeof(bSkip));
if (pInfo->mSkipTables != NULL) {
taosHashPut(pInfo->mSkipTables, &uid, sizeof(uid), &bSkip, sizeof(bSkip));
}
return TSDB_CODE_SUCCESS;
}
static void doGetBlockForTableMergeScan(SOperatorInfo* pOperator, bool* pFinished, bool* pSkipped) {
@ -3459,10 +3436,6 @@ static SSDataBlock* getBlockForTableMergeScan(void* param) {
}
pBlock->info.id.groupId = tableListGetTableGroupId(pInfo->base.pTableListInfo, pBlock->info.id.uid);
if (pInfo->mergeLimit != -1) {
tableMergeScanDoSkipTable(pInfo, pBlock);
}
pOperator->resultInfo.totalRows += pBlock->info.rows;
pInfo->base.readRecorder.elapsedTime += (taosGetTimestampUs() - st) / 1000.0;
return pBlock;
@ -3529,6 +3502,7 @@ int32_t startDurationForGroupTableMergeScan(SOperatorInfo* pOperator) {
pInfo->pSortInputBlock, pTaskInfo->id.str, 0, 0, 0);
tsortSetMergeLimit(pInfo->pSortHandle, pInfo->mergeLimit);
tsortSetMergeLimitReachedFp(pInfo->pSortHandle, tableMergeScanDoSkipTable, pInfo);
tsortSetAbortCheckFn(pInfo->pSortHandle, isTaskKilled, pOperator->pTaskInfo);
tsortSetFetchRawDataFp(pInfo->pSortHandle, getBlockForTableMergeScan, NULL, NULL);
@ -3756,8 +3730,6 @@ void destroyTableMergeScanOperatorInfo(void* param) {
taosArrayDestroy(pTableScanInfo->sortSourceParams);
tsortDestroySortHandle(pTableScanInfo->pSortHandle);
pTableScanInfo->pSortHandle = NULL;
tSimpleHashCleanup(pTableScanInfo->mTableNumRows);
pTableScanInfo->mTableNumRows = NULL;
taosHashCleanup(pTableScanInfo->mSkipTables);
pTableScanInfo->mSkipTables = NULL;
destroyTableScanBase(&pTableScanInfo->base, &pTableScanInfo->base.readerAPI);
@ -3849,8 +3821,7 @@ SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanN
pInfo->pSortInfo = generateSortByTsInfo(pInfo->base.matchInfo.pList, pInfo->base.cond.order);
pInfo->pSortInputBlock = createOneDataBlock(pInfo->pResBlock, false);
initLimitInfo(pTableScanNode->scan.node.pLimit, pTableScanNode->scan.node.pSlimit, &pInfo->limitInfo);
pInfo->mTableNumRows = tSimpleHashInit(1024,
taosGetDefaultHashFunction(TSDB_DATA_TYPE_UBIGINT));
pInfo->mergeLimit = -1;
bool hasLimit = pInfo->limitInfo.limit.limit != -1 || pInfo->limitInfo.limit.offset != -1;
if (hasLimit) {
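
The reworked initNextGroupScan above drops the per-branch bookkeeping (oneTableForEachGroup / groupOffset) and derives the scan range for the next group directly from the group list: the start index comes from the pointer returned for the group's first entry and the end index is start + size - 1. A minimal standalone sketch of that index arithmetic, assuming a flat table list sorted by group; the names below are illustrative, not the executor's own:

```c
#include <stdio.h>
#include <stdint.h>

typedef struct { uint64_t uid; uint64_t groupId; } TableKeyInfo;

/* Given the full table list and the (pointer, count) pair describing the
 * current group, derive the inclusive [start, end] index range to scan. */
static void groupSliceRange(const TableKeyInfo *all, const TableKeyInfo *groupStart,
                            int32_t groupSize, int32_t *start, int32_t *end) {
  *start = (int32_t)(groupStart - all);   /* same idea as TARRAY_ELEM_IDX */
  *end   = *start + groupSize - 1;
}

int main(void) {
  TableKeyInfo tables[] = {
      {1001, 1}, {1002, 1}, {2001, 2}, {2002, 2}, {2003, 2}, {3001, 3}};
  /* pretend the group list lookup returned group 2 */
  const TableKeyInfo *pStart = &tables[2];
  int32_t start = 0, end = 0;
  groupSliceRange(tables, pStart, 3, &start, &end);
  printf("scan tables[%d..%d]\n", start, end);   /* scan tables[2..4] */
  return 0;
}
```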

View File

@ -30,6 +30,7 @@
typedef struct SSessionAggOperatorInfo {
SOptrBasicInfo binfo;
SAggSupporter aggSup;
SExprSupp scalarSupp; // support for scalar function evaluation
SGroupResInfo groupResInfo;
SWindowRowsSup winSup;
bool reptScan; // next round scan
@ -1407,6 +1408,10 @@ static SSDataBlock* doSessionWindowAgg(SOperatorInfo* pOperator) {
}
pBInfo->pRes->info.scanFlag = pBlock->info.scanFlag;
if (pInfo->scalarSupp.pExprInfo != NULL) {
SExprSupp* pExprSup = &pInfo->scalarSupp;
projectApplyFunctions(pExprSup->pExprInfo, pBlock, pBlock, pExprSup->pCtx, pExprSup->numOfExprs, NULL);
}
// the pDataBlock are always the same one, no need to call this again
setInputDataBlock(pSup, pBlock, order, MAIN_SCAN, true);
blockDataUpdateTsWindow(pBlock, pInfo->tsSlotId);
@ -1531,6 +1536,8 @@ void destroySWindowOperatorInfo(void* param) {
colDataDestroy(&pInfo->twAggSup.timeWindowData);
cleanupAggSup(&pInfo->aggSup);
cleanupExprSupp(&pInfo->scalarSupp);
cleanupGroupResInfo(&pInfo->groupResInfo);
taosMemoryFreeClear(param);
}
@ -1570,6 +1577,16 @@ SOperatorInfo* createSessionAggOperatorInfo(SOperatorInfo* downstream, SSessionW
pInfo->reptScan = false;
pInfo->binfo.inputTsOrder = pSessionNode->window.node.inputTsOrder;
pInfo->binfo.outputTsOrder = pSessionNode->window.node.outputTsOrder;
if (pSessionNode->window.pExprs != NULL) {
int32_t numOfScalar = 0;
SExprInfo* pScalarExprInfo = createExprInfo(pSessionNode->window.pExprs, NULL, &numOfScalar);
code = initExprSupp(&pInfo->scalarSupp, pScalarExprInfo, numOfScalar, &pTaskInfo->storageAPI.functionStore);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
}
code = filterInitFromNode((SNode*)pSessionNode->window.node.pConditions, &pOperator->exprSupp.pFilterInfo, 0);
if (code != TSDB_CODE_SUCCESS) {
goto _error;

View File

@ -75,6 +75,9 @@ struct SSortHandle {
bool (*abortCheckFn)(void* param);
void* abortCheckParam;
void (*mergeLimitReachedFn)(uint64_t tableUid, void* param);
void* mergeLimitReachedParam;
};
void tsortSetSingleTableMerge(SSortHandle* pHandle) {
@ -1040,6 +1043,39 @@ static int32_t sortBlocksToExtSource(SSortHandle* pHandle, SArray* aBlk, SBlockO
return 0;
}
static SSDataBlock* getRowsBlockWithinMergeLimit(const SSortHandle* pHandle, SSHashObj* mTableNumRows, SSDataBlock* pOrigBlk, bool* pExtractedBlock) {
int64_t nRows = 0;
int64_t prevRows = 0;
void* pNum = tSimpleHashGet(mTableNumRows, &pOrigBlk->info.id.uid, sizeof(pOrigBlk->info.id.uid));
if (pNum == NULL) {
prevRows = 0;
nRows = pOrigBlk->info.rows;
tSimpleHashPut(mTableNumRows, &pOrigBlk->info.id.uid, sizeof(pOrigBlk->info.id.uid), &nRows, sizeof(nRows));
} else {
prevRows = *(int64_t*)pNum;
*(int64_t*)pNum = *(int64_t*)pNum + pOrigBlk->info.rows;
nRows = *(int64_t*)pNum;
}
int64_t keepRows = pOrigBlk->info.rows;
if (nRows >= pHandle->mergeLimit) {
if (pHandle->mergeLimitReachedFn) {
pHandle->mergeLimitReachedFn(pOrigBlk->info.id.uid, pHandle->mergeLimitReachedParam);
}
keepRows = pHandle->mergeLimit - prevRows;
}
SSDataBlock* pBlock = NULL;
if (keepRows != pOrigBlk->info.rows) {
pBlock = blockDataExtractBlock(pOrigBlk, 0, keepRows);
*pExtractedBlock = true;
} else {
*pExtractedBlock = false;
pBlock = pOrigBlk;
}
return pBlock;
}
static int32_t createBlocksMergeSortInitialSources(SSortHandle* pHandle) {
SBlockOrderInfo* pOrder = taosArrayGet(pHandle->pSortInfo, 0);
size_t nSrc = taosArrayGetSize(pHandle->pOrderedSource);
@ -1062,10 +1098,18 @@ static int32_t createBlocksMergeSortInitialSources(SSortHandle* pHandle) {
pHandle->currMergeLimitTs = INT64_MIN;
}
SSHashObj* mTableNumRows = tSimpleHashInit(8192, taosGetDefaultHashFunction(TSDB_DATA_TYPE_UBIGINT));
SArray* aBlkSort = taosArrayInit(8, POINTER_BYTES);
SSHashObj* mUidBlk = tSimpleHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_UBIGINT));
while (1) {
SSDataBlock* pBlk = pHandle->fetchfp(pSrc->param);
int64_t p = taosGetTimestampUs();
bool bExtractedBlock = false;
if (pBlk != NULL && pHandle->mergeLimit > 0) {
pBlk = getRowsBlockWithinMergeLimit(pHandle, mTableNumRows, pBlk, &bExtractedBlock);
}
if (pBlk != NULL) {
SColumnInfoData* tsCol = taosArrayGet(pBlk->pDataBlock, pOrder->slotId);
int64_t firstRowTs = *(int64_t*)tsCol->pData;
@ -1074,6 +1118,7 @@ static int32_t createBlocksMergeSortInitialSources(SSortHandle* pHandle) {
continue;
}
}
if (pBlk != NULL) {
szSort += blockDataGetSize(pBlk);
@ -1081,8 +1126,11 @@ static int32_t createBlocksMergeSortInitialSources(SSortHandle* pHandle) {
if (ppBlk != NULL) {
SSDataBlock* tBlk = *(SSDataBlock**)(ppBlk);
blockDataMerge(tBlk, pBlk);
if (bExtractedBlock) {
blockDataDestroy(pBlk);
}
} else {
SSDataBlock* tBlk = createOneDataBlock(pBlk, true);
SSDataBlock* tBlk = (bExtractedBlock) ? pBlk : createOneDataBlock(pBlk, true);
tSimpleHashPut(mUidBlk, &pBlk->info.id.uid, sizeof(pBlk->info.id.uid), &tBlk, POINTER_BYTES);
taosArrayPush(aBlkSort, &tBlk);
}
@ -1091,7 +1139,6 @@ static int32_t createBlocksMergeSortInitialSources(SSortHandle* pHandle) {
if ((pBlk != NULL && szSort > maxBufSize) || (pBlk == NULL && szSort > 0)) {
tSimpleHashClear(mUidBlk);
int64_t p = taosGetTimestampUs();
code = sortBlocksToExtSource(pHandle, aBlkSort, pOrder, aExtSrc);
if (code != TSDB_CODE_SUCCESS) {
tSimpleHashCleanup(mUidBlk);
@ -1131,7 +1178,7 @@ static int32_t createBlocksMergeSortInitialSources(SSortHandle* pHandle) {
taosArrayAddAll(pHandle->pOrderedSource, aExtSrc);
}
taosArrayDestroy(aExtSrc);
tSimpleHashCleanup(mTableNumRows);
pHandle->type = SORT_SINGLESOURCE_SORT;
return TSDB_CODE_SUCCESS;
}
@ -1610,3 +1657,8 @@ int32_t tsortCompAndBuildKeys(const SArray* pSortCols, char* keyBuf, int32_t* ke
}
return ret;
}
void tsortSetMergeLimitReachedFp(SSortHandle* pHandle, void (*mergeLimitReachedCb)(uint64_t tableUid, void* param), void* param) {
pHandle->mergeLimitReachedFn = mergeLimitReachedCb;
pHandle->mergeLimitReachedParam = param;
}
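
getRowsBlockWithinMergeLimit keeps a per-table row count and, once the accumulated total reaches the merge limit, trims the incoming block to the remaining quota and invokes the limit-reached callback registered via tsortSetMergeLimitReachedFp (tableMergeScanDoSkipTable). A minimal sketch of the row-budget arithmetic, with the per-table hash replaced by a plain counter (names are hypothetical):

```c
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Decide how many rows of a new block to keep for one table, given how many
 * rows of that table were already emitted and the overall merge limit. */
static int64_t rowsToKeep(int64_t prevRows, int64_t blockRows, int64_t mergeLimit,
                          bool *limitReached) {
  int64_t total = prevRows + blockRows;
  *limitReached = (total >= mergeLimit);
  if (!*limitReached) return blockRows;      /* keep the whole block */
  int64_t remain = mergeLimit - prevRows;    /* remaining quota for this table */
  return remain > 0 ? remain : 0;
}

int main(void) {
  bool reached = false;
  /* 90 rows already read, limit 100, next block has 30 rows -> keep 10 */
  int64_t keep = rowsToKeep(90, 30, 100, &reached);
  printf("keep %lld rows, limit reached: %d\n", (long long)keep, reached);
  return 0;
}
```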

View File

@ -19,6 +19,7 @@
#include "thash.h"
#include "tsimplehash.h"
#include "executor.h"
#include "executorInt.h"
#include "ttime.h"
#pragma GCC diagnostic push
@ -186,4 +187,20 @@ TEST(testCase, timewindow_natural) {
ASSERT_EQ(w3.skey, w4.skey);
ASSERT_EQ(w3.ekey, w4.ekey);
}
TEST(testCase, timewindow_active) {
osSetTimezone("CST");
int32_t precision = TSDB_TIME_PRECISION_MILLI;
int64_t offset = (int64_t)2*365*24*60*60*1000;
SInterval interval = createInterval(10, 10, offset, 'y', 'y', 0, precision);
SResultRowInfo dumyInfo = {0};
dumyInfo.cur.pageId = -1;
int64_t key = (int64_t)1609430400*1000; // 2021-01-01
STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, key, &interval, TSDB_ORDER_ASC);
printTimeWindow(&win, precision, key);
printf("%ld win %ld, %ld\n", key, win.skey, win.ekey);
ASSERT_EQ(win.skey, 1325376000000);
ASSERT_EQ(win.ekey, 1640908799999);
}
#pragma GCC diagnostic pop

View File

@ -1,6 +1,17 @@
//
// Created by slzhou on 22-4-20.
//
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TDENGINE_FNLOG_H
#define TDENGINE_FNLOG_H

View File

@ -952,7 +952,7 @@ static int32_t translateLeastSQR(SFunctionNode* pFunc, char* pErrBuf, int32_t le
}
}
pFunc->node.resType = (SDataType){.bytes = 64, .type = TSDB_DATA_TYPE_BINARY};
pFunc->node.resType = (SDataType){.bytes = LEASTSQUARES_BUFF_LENGTH, .type = TSDB_DATA_TYPE_BINARY};
return TSDB_CODE_SUCCESS;
}
@ -1305,11 +1305,14 @@ static bool validateStateOper(const SValueNode* pVal) {
if (TSDB_DATA_TYPE_BINARY != pVal->node.resType.type) {
return false;
}
if (strlen(varDataVal(pVal->datum.p)) == 2) {
return (
0 == strncasecmp(varDataVal(pVal->datum.p), "GT", 2) || 0 == strncasecmp(varDataVal(pVal->datum.p), "GE", 2) ||
0 == strncasecmp(varDataVal(pVal->datum.p), "LT", 2) || 0 == strncasecmp(varDataVal(pVal->datum.p), "LE", 2) ||
0 == strncasecmp(varDataVal(pVal->datum.p), "EQ", 2) || 0 == strncasecmp(varDataVal(pVal->datum.p), "NE", 2));
}
return false;
}
static int32_t translateStateCount(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList);
@ -3737,7 +3740,11 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
.translateFunc = translateTbUidColumn,
.getEnvFunc = NULL,
.initFunc = NULL,
#ifdef BUILD_NO_CALL
.sprocessFunc = qTbUidFunction,
#else
.sprocessFunc = NULL,
#endif
.finalizeFunc = NULL
},
{
@ -3747,7 +3754,11 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
.translateFunc = translateVgIdColumn,
.getEnvFunc = NULL,
.initFunc = NULL,
#ifdef BUILD_NO_CALL
.sprocessFunc = qVgIdFunction,
#else
.sprocessFunc = NULL,
#endif
.finalizeFunc = NULL
},
{
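
validateStateOper now requires the operator literal to be exactly two characters before comparing it against the allowed set, so inputs such as "GTX" no longer pass on a prefix match. A standalone sketch of the stricter check, using a plain C string in place of TDengine's var-data layout:

```c
#include <stdio.h>
#include <string.h>
#include <strings.h>
#include <stdbool.h>

/* Accept only the six two-letter comparison operators, case-insensitively. */
static bool isValidStateOper(const char *op) {
  if (strlen(op) != 2) return false;
  static const char *allowed[] = {"GT", "GE", "LT", "LE", "EQ", "NE"};
  for (size_t i = 0; i < sizeof(allowed) / sizeof(allowed[0]); ++i) {
    if (strncasecmp(op, allowed[i], 2) == 0) return true;
  }
  return false;
}

int main(void) {
  printf("%d %d %d\n", isValidStateOper("lt"), isValidStateOper("GTX"),
         isValidStateOper("eq"));   /* prints: 1 0 1 */
  return 0;
}
```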

View File

@ -1576,9 +1576,19 @@ int32_t leastSQRFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
param12 /= param[1][1];
char buf[512] = {0};
char buf[LEASTSQUARES_BUFF_LENGTH] = {0};
char slopBuf[64] = {0};
char interceptBuf[64] = {0};
int n = snprintf(slopBuf, 64, "%.6lf", param02);
if (n > LEASTSQUARES_DOUBLE_ITEM_LENGTH) {
snprintf(slopBuf, 64, "%." DOUBLE_PRECISION_DIGITS, param02);
}
n = snprintf(interceptBuf, 64, "%.6lf", param12);
if (n > LEASTSQUARES_DOUBLE_ITEM_LENGTH) {
snprintf(interceptBuf, 64, "%." DOUBLE_PRECISION_DIGITS, param12);
}
size_t len =
snprintf(varDataVal(buf), sizeof(buf) - VARSTR_HEADER_SIZE, "{slop:%.6lf, intercept:%.6lf}", param02, param12);
snprintf(varDataVal(buf), sizeof(buf) - VARSTR_HEADER_SIZE, "{slop:%s, intercept:%s}", slopBuf, interceptBuf);
varDataSetLen(buf, len);
colDataSetVal(pCol, currentRow, buf, pResInfo->isNullRes);
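
leastSQRFinalize now formats slope and intercept into separate buffers and, when the fixed "%.6lf" rendering would overflow the per-item budget, retries with a higher-precision scientific format so the combined result still fits the enlarged output buffer. A sketch of that overflow check; LEASTSQUARES_DOUBLE_ITEM_LENGTH and DOUBLE_PRECISION_DIGITS are macros in the codebase, so the limit and fallback format below are assumptions:

```c
#include <stdio.h>

#define ITEM_LEN 25           /* assumed per-item length budget */
#define SCI_FMT  "%.15le"     /* assumed fallback format */

/* Render a double with fixed precision, switching to scientific notation
 * when the fixed form would not fit the item budget. */
static void formatItem(char *out, size_t cap, double v) {
  int n = snprintf(out, cap, "%.6lf", v);   /* n = length the value needs */
  if (n > ITEM_LEN) {
    snprintf(out, cap, SCI_FMT, v);         /* compact scientific fallback */
  }
}

int main(void) {
  char slope[64], intercept[64], buf[128];
  formatItem(slope, sizeof(slope), 1.5);            /* stays fixed-point    */
  formatItem(intercept, sizeof(intercept), 1e200);  /* falls back to %.15le */
  snprintf(buf, sizeof(buf), "{slop:%s, intercept:%s}", slope, intercept);
  printf("%s\n", buf);
  return 0;
}
```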

View File

@ -486,6 +486,7 @@ static int32_t logicProjectCopy(const SProjectLogicNode* pSrc, SProjectLogicNode
CLONE_NODE_LIST_FIELD(pProjections);
COPY_CHAR_ARRAY_FIELD(stmtName);
COPY_SCALAR_FIELD(ignoreGroupId);
COPY_SCALAR_FIELD(inputIgnoreGroup);
return TSDB_CODE_SUCCESS;
}
@ -728,6 +729,15 @@ static int32_t physiPartitionCopy(const SPartitionPhysiNode* pSrc, SPartitionPhy
return TSDB_CODE_SUCCESS;
}
static int32_t physiProjectCopy(const SProjectPhysiNode* pSrc, SProjectPhysiNode* pDst) {
COPY_BASE_OBJECT_FIELD(node, physiNodeCopy);
CLONE_NODE_LIST_FIELD(pProjections);
COPY_SCALAR_FIELD(mergeDataBlock);
COPY_SCALAR_FIELD(ignoreGroupId);
COPY_SCALAR_FIELD(inputIgnoreGroup);
return TSDB_CODE_SUCCESS;
}
static int32_t dataBlockDescCopy(const SDataBlockDescNode* pSrc, SDataBlockDescNode* pDst) {
COPY_SCALAR_FIELD(dataBlockId);
CLONE_NODE_LIST_FIELD(pSlots);
@ -959,6 +969,9 @@ SNode* nodesCloneNode(const SNode* pNode) {
case QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION:
code = physiPartitionCopy((const SPartitionPhysiNode*)pNode, (SPartitionPhysiNode*)pDst);
break;
case QUERY_NODE_PHYSICAL_PLAN_PROJECT:
code = physiProjectCopy((const SProjectPhysiNode*)pNode, (SProjectPhysiNode*)pDst);
break;
default:
break;
}

View File

@ -784,6 +784,7 @@ static int32_t jsonToLogicScanNode(const SJson* pJson, void* pObj) {
static const char* jkProjectLogicPlanProjections = "Projections";
static const char* jkProjectLogicPlanIgnoreGroupId = "IgnoreGroupId";
static const char* jkProjectLogicPlanInputIgnoreGroup= "InputIgnoreGroup";
static int32_t logicProjectNodeToJson(const void* pObj, SJson* pJson) {
const SProjectLogicNode* pNode = (const SProjectLogicNode*)pObj;
@ -795,6 +796,9 @@ static int32_t logicProjectNodeToJson(const void* pObj, SJson* pJson) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddBoolToObject(pJson, jkProjectLogicPlanIgnoreGroupId, pNode->ignoreGroupId);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddBoolToObject(pJson, jkProjectLogicPlanInputIgnoreGroup, pNode->inputIgnoreGroup);
}
return code;
}
@ -809,7 +813,9 @@ static int32_t jsonToLogicProjectNode(const SJson* pJson, void* pObj) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetBoolValue(pJson, jkProjectLogicPlanIgnoreGroupId, &pNode->ignoreGroupId);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetBoolValue(pJson, jkProjectLogicPlanInputIgnoreGroup, &pNode->inputIgnoreGroup);
}
return code;
}
@ -2067,6 +2073,7 @@ static int32_t jsonToPhysiSysTableScanNode(const SJson* pJson, void* pObj) {
static const char* jkProjectPhysiPlanProjections = "Projections";
static const char* jkProjectPhysiPlanMergeDataBlock = "MergeDataBlock";
static const char* jkProjectPhysiPlanIgnoreGroupId = "IgnoreGroupId";
static const char* jkProjectPhysiPlanInputIgnoreGroup = "InputIgnoreGroup";
static int32_t physiProjectNodeToJson(const void* pObj, SJson* pJson) {
const SProjectPhysiNode* pNode = (const SProjectPhysiNode*)pObj;
@ -2081,7 +2088,9 @@ static int32_t physiProjectNodeToJson(const void* pObj, SJson* pJson) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddBoolToObject(pJson, jkProjectPhysiPlanIgnoreGroupId, pNode->ignoreGroupId);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddBoolToObject(pJson, jkProjectPhysiPlanInputIgnoreGroup, pNode->inputIgnoreGroup);
}
return code;
}
@ -2098,7 +2107,9 @@ static int32_t jsonToPhysiProjectNode(const SJson* pJson, void* pObj) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetBoolValue(pJson, jkProjectPhysiPlanIgnoreGroupId, &pNode->ignoreGroupId);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetBoolValue(pJson, jkProjectPhysiPlanInputIgnoreGroup, &pNode->inputIgnoreGroup);
}
return code;
}

View File

@ -2367,7 +2367,8 @@ enum {
PHY_PROJECT_CODE_BASE_NODE = 1,
PHY_PROJECT_CODE_PROJECTIONS,
PHY_PROJECT_CODE_MERGE_DATA_BLOCK,
PHY_PROJECT_CODE_IGNORE_GROUP_ID
PHY_PROJECT_CODE_IGNORE_GROUP_ID,
PHY_PROJECT_CODE_INPUT_IGNORE_GROUP
};
static int32_t physiProjectNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
@ -2383,6 +2384,9 @@ static int32_t physiProjectNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
if (TSDB_CODE_SUCCESS == code) {
code = tlvEncodeBool(pEncoder, PHY_PROJECT_CODE_IGNORE_GROUP_ID, pNode->ignoreGroupId);
}
if (TSDB_CODE_SUCCESS == code) {
code = tlvEncodeBool(pEncoder, PHY_PROJECT_CODE_INPUT_IGNORE_GROUP, pNode->inputIgnoreGroup);
}
return code;
}
@ -2406,6 +2410,9 @@ static int32_t msgToPhysiProjectNode(STlvDecoder* pDecoder, void* pObj) {
case PHY_PROJECT_CODE_IGNORE_GROUP_ID:
code = tlvDecodeBool(pTlv, &pNode->ignoreGroupId);
break;
case PHY_PROJECT_CODE_INPUT_IGNORE_GROUP:
code = tlvDecodeBool(pTlv, &pNode->inputIgnoreGroup);
break;
default:
break;
}

View File

@ -216,12 +216,13 @@ void nodesWalkExprsPostOrder(SNodeList* pList, FNodeWalker walker, void* pContex
static void checkParamIsFunc(SFunctionNode* pFunc) {
int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList);
if (numOfParams > 1) {
for (int32_t i = 0; i < numOfParams; ++i) {
SNode* pPara = nodesListGetNode(pFunc->pParameterList, i);
if (nodeType(pPara) == QUERY_NODE_FUNCTION) {
if (numOfParams > 1 && nodeType(pPara) == QUERY_NODE_FUNCTION) {
((SFunctionNode*)pPara)->node.asParam = true;
}
if (nodeType(pPara) == QUERY_NODE_COLUMN) {
((SColumnNode*)pPara)->node.asParam = true;
}
}
}

View File

@ -440,14 +440,14 @@ static int32_t parseVarbinary(SToken* pToken, uint8_t **pData, uint32_t *nData,
return TSDB_CODE_PAR_INVALID_VARBINARY;
}
if(isHex(pToken->z, pToken->n)){
if(!isValidateHex(pToken->z, pToken->n)){
if(isHex(pToken->z + 1, pToken->n - 2)){
if(!isValidateHex(pToken->z + 1, pToken->n - 2)){
return TSDB_CODE_PAR_INVALID_VARBINARY;
}
void* data = NULL;
uint32_t size = 0;
if(taosHex2Ascii(pToken->z, pToken->n, &data, &size) < 0){
if(taosHex2Ascii(pToken->z + 1, pToken->n - 2, &data, &size) < 0){
return TSDB_CODE_OUT_OF_MEMORY;
}
@ -458,11 +458,13 @@ static int32_t parseVarbinary(SToken* pToken, uint8_t **pData, uint32_t *nData,
*pData = data;
*nData = size;
}else{
if (pToken->n + VARSTR_HEADER_SIZE > bytes) {
*pData = taosMemoryCalloc(1, pToken->n);
int32_t len = trimString(pToken->z, pToken->n, *pData, pToken->n);
*nData = len;
if (*nData + VARSTR_HEADER_SIZE > bytes) {
return TSDB_CODE_PAR_VALUE_TOO_LONG;
}
*pData = taosStrdup(pToken->z);
*nData = pToken->n;
}
return TSDB_CODE_SUCCESS;
}
@ -753,7 +755,7 @@ static int32_t buildCreateTbReq(SVnodeModifyOpStmt* pStmt, STag* pTag, SArray* p
return TSDB_CODE_SUCCESS;
}
static int32_t checkAndTrimValue(SToken* pToken, char* tmpTokenBuf, SMsgBuf* pMsgBuf) {
static int32_t checkAndTrimValue(SToken* pToken, char* tmpTokenBuf, SMsgBuf* pMsgBuf, int8_t type) {
if ((pToken->type != TK_NOW && pToken->type != TK_TODAY && pToken->type != TK_NK_INTEGER &&
pToken->type != TK_NK_STRING && pToken->type != TK_NK_FLOAT && pToken->type != TK_NK_BOOL &&
pToken->type != TK_NULL && pToken->type != TK_NK_HEX && pToken->type != TK_NK_OCT &&
@ -763,7 +765,7 @@ static int32_t checkAndTrimValue(SToken* pToken, char* tmpTokenBuf, SMsgBuf* pMs
}
// Remove quotation marks
if (TK_NK_STRING == pToken->type) {
if (TK_NK_STRING == pToken->type && type != TSDB_DATA_TYPE_VARBINARY) {
if (pToken->n >= TSDB_MAX_BYTES_PER_ROW) {
return buildSyntaxErrMsg(pMsgBuf, "too long string", pToken->z);
}
@ -935,7 +937,7 @@ static int32_t parseTagsClauseImpl(SInsertParseContext* pCxt, SVnodeModifyOpStmt
SSchema* pTagSchema = &pSchema[pCxt->tags.pColIndex[i]];
isJson = pTagSchema->type == TSDB_DATA_TYPE_JSON;
code = checkAndTrimValue(&token, pCxt->tmpTokenBuf, &pCxt->msg);
code = checkAndTrimValue(&token, pCxt->tmpTokenBuf, &pCxt->msg, pTagSchema->type);
if (TK_NK_VARIABLE == token.type) {
code = buildSyntaxErrMsg(&pCxt->msg, "not expected tags values ", token.z);
}
@ -1631,7 +1633,7 @@ static int32_t parseValueTokenImpl(SInsertParseContext* pCxt, const char** pSql,
static int32_t parseValueToken(SInsertParseContext* pCxt, const char** pSql, SToken* pToken, SSchema* pSchema,
int16_t timePrec, SColVal* pVal) {
int32_t code = checkAndTrimValue(pToken, pCxt->tmpTokenBuf, &pCxt->msg);
int32_t code = checkAndTrimValue(pToken, pCxt->tmpTokenBuf, &pCxt->msg, pSchema->type);
if (TSDB_CODE_SUCCESS == code && isNullValue(pSchema->type, pToken)) {
if (TSDB_DATA_TYPE_TIMESTAMP == pSchema->type && PRIMARYKEY_TIMESTAMP_COL_ID == pSchema->colId) {
return buildSyntaxErrMsg(&pCxt->msg, "primary timestamp should not be null", pToken->z);
@ -1691,7 +1693,7 @@ typedef union SRowsDataContext{
static int32_t parseTbnameToken(SInsertParseContext* pCxt, SStbRowsDataContext* pStbRowsCxt, SToken* pToken, bool* pFoundCtbName) {
*pFoundCtbName = false;
int32_t code = checkAndTrimValue(pToken, pCxt->tmpTokenBuf, &pCxt->msg);
int32_t code = checkAndTrimValue(pToken, pCxt->tmpTokenBuf, &pCxt->msg, TSDB_DATA_TYPE_BINARY);
if (TK_NK_VARIABLE == pToken->type) {
code = buildInvalidOperationMsg(&pCxt->msg, "not expected tbname");
}
@ -1731,7 +1733,7 @@ static int32_t processCtbTagsAfterCtbName(SInsertParseContext* pCxt, SVnodeModif
for (int32_t i = 0; code == TSDB_CODE_SUCCESS && i < numOfTagTokens; ++i) {
SToken* pTagToken = (SToken*)(tagTokens + i);
SSchema* pTagSchema = tagSchemas[i];
code = checkAndTrimValue(pTagToken, pCxt->tmpTokenBuf, &pCxt->msg);
code = checkAndTrimValue(pTagToken, pCxt->tmpTokenBuf, &pCxt->msg, pTagSchema->type);
if (TK_NK_VARIABLE == pTagToken->type) {
code = buildInvalidOperationMsg(&pCxt->msg, "not expected tag");
}
@ -1790,7 +1792,7 @@ static int32_t doGetStbRowValues(SInsertParseContext* pCxt, SVnodeModifyOpStmt*
tagSchemas[(*pNumOfTagTokens)] = (SSchema*)pTagSchema;
++(*pNumOfTagTokens);
} else {
code = checkAndTrimValue(pToken, pCxt->tmpTokenBuf, &pCxt->msg);
code = checkAndTrimValue(pToken, pCxt->tmpTokenBuf, &pCxt->msg, pTagSchema->type);
if (TK_NK_VARIABLE == pToken->type) {
code = buildInvalidOperationMsg(&pCxt->msg, "not expected row value");
}
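
The parseVarbinary change above strips the surrounding quotes before validating and decoding the hex form, and trims the plain-string form into a fresh buffer before checking it against the column length. A minimal sketch of a quote-stripped hex decode under those assumptions; the helper below is local to the sketch, not the parser's API:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <stdint.h>

/* Decode a quoted hex literal such as '\x1A2B' into raw bytes.
 * Returns the number of bytes written, or -1 on malformed input. */
static int decodeQuotedHex(const char *tok, size_t n, uint8_t *out, size_t cap) {
  if (n < 4 || tok[0] != '\'' || tok[n - 1] != '\'') return -1;
  const char *p = tok + 1;            /* skip opening quote */
  size_t      m = n - 2;              /* drop both quotes   */
  if (m < 2 || p[0] != '\\' || (p[1] != 'x' && p[1] != 'X')) return -1;
  p += 2; m -= 2;
  if (m % 2 != 0 || m / 2 > cap) return -1;
  for (size_t i = 0; i < m; i += 2) {
    if (!isxdigit((unsigned char)p[i]) || !isxdigit((unsigned char)p[i + 1])) return -1;
    char byte[3] = {p[i], p[i + 1], 0};
    out[i / 2] = (uint8_t)strtoul(byte, NULL, 16);
  }
  return (int)(m / 2);
}

int main(void) {
  uint8_t buf[16];
  const char *tok = "'\\x1A2B'";
  int len = decodeQuotedHex(tok, strlen(tok), buf, sizeof(buf));
  printf("decoded %d bytes: %02X %02X\n", len, buf[0], buf[1]);  /* 1A 2B */
  return 0;
}
```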

View File

@ -247,13 +247,13 @@ static int32_t createTableDataCxt(STableMeta* pTableMeta, SVCreateTbReq** pCreat
if (NULL == pTableCxt->pData) {
code = TSDB_CODE_OUT_OF_MEMORY;
} else {
pTableCxt->pData->flags = NULL != *pCreateTbReq ? SUBMIT_REQ_AUTO_CREATE_TABLE : 0;
pTableCxt->pData->flags = (pCreateTbReq != NULL && NULL != *pCreateTbReq) ? SUBMIT_REQ_AUTO_CREATE_TABLE : 0;
pTableCxt->pData->flags |= colMode ? SUBMIT_REQ_COLUMN_DATA_FORMAT : 0;
pTableCxt->pData->suid = pTableMeta->suid;
pTableCxt->pData->uid = pTableMeta->uid;
pTableCxt->pData->sver = pTableMeta->sversion;
pTableCxt->pData->pCreateTbReq = *pCreateTbReq;
*pCreateTbReq = NULL;
pTableCxt->pData->pCreateTbReq = pCreateTbReq != NULL ? *pCreateTbReq : NULL;
if(pCreateTbReq != NULL) *pCreateTbReq = NULL;
if (pTableCxt->pData->flags & SUBMIT_REQ_COLUMN_DATA_FORMAT) {
pTableCxt->pData->aCol = taosArrayInit(128, sizeof(SColData));
if (NULL == pTableCxt->pData->aCol) {
@ -640,12 +640,12 @@ static bool findFileds(SSchema* pSchema, TAOS_FIELD* fields, int numFields) {
return false;
}
int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreateTbReq* pCreateTb, TAOS_FIELD* tFields,
int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreateTbReq** pCreateTb, TAOS_FIELD* tFields,
int numFields, bool needChangeLength) {
void* tmp = taosHashGet(((SVnodeModifyOpStmt*)(query->pRoot))->pTableBlockHashObj, &pTableMeta->uid, sizeof(pTableMeta->uid));
STableDataCxt* pTableCxt = NULL;
int ret = insGetTableDataCxt(((SVnodeModifyOpStmt*)(query->pRoot))->pTableBlockHashObj, &pTableMeta->uid,
sizeof(pTableMeta->uid), pTableMeta, &pCreateTb, &pTableCxt, true, false);
sizeof(pTableMeta->uid), pTableMeta, pCreateTb, &pTableCxt, true, false);
if (ret != TSDB_CODE_SUCCESS) {
uError("insGetTableDataCxt error");
goto end;
@ -662,6 +662,7 @@ int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreate
char* p = (char*)data;
// | version | total length | total rows | total columns | flag seg| block group id | column schema | each column
// length |
int32_t version = *(int32_t*)data;
p += sizeof(int32_t);
p += sizeof(int32_t);
@ -717,7 +718,7 @@ int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreate
goto end;
}
fields += sizeof(int8_t) + sizeof(int32_t);
if (needChangeLength) {
if (needChangeLength && version == 1) {
pStart += htonl(colLength[j]);
} else {
pStart += colLength[j];
@ -748,7 +749,7 @@ int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreate
goto end;
}
fields += sizeof(int8_t) + sizeof(int32_t);
if (needChangeLength) {
if (needChangeLength && version == 1) {
pStart += htonl(colLength[i]);
} else {
pStart += colLength[i];
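
rawBlockBindData now reads the raw block's version field and applies the byte swap to the per-column length only when the version is 1, so newer blocks whose lengths are already in host order are advanced over as-is. A minimal sketch of that conditional swap (the field layout is simplified and assumed):

```c
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>   /* ntohl/htonl */

/* Return the advance (in bytes) for one column, honoring the block version:
 * version 1 stored lengths in network byte order, later versions store them
 * in host order. */
static uint32_t columnAdvance(uint32_t storedLen, int32_t version, int needChangeLength) {
  if (needChangeLength && version == 1) {
    return ntohl(storedLen);
  }
  return storedLen;
}

int main(void) {
  uint32_t lenNet = htonl(16);   /* as a v1 block would store it */
  printf("v1: %u bytes, v2: %u bytes\n",
         columnAdvance(lenNet, 1, 1),   /* 16 */
         columnAdvance(16, 2, 1));      /* 16 */
  return 0;
}
```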

View File

@ -1085,21 +1085,31 @@ static EDealRes translateColumnWithoutPrefix(STranslateContext* pCxt, SColumnNod
static EDealRes translateColumnUseAlias(STranslateContext* pCxt, SColumnNode** pCol, bool* pFound) {
SNodeList* pProjectionList = getProjectListFromCurrStmt(pCxt->pCurrStmt);
SNode* pNode;
SNode* pFoundNode = NULL;
*pFound = false;
FOREACH(pNode, pProjectionList) {
SExprNode* pExpr = (SExprNode*)pNode;
if (0 == strcmp((*pCol)->colName, pExpr->userAlias)) {
SNode* pNew = nodesCloneNode(pNode);
if (true == *pFound) {
if(nodesEqualNode(pFoundNode, pNode)) {
continue;
}
pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_ORDERBY_AMBIGUOUS, (*pCol)->colName);
return DEAL_RES_ERROR;
}
*pFound = true;
pFoundNode = pNode;
}
}
if (*pFound) {
SNode* pNew = nodesCloneNode(pFoundNode);
if (NULL == pNew) {
pCxt->errCode = TSDB_CODE_OUT_OF_MEMORY;
return DEAL_RES_ERROR;
}
nodesDestroyNode(*(SNode**)pCol);
*(SNode**)pCol = (SNode*)pNew;
*pFound = true;
return DEAL_RES_CONTINUE;
}
}
*pFound = false;
return DEAL_RES_CONTINUE;
}
@ -1313,7 +1323,7 @@ static EDealRes translateColumn(STranslateContext* pCxt, SColumnNode** pCol) {
res = translateColumnWithPrefix(pCxt, pCol);
} else {
bool found = false;
if (SQL_CLAUSE_ORDER_BY == pCxt->currClause) {
if (SQL_CLAUSE_ORDER_BY == pCxt->currClause && !(*pCol)->node.asParam) {
res = translateColumnUseAlias(pCxt, pCol, &found);
}
if (DEAL_RES_ERROR != res && !found) {
@ -1323,6 +1333,10 @@ static EDealRes translateColumn(STranslateContext* pCxt, SColumnNode** pCol) {
res = translateColumnWithoutPrefix(pCxt, pCol);
}
}
if(SQL_CLAUSE_ORDER_BY == pCxt->currClause && !(*pCol)->node.asParam
&& res != DEAL_RES_CONTINUE && res != DEAL_RES_END) {
res = translateColumnUseAlias(pCxt, pCol, &found);
}
}
return res;
}
@ -3971,6 +3985,42 @@ static int32_t translateSpecificWindow(STranslateContext* pCxt, SSelectStmt* pSe
return TSDB_CODE_SUCCESS;
}
static EDealRes collectWindowsPseudocolumns(SNode* pNode, void* pContext) {
SNodeList* pCols = (SNodeList*)pContext;
if (QUERY_NODE_FUNCTION == nodeType(pNode)) {
SFunctionNode* pFunc = (SFunctionNode*)pNode;
if (FUNCTION_TYPE_WSTART == pFunc->funcType || FUNCTION_TYPE_WEND == pFunc->funcType ||
FUNCTION_TYPE_WDURATION == pFunc->funcType) {
nodesListStrictAppend(pCols, nodesCloneNode(pNode));
}
}
return DEAL_RES_CONTINUE;
}
static int32_t checkWindowsConditonValid(SNode* pNode) {
int32_t code = TSDB_CODE_SUCCESS;
if(QUERY_NODE_EVENT_WINDOW != nodeType(pNode)) return code;
SEventWindowNode* pEventWindowNode = (SEventWindowNode*)pNode;
SNodeList* pCols = nodesMakeList();
if (NULL == pCols) {
return TSDB_CODE_OUT_OF_MEMORY;
}
nodesWalkExpr(pEventWindowNode->pStartCond, collectWindowsPseudocolumns, pCols);
if (pCols->length > 0) {
code = TSDB_CODE_QRY_INVALID_WINDOW_CONDITION;
}
if (TSDB_CODE_SUCCESS == code) {
nodesWalkExpr(pEventWindowNode->pEndCond, collectWindowsPseudocolumns, pCols);
if (pCols->length > 0) {
code = TSDB_CODE_QRY_INVALID_WINDOW_CONDITION;
}
}
nodesDestroyList(pCols);
return code;
}
static int32_t translateWindow(STranslateContext* pCxt, SSelectStmt* pSelect) {
if (NULL == pSelect->pWindow) {
return TSDB_CODE_SUCCESS;
@ -3984,6 +4034,9 @@ static int32_t translateWindow(STranslateContext* pCxt, SSelectStmt* pSelect) {
if (TSDB_CODE_SUCCESS == code) {
code = translateSpecificWindow(pCxt, pSelect);
}
if (TSDB_CODE_SUCCESS == code) {
code = checkWindowsConditonValid(pSelect->pWindow);
}
return code;
}
@ -4504,8 +4557,10 @@ static EDealRes replaceOrderByAliasImpl(SNode** pNode, void* pContext) {
if (QUERY_NODE_COLUMN == nodeType(*pNode)) {
FOREACH(pProject, pProjectionList) {
SExprNode* pExpr = (SExprNode*)pProject;
if (0 == strcmp(((SColumnRefNode*)*pNode)->colName, pExpr->userAlias)
&& nodeType(*pNode) == nodeType(pProject)) {
if (0 == strcmp(((SColumnRefNode*)*pNode)->colName, pExpr->userAlias) && nodeType(*pNode) == nodeType(pProject)) {
if (QUERY_NODE_COLUMN == nodeType(pProject) && !nodesEqualNode(*pNode, pProject)) {
continue;
}
SNode* pNew = nodesCloneNode(pProject);
if (NULL == pNew) {
pCxt->pTranslateCxt->errCode = TSDB_CODE_OUT_OF_MEMORY;

View File

@ -190,6 +190,8 @@ static char* getSyntaxErrFormat(int32_t errCode) {
return "invalid ip range";
case TSDB_CODE_OUT_OF_MEMORY:
return "Out of memory";
case TSDB_CODE_PAR_ORDERBY_AMBIGUOUS:
return "ORDER BY \"%s\" is ambiguous";
default:
return "Unknown error";
}

View File

@ -338,6 +338,7 @@ static void scanPathOptSetScanOrder(EScanOrder scanOrder, SScanLogicNode* pScan)
if (pScan->sortPrimaryKey || pScan->scanSeq[0] > 1 || pScan->scanSeq[1] > 1) {
return;
}
pScan->node.outputTsOrder = (SCAN_ORDER_ASC == scanOrder) ? ORDER_ASC : ORDER_DESC;
switch (scanOrder) {
case SCAN_ORDER_ASC:
pScan->scanSeq[0] = 1;
@ -1450,6 +1451,132 @@ static int32_t sortPrimaryKeyOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLo
return sortPrimaryKeyOptimizeImpl(pCxt, pLogicSubplan, pSort);
}
static int32_t sortForJoinOptimizeImpl(SOptimizeContext* pCxt, SLogicSubplan* pLogicSubplan, SJoinLogicNode* pJoin) {
SLogicNode* pLeft = (SLogicNode*)nodesListGetNode(pJoin->node.pChildren, 0);
SLogicNode* pRight = (SLogicNode*)nodesListGetNode(pJoin->node.pChildren, 1);
SScanLogicNode* pScan = NULL;
if (QUERY_NODE_LOGIC_PLAN_SCAN == nodeType(pLeft) && ((SScanLogicNode*)pLeft)->node.outputTsOrder != SCAN_ORDER_BOTH) {
pScan = (SScanLogicNode*)pLeft;
} else if (QUERY_NODE_LOGIC_PLAN_SCAN == nodeType(pRight) && ((SScanLogicNode*)pRight)->node.outputTsOrder != SCAN_ORDER_BOTH) {
pScan = (SScanLogicNode*)pRight;
}
if (NULL != pScan) {
switch (pScan->node.outputTsOrder) {
case SCAN_ORDER_ASC:
pScan->scanSeq[0] = 0;
pScan->scanSeq[1] = 1;
pScan->node.outputTsOrder = ORDER_DESC;
goto _return;
case SCAN_ORDER_DESC:
pScan->scanSeq[0] = 1;
pScan->scanSeq[1] = 0;
pScan->node.outputTsOrder = ORDER_ASC;
goto _return;
default:
break;
}
}
if (QUERY_NODE_OPERATOR != nodeType(pJoin->pPrimKeyEqCond)) {
return TSDB_CODE_PLAN_INTERNAL_ERROR;
}
bool res = false;
SOperatorNode* pOp = (SOperatorNode*)pJoin->pPrimKeyEqCond;
if (QUERY_NODE_COLUMN != nodeType(pOp->pLeft) || QUERY_NODE_COLUMN != nodeType(pOp->pRight)) {
return TSDB_CODE_PLAN_INTERNAL_ERROR;
}
SNode* pOrderByNode = NULL;
SSHashObj* pLeftTables = NULL;
collectTableAliasFromNodes(nodesListGetNode(pJoin->node.pChildren, 0), &pLeftTables);
if (NULL != tSimpleHashGet(pLeftTables, ((SColumnNode*)pOp->pLeft)->tableAlias, strlen(((SColumnNode*)pOp->pLeft)->tableAlias))) {
pOrderByNode = pOp->pLeft;
} else if (NULL != tSimpleHashGet(pLeftTables, ((SColumnNode*)pOp->pRight)->tableAlias, strlen(((SColumnNode*)pOp->pRight)->tableAlias))) {
pOrderByNode = pOp->pRight;
}
tSimpleHashCleanup(pLeftTables);
if (NULL == pOrderByNode) {
return TSDB_CODE_PLAN_INTERNAL_ERROR;
}
SSortLogicNode* pSort = (SSortLogicNode*)nodesMakeNode(QUERY_NODE_LOGIC_PLAN_SORT);
if (NULL == pSort) {
return TSDB_CODE_OUT_OF_MEMORY;
}
pSort->node.outputTsOrder = (ORDER_ASC == pLeft->outputTsOrder) ? ORDER_DESC : ORDER_ASC;
pSort->groupSort = false;
SOrderByExprNode* pOrder = (SOrderByExprNode*)nodesMakeNode(QUERY_NODE_ORDER_BY_EXPR);
if (NULL == pOrder) {
nodesDestroyNode((SNode *)pSort);
return TSDB_CODE_OUT_OF_MEMORY;
}
nodesListMakeAppend(&pSort->pSortKeys, (SNode*)pOrder);
pOrder->order = (ORDER_ASC == pLeft->outputTsOrder) ? ORDER_DESC : ORDER_ASC;
pOrder->pExpr = nodesCloneNode(pOrderByNode);
pOrder->nullOrder = (ORDER_ASC == pOrder->order) ? NULL_ORDER_FIRST : NULL_ORDER_LAST;
if (!pOrder->pExpr) {
nodesDestroyNode((SNode *)pSort);
return TSDB_CODE_OUT_OF_MEMORY;
}
pLeft->pParent = (SLogicNode*)pSort;
nodesListMakeAppend(&pSort->node.pChildren, (SNode*)pLeft);
pJoin->node.pChildren->pHead->pNode = (SNode*)pSort;
pSort->node.pParent = (SLogicNode*)pJoin;
_return:
pCxt->optimized = true;
return TSDB_CODE_SUCCESS;
}
static bool sortForJoinOptMayBeOptimized(SLogicNode* pNode) {
if (QUERY_NODE_LOGIC_PLAN_JOIN != nodeType(pNode)) {
return false;
}
SJoinLogicNode* pJoin = (SJoinLogicNode*)pNode;
if (pNode->pChildren->length != 2 || !pJoin->hasSubQuery || pJoin->isLowLevelJoin) {
return false;
}
SLogicNode* pLeft = (SLogicNode*)nodesListGetNode(pJoin->node.pChildren, 0);
SLogicNode* pRight = (SLogicNode*)nodesListGetNode(pJoin->node.pChildren, 1);
if (ORDER_ASC != pLeft->outputTsOrder && ORDER_DESC != pLeft->outputTsOrder) {
return false;
}
if (ORDER_ASC != pRight->outputTsOrder && ORDER_DESC != pRight->outputTsOrder) {
return false;
}
if (pLeft->outputTsOrder == pRight->outputTsOrder) {
return false;
}
return true;
}
static int32_t sortForJoinOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLogicSubplan) {
SJoinLogicNode* pJoin = (SJoinLogicNode*)optFindPossibleNode(pLogicSubplan->pNode, sortForJoinOptMayBeOptimized);
if (NULL == pJoin) {
return TSDB_CODE_SUCCESS;
}
return sortForJoinOptimizeImpl(pCxt, pLogicSubplan, pJoin);
}
static bool smaIndexOptMayBeOptimized(SLogicNode* pNode) {
if (QUERY_NODE_LOGIC_PLAN_SCAN != nodeType(pNode) || NULL == pNode->pParent ||
QUERY_NODE_LOGIC_PLAN_WINDOW != nodeType(pNode->pParent) ||
@ -3122,6 +3249,7 @@ static bool mergeProjectsMayBeOptimized(SLogicNode* pNode) {
NULL != pChild->pConditions || NULL != pChild->pLimit || NULL != pChild->pSlimit) {
return false;
}
return true;
}
@ -3160,7 +3288,9 @@ static EDealRes mergeProjectionsExpr(SNode** pNode, void* pContext) {
static int32_t mergeProjectsOptimizeImpl(SOptimizeContext* pCxt, SLogicSubplan* pLogicSubplan, SLogicNode* pSelfNode) {
SLogicNode* pChild = (SLogicNode*)nodesListGetNode(pSelfNode->pChildren, 0);
if (((SProjectLogicNode*)pChild)->ignoreGroupId) {
((SProjectLogicNode*)pSelfNode)->inputIgnoreGroup = true;
}
SMergeProjectionsContext cxt = {.pChildProj = (SProjectLogicNode*)pChild, .errCode = TSDB_CODE_SUCCESS};
nodesRewriteExprs(((SProjectLogicNode*)pSelfNode)->pProjections, mergeProjectionsExpr, &cxt);
int32_t code = cxt.errCode;
@ -3325,7 +3455,11 @@ static bool pushDownLimitTo(SLogicNode* pNodeWithLimit, SLogicNode* pNodeLimitPu
}
case QUERY_NODE_LOGIC_PLAN_SCAN:
if (nodeType(pNodeWithLimit) == QUERY_NODE_LOGIC_PLAN_PROJECT && pNodeWithLimit->pLimit) {
if (((SProjectLogicNode*)pNodeWithLimit)->inputIgnoreGroup) {
cloneLimit(pNodeWithLimit, pNodeLimitPushTo, CLONE_LIMIT);
} else {
swapLimit(pNodeWithLimit, pNodeLimitPushTo);
}
return true;
}
default:
@ -4190,6 +4324,7 @@ static const SOptimizeRule optimizeRuleSet[] = {
{.pName = "StableJoin", .optimizeFunc = stableJoinOptimize},
{.pName = "sortNonPriKeyOptimize", .optimizeFunc = sortNonPriKeyOptimize},
{.pName = "SortPrimaryKey", .optimizeFunc = sortPrimaryKeyOptimize},
{.pName = "SortForjoin", .optimizeFunc = sortForJoinOptimize},
{.pName = "SmaIndex", .optimizeFunc = smaIndexOptimize},
{.pName = "PushDownLimit", .optimizeFunc = pushDownLimitOptimize},
{.pName = "PartitionTags", .optimizeFunc = partTagsOptimize},

View File

@ -1459,6 +1459,7 @@ static int32_t createProjectPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChild
pProject->mergeDataBlock = projectCanMergeDataBlock(pProjectLogicNode);
pProject->ignoreGroupId = pProjectLogicNode->ignoreGroupId;
pProject->inputIgnoreGroup = pProjectLogicNode->inputIgnoreGroup;
int32_t code = TSDB_CODE_SUCCESS;
if (0 == LIST_LENGTH(pChildren)) {

View File

@ -1064,9 +1064,8 @@ static int32_t stbSplCreateMergeKeys(SNodeList* pSortKeys, SNodeList* pTargets,
SNode* pTarget = NULL;
bool found = false;
FOREACH(pTarget, pTargets) {
if ((QUERY_NODE_COLUMN == nodeType(pSortExpr) && nodesEqualNode((SNode*)pSortExpr, pTarget))
// || (0 == strcmp(pSortExpr->aliasName, ((SColumnNode*)pTarget)->colName))
) {
if ((QUERY_NODE_COLUMN == nodeType(pSortExpr) && nodesEqualNode((SNode*)pSortExpr, pTarget)) ||
(0 == strcmp(pSortExpr->aliasName, ((SColumnNode*)pTarget)->colName))) {
code = nodesListMakeStrictAppend(&pMergeKeys, stbSplCreateOrderByExpr(pSortKey, pTarget));
if (TSDB_CODE_SUCCESS != code) {
break;
@ -1164,8 +1163,10 @@ static int32_t stbSplSplitSortNode(SSplitContext* pCxt, SStableSplitInfo* pInfo)
static int32_t stbSplGetSplitNodeForScan(SStableSplitInfo* pInfo, SLogicNode** pSplitNode) {
*pSplitNode = pInfo->pSplitNode;
if (NULL != pInfo->pSplitNode->pParent && QUERY_NODE_LOGIC_PLAN_PROJECT == nodeType(pInfo->pSplitNode->pParent) &&
NULL == pInfo->pSplitNode->pParent->pLimit && NULL == pInfo->pSplitNode->pParent->pSlimit) {
if (NULL != pInfo->pSplitNode->pParent &&
QUERY_NODE_LOGIC_PLAN_PROJECT == nodeType(pInfo->pSplitNode->pParent) &&
NULL == pInfo->pSplitNode->pParent->pLimit && NULL == pInfo->pSplitNode->pParent->pSlimit &&
!((SProjectLogicNode*)pInfo->pSplitNode->pParent)->inputIgnoreGroup) {
*pSplitNode = pInfo->pSplitNode->pParent;
if (NULL != pInfo->pSplitNode->pLimit) {
(*pSplitNode)->pLimit = nodesCloneNode(pInfo->pSplitNode->pLimit);

View File

@ -1788,6 +1788,7 @@ bool getTimePseudoFuncEnv(SFunctionNode *UNUSED_PARAM(pFunc), SFuncExecEnv *pEnv
return true;
}
#ifdef BUILD_NO_CALL
int32_t qStartTsFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput) {
colDataSetInt64(pOutput->columnData, pOutput->numOfRows, (int64_t *)colDataGetData(pInput->columnData, 0));
return TSDB_CODE_SUCCESS;
@ -1797,6 +1798,7 @@ int32_t qEndTsFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOu
colDataSetInt64(pOutput->columnData, pOutput->numOfRows, (int64_t *)colDataGetData(pInput->columnData, 1));
return TSDB_CODE_SUCCESS;
}
#endif
int32_t winDurFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput) {
colDataSetInt64(pOutput->columnData, pOutput->numOfRows, (int64_t *)colDataGetData(pInput->columnData, 2));
@ -1824,7 +1826,7 @@ int32_t qTbnameFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pO
pOutput->numOfRows += pInput->numOfRows;
return TSDB_CODE_SUCCESS;
}
#ifdef BUILD_NO_CALL
int32_t qTbUidFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput) {
char* p = colDataGetNumData(pInput->columnData, 0);
@ -1848,7 +1850,7 @@ int32_t qVgIdFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOut
pOutput->numOfRows += pInput->numOfRows;
return TSDB_CODE_SUCCESS;
}
#endif
/** Aggregation functions **/
int32_t countScalarFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput) {
@ -2427,9 +2429,19 @@ int32_t leastSQRScalarFunction(SScalarParam *pInput, int32_t inputNum, SScalarPa
matrix12 /= matrix[1][1];
char buf[64] = {0};
size_t len = snprintf(varDataVal(buf), sizeof(buf) - VARSTR_HEADER_SIZE, "{slop:%.6lf, intercept:%.6lf}", matrix02,
matrix12);
char buf[LEASTSQUARES_BUFF_LENGTH] = {0};
char slopBuf[64] = {0};
char interceptBuf[64] = {0};
int n = snprintf(slopBuf, 64, "%.6lf", matrix02);
if (n > LEASTSQUARES_DOUBLE_ITEM_LENGTH) {
snprintf(slopBuf, 64, "%." DOUBLE_PRECISION_DIGITS, matrix02);
}
n = snprintf(interceptBuf, 64, "%.6lf", matrix12);
if (n > LEASTSQUARES_DOUBLE_ITEM_LENGTH) {
snprintf(interceptBuf, 64, "%." DOUBLE_PRECISION_DIGITS, matrix12);
}
size_t len =
snprintf(varDataVal(buf), sizeof(buf) - VARSTR_HEADER_SIZE, "{slop:%s, intercept:%s}", slopBuf, interceptBuf);
varDataSetLen(buf, len);
colDataSetVal(pOutputData, 0, buf, false);
}

View File

@ -386,10 +386,13 @@ _return:
int32_t schDumpJobExecRes(SSchJob *pJob, SExecResult *pRes) {
pRes->code = atomic_load_32(&pJob->errCode);
pRes->numOfRows = pJob->resNumOfRows;
SCH_LOCK(SCH_WRITE, &pJob->resLock);
pRes->res = pJob->execRes.res;
pRes->msgType = pJob->execRes.msgType;
pRes->numOfBytes = pJob->execRes.numOfBytes;
pJob->execRes.res = NULL;
SCH_UNLOCK(SCH_WRITE, &pJob->resLock);
SCH_JOB_DLOG("execRes dumped, code: %s", tstrerror(pRes->code));

View File

@ -48,8 +48,8 @@ extern "C" {
#define stError(...) do { if (stDebugFlag & DEBUG_ERROR) { taosPrintLog("STM ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }} while(0)
#define stWarn(...) do { if (stDebugFlag & DEBUG_WARN) { taosPrintLog("STM WARN ", DEBUG_WARN, 255, __VA_ARGS__); }} while(0)
#define stInfo(...) do { if (stDebugFlag & DEBUG_INFO) { taosPrintLog("STM ", DEBUG_INFO, 255, __VA_ARGS__); }} while(0)
#define stDebug(...) do { if (stDebugFlag & DEBUG_DEBUG) { taosPrintLog("STM ", DEBUG_DEBUG, tqDebugFlag, __VA_ARGS__); }} while(0)
#define stTrace(...) do { if (stDebugFlag & DEBUG_TRACE) { taosPrintLog("STM ", DEBUG_TRACE, tqDebugFlag, __VA_ARGS__); }} while(0)
#define stDebug(...) do { if (stDebugFlag & DEBUG_DEBUG) { taosPrintLog("STM ", DEBUG_DEBUG, stDebugFlag, __VA_ARGS__); }} while(0)
#define stTrace(...) do { if (stDebugFlag & DEBUG_TRACE) { taosPrintLog("STM ", DEBUG_TRACE, stDebugFlag, __VA_ARGS__); }} while(0)
// clang-format on
typedef struct {
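
The macro fix makes stDebug and stTrace pass stDebugFlag, the same flag that gates them, to the log writer instead of tqDebugFlag. A standalone sketch of a level macro that tests and forwards one and the same flag; the backend here is plain printf, not taosPrintLog:

```c
#include <stdio.h>
#include <stdint.h>

enum { DEBUG_ERROR = 1, DEBUG_WARN = 2, DEBUG_INFO = 4, DEBUG_DEBUG = 8, DEBUG_TRACE = 16 };

static int32_t stDebugFlag = DEBUG_ERROR | DEBUG_WARN | DEBUG_INFO | DEBUG_DEBUG;

/* The same flag that gates the message is handed to the sink, so raising or
 * lowering stDebugFlag affects both the check and downstream filtering. */
#define STM_LOG(level, flag, ...)       \
  do {                                  \
    if ((flag) & (level)) {             \
      printf("[flag=%d] ", (flag));     \
      printf(__VA_ARGS__);              \
      printf("\n");                     \
    }                                   \
  } while (0)

#define stDebugDemo(...) STM_LOG(DEBUG_DEBUG, stDebugFlag, __VA_ARGS__)
#define stTraceDemo(...) STM_LOG(DEBUG_TRACE, stDebugFlag, __VA_ARGS__)

int main(void) {
  stDebugDemo("checkpoint %d started", 3);   /* printed  */
  stTraceDemo("verbose detail");             /* filtered */
  return 0;
}
```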

View File

@ -500,34 +500,27 @@ int32_t backendCopyFiles(char* src, char* dst) {
// return 0;
}
int32_t rebuildFromLocalChkp(char* key, char* chkpPath, int64_t chkpId, char* defaultPath) {
int32_t code = -1;
int32_t len = strlen(defaultPath) + 32;
char* tmp = taosMemoryCalloc(1, len);
sprintf(tmp, "%s%s", defaultPath, "_tmp");
if (taosIsDir(tmp)) taosRemoveDir(tmp);
if (taosIsDir(defaultPath)) taosRenameFile(defaultPath, tmp);
if (taosIsDir(chkpPath) && isValidCheckpoint(chkpPath)) {
if (taosIsDir(tmp)) {
taosRemoveDir(tmp);
}
int32_t code = 0;
if (taosIsDir(defaultPath)) {
taosRemoveDir(defaultPath);
taosMkDir(defaultPath);
stInfo("succ to clear stream backend %s", defaultPath);
}
if (taosIsDir(chkpPath) && isValidCheckpoint(chkpPath)) {
code = backendCopyFiles(chkpPath, defaultPath);
if (code != 0) {
stError("failed to restart stream backend from %s, reason: %s", chkpPath, tstrerror(TAOS_SYSTEM_ERROR(errno)));
taosRemoveDir(defaultPath);
taosMkDir(defaultPath);
stError("failed to restart stream backend from %s, reason: %s, start to restart from empty path: %s", chkpPath,
tstrerror(TAOS_SYSTEM_ERROR(errno)), defaultPath);
code = 0;
} else {
stInfo("start to restart stream backend at checkpoint path: %s", chkpPath);
}
}
if (code != 0) {
if (taosIsDir(defaultPath)) taosRemoveDir(defaultPath);
if (taosIsDir(tmp)) taosRenameFile(tmp, defaultPath);
} else {
taosRemoveDir(tmp);
}
taosMemoryFree(tmp);
return code;
}
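
rebuildFromLocalChkp no longer renames the live directory to a _tmp sibling and restores it on failure; it clears the default path up front, copies the checkpoint in, and on a failed copy wipes the directory again and continues with an empty backend. A control-flow sketch with stubbed filesystem helpers standing in for taosIsDir/taosRemoveDir/backendCopyFiles (the stubs always succeed here):

```c
#include <stdio.h>
#include <stdbool.h>

/* Stub helpers standing in for the real filesystem calls. */
static bool dirExists(const char *path)          { (void)path; return true; }
static void removeDirRecursive(const char *path) { printf("rm -rf %s\n", path); }
static void makeDir(const char *path)            { printf("mkdir %s\n", path); }
static int  copyCheckpoint(const char *src, const char *dst) {
  printf("copy %s -> %s\n", src, dst);
  return 0;   /* 0 = success; change to nonzero to simulate a failed copy */
}

/* Restore the backend from a checkpoint if possible, otherwise start empty. */
static int rebuildFromLocalCheckpoint(const char *chkpPath, const char *defaultPath) {
  if (dirExists(defaultPath)) {           /* always start from a clean dir */
    removeDirRecursive(defaultPath);
    makeDir(defaultPath);
  }
  if (dirExists(chkpPath)) {
    if (copyCheckpoint(chkpPath, defaultPath) != 0) {
      removeDirRecursive(defaultPath);    /* failed copy: fall back to empty */
      makeDir(defaultPath);
    }
  }
  return 0;                               /* never fails the caller anymore */
}

int main(void) {
  return rebuildFromLocalCheckpoint("/data/stream/chkp_5", "/data/stream/state");
}
```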

Some files were not shown because too many files have changed in this diff.