diff --git a/Jenkinsfile2 b/Jenkinsfile2 index db49ab27d7..a2b55e3acc 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -269,7 +269,7 @@ pipeline { } } stage('linux test') { - agent{label " slave3_0 || slave15 || slave16 || slave17 "} + agent{label " worker03 || slave215 || slave217 || slave219 "} options { skipDefaultCheckout() } when { changeRequest() @@ -287,9 +287,9 @@ pipeline { ''' sh ''' cd ${WKC}/tests/parallel_test - export DEFAULT_RETRY_TIME=1 + export DEFAULT_RETRY_TIME=2 date - timeout 2100 time ./run.sh -e -m /home/m.json -t /tmp/cases.task -b ${BRANCH_NAME} -l ${WKDIR}/log -o 480 + timeout 2100 time ./run.sh -e -m /home/m.json -t /tmp/cases.task -b ${BRANCH_NAME}_${BUILD_ID} -l ${WKDIR}/log -o 480 ''' } } diff --git a/docs-cn/12-taos-sql/index.md b/docs-cn/12-taos-sql/index.md index 269bc1d2b5..cb01b3a918 100644 --- a/docs-cn/12-taos-sql/index.md +++ b/docs-cn/12-taos-sql/index.md @@ -7,8 +7,6 @@ description: "TAOS SQL 支持的语法规则、主要查询功能、支持的 SQ TAOS SQL 是用户对 TDengine 进行数据写入和查询的主要工具。TAOS SQL 为了便于用户快速上手,在一定程度上提供与标准 SQL 类似的风格和模式。严格意义上,TAOS SQL 并不是也不试图提供标准的 SQL 语法。此外,由于 TDengine 针对的时序性结构化数据不提供删除功能,因此在 TAO SQL 中不提供数据删除的相关功能。 -TAOS SQL 不支持关键字的缩写,例如 DESCRIBE 不能缩写为 DESC。 - 本章节 SQL 语法遵循如下约定: - <\> 里的内容是用户需要输入的,但不要输入 <\> 本身 @@ -37,4 +35,4 @@ import DocCardList from '@theme/DocCardList'; import {useCurrentSidebarCategory} from '@docusaurus/theme-common'; -``` \ No newline at end of file +``` diff --git a/docs-en/12-taos-sql/04-stable.md b/docs-en/12-taos-sql/04-stable.md index 7354484f75..b8a608792a 100644 --- a/docs-en/12-taos-sql/04-stable.md +++ b/docs-en/12-taos-sql/04-stable.md @@ -9,7 +9,7 @@ Keyword `STable`, abbreviated for super table, is supported since version 2.0.15 ::: -## Crate STable +## Create STable ``` CREATE STable [IF NOT EXISTS] stb_name (timestamp_field_name TIMESTAMP, field1_name data_type1 [, field2_name data_type2 ...]) TAGS (tag1_name tag_type1, tag2_name tag_type2 [, tag3_name tag_type3]); @@ -19,7 +19,7 @@ The SQL statement of creating a STable is similar to that of creating a table, b :::info -1. The tag types specified in TAGS should NOT be timestamp. Since 2.1.3.0 timestamp type can be used in TAGS column, but its value must be fixed and arithmetic operation can't be applied on it. +1. A tag can be of type timestamp, since version 2.1.3.0, but its value must be fixed and arithmetic operations cannot be performed on it. Prior to version 2.1.3.0, tag types specified in TAGS could not be of type timestamp. 2. The tag names specified in TAGS should NOT be the same as other columns. 3. The tag names specified in TAGS should NOT be the same as any reserved keywords.(Please refer to [keywords](/taos-sql/keywords/) 4. The maximum number of tags specified in TAGS is 128, there must be at least one tag, and the total length of all tag columns should NOT exceed 16KB. @@ -76,7 +76,7 @@ ALTER STable stb_name DROP COLUMN field_name; ALTER STable stb_name MODIFY COLUMN field_name data_type(length); ``` -This command can be used to change (or increase, more specifically) the length of a column of variable length types, like BINARY or NCHAR. +This command can be used to change (or more specifically, increase) the length of a column of variable length types, like BINARY or NCHAR. ## Change Tags of A STable @@ -94,7 +94,7 @@ This command is used to add a new tag for a STable and specify the tag type. 
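As a quick illustration of the add-tag command described just above, here is a minimal sketch; the super table name `meters` and the tag name `deviceversion` are hypothetical examples, not taken from this change:

```sql
-- Minimal sketch (hypothetical names): add a BINARY(16) tag named
-- deviceversion to an existing super table named meters.
ALTER STable meters ADD TAG deviceversion BINARY(16);
```

Per the note later in this file, the new tag is added with a NULL value for all existing subtables of `meters` until it is set explicitly.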
ALTER STable stb_name DROP TAG tag_name; ``` -The tag will be removed automatically from all the subtables created using the super table as template once a tag is removed from a super table. +The tag will be removed automatically from all the subtables, created using the super table as template, once a tag is removed from a super table. ### Change A Tag @@ -102,7 +102,7 @@ The tag will be removed automatically from all the subtables created using the s ALTER STable stb_name CHANGE TAG old_tag_name new_tag_name; ``` -The tag name will be changed automatically for all the subtables created using the super table as template once a tag name is changed for a super table. +The tag name will be changed automatically for all the subtables, created using the super table as template, once a tag name is changed for a super table. ### Change Tag Length @@ -110,7 +110,7 @@ The tag name will be changed automatically for all the subtables created using t ALTER STable stb_name MODIFY TAG tag_name data_type(length); ``` -This command can be used to change (or increase, more specifically) the length of a tag of variable length types, like BINARY or NCHAR. +This command can be used to change (or more specifically, increase) the length of a tag of variable length types, like BINARY or NCHAR. :::note Changing tag values can be applied to only subtables. All other tag operations, like add tag, remove tag, however, can be applied to only STable. If a new tag is added for a STable, the tag will be added with NULL value for all its subtables. diff --git a/docs-en/12-taos-sql/06-select.md b/docs-en/12-taos-sql/06-select.md index d9c39845f8..8a017cf92e 100644 --- a/docs-en/12-taos-sql/06-select.md +++ b/docs-en/12-taos-sql/06-select.md @@ -21,7 +21,7 @@ SELECT select_expr [, select_expr ...] ## Wildcard -Wilcard \* can be used to specify all columns. The result includes only data columns for normal tables. +Wildcard \* can be used to specify all columns. The result includes only data columns for normal tables. ``` taos> SELECT * FROM d1001; @@ -51,14 +51,14 @@ taos> SELECT * FROM meters; Query OK, 9 row(s) in set (0.002022s) ``` -Wildcard can be used with table name as prefix, both below SQL statements have same effects and return all columns. +Wildcard can be used with table name as prefix. Both SQL statements below have the same effect and return all columns. ```SQL SELECT * FROM d1001; SELECT d1001.* FROM d1001; ``` -In JOIN query, however, with or without table name prefix will return different results. \* without table prefix will return all the columns of both tables, but \* with table name as prefix will return only the columns of that table. +In a JOIN query, however, the results are different with or without a table name prefix. \* without table prefix will return all the columns of both tables, but \* with table name as prefix will return only the columns of that table. ``` taos> SELECT * FROM d1001, d1003 WHERE d1001.ts=d1003.ts; @@ -76,7 +76,7 @@ taos> SELECT d1001.* FROM d1001,d1003 WHERE d1001.ts = d1003.ts; Query OK, 1 row(s) in set (0.020443s) ``` -Wilcard \* can be used with some functions, but the result may be different depending on the function being used. For example, `count(*)` returns only one column, i.e. the number of rows; `first`, `last` and `last_row` return all columns of the selected row. +Wildcard \* can be used with some functions, but the result may be different depending on the function being used. For example, `count(*)` returns only one column, i.e. 
the number of rows; `first`, `last` and `last_row` return all columns of the selected row. ``` taos> SELECT COUNT(*) FROM d1001; @@ -96,7 +96,7 @@ Query OK, 1 row(s) in set (0.000849s) ## Tags -Starting from version 2.0.14, tag columns can be selected together with data columns when querying sub tables. Please note that, however, wildcard \* doesn't represent any tag column, that means tag columns must be specified explicitly like the example below. +Starting from version 2.0.14, tag columns can be selected together with data columns when querying sub tables. Please note, however, that wildcard \* cannot be used to represent any tag column. This means that tag columns must be specified explicitly like the example below. ``` taos> SELECT location, groupid, current FROM d1001 LIMIT 2; @@ -109,7 +109,7 @@ Query OK, 2 row(s) in set (0.003112s) ## Get distinct values -`DISTINCT` keyword can be used to get all the unique values of tag columns from a super table, it can also be used to get all the unique values of data columns from a table or subtable. +`DISTINCT` keyword can be used to get all the unique values of tag columns from a super table. It can also be used to get all the unique values of data columns from a table or subtable. ```sql SELECT DISTINCT tag_name [, tag_name ...] FROM stb_name; @@ -118,15 +118,15 @@ SELECT DISTINCT col_name [, col_name ...] FROM tb_name; :::info -1. Configuration parameter `maxNumOfDistinctRes` in `taos.cfg` is used to control the number of rows to output. The minimum configurable value is 100,000, the maximum configurable value is 100,000,000, the default value is 1000,000. If the actual number of rows exceeds the value of this parameter, only the number of rows specified by this parameter will be output. -2. It can't be guaranteed that the results selected by using `DISTINCT` on columns of `FLOAT` or `DOUBLE` are exactly unique because of the precision nature of floating numbers. +1. Configuration parameter `maxNumOfDistinctRes` in `taos.cfg` is used to control the number of rows to output. The minimum configurable value is 100,000, the maximum configurable value is 100,000,000, the default value is 1,000,000. If the actual number of rows exceeds the value of this parameter, only the number of rows specified by this parameter will be output. +2. It can't be guaranteed that the results selected by using `DISTINCT` on columns of `FLOAT` or `DOUBLE` are exactly unique because of the precision errors in floating point numbers. 3. `DISTINCT` can't be used in the sub-query of a nested query statement, and can't be used together with aggregate functions, `GROUP BY` or `JOIN` in the same SQL statement. ::: ## Columns Names of Result Set -When using `SELECT`, the column names in the result set will be same as that in the select clause if `AS` is not used. `AS` can be used to rename the column names in the result set. For example +When using `SELECT`, the column names in the result set will be the same as those in the select clause if `AS` is not used. `AS` can be used to rename the column names in the result set. For example ``` taos> SELECT ts, ts AS primary_key_ts FROM d1001; @@ -161,7 +161,7 @@ SELECT * FROM d1001; ## Special Query -Some special query functionalities can be performed without `FORM` sub-clause. For example, below statement can be used to get the current database in use. +Some special query functions can be invoked without a `FROM` sub-clause. For example, the statement below can be used to get the current database in use.
``` taos> SELECT DATABASE(); @@ -181,7 +181,7 @@ taos> SELECT DATABASE(); Query OK, 1 row(s) in set (0.000184s) ``` -Below statement can be used to get the version of client or server. +The statement below can be used to get the version of client or server. ``` taos> SELECT CLIENT_VERSION(); @@ -197,7 +197,7 @@ taos> SELECT SERVER_VERSION(); Query OK, 1 row(s) in set (0.000077s) ``` -Below statement is used to check the server status. One integer, like `1`, is returned if the server status is OK, otherwise an error code is returned. This is compatible with the status check for TDengine from connection pool or 3rd party tools, and can avoid the problem of losing the connection from a connection pool when using the wrong heartbeat checking SQL statement. +The statement below is used to check the server status. An integer, like `1`, is returned if the server status is OK; otherwise, an error code is returned. This is compatible with the status check for TDengine from connection pool or 3rd party tools, and can avoid the problem of losing the connection from a connection pool when using the wrong heartbeat checking SQL statement. ``` taos> SELECT SERVER_STATUS(); @@ -284,7 +284,7 @@ taos> SELECT COUNT(tbname) FROM meters WHERE groupId > 2; Query OK, 1 row(s) in set (0.001091s) ``` -- Wildcard \* can be used to get all columns, or specific column names can be specified. Arithmetic operation can be performed on columns of number types, columns can be renamed in the result set. +- Wildcard \* can be used to get all columns, or specific column names can be specified. Arithmetic operation can be performed on columns of numerical types, and columns can be renamed in the result set. - Arithmetic operation on columns can't be used in where clause. For example, `where a*2>6;` is not allowed but `where a>6/2;` can be used instead for the same purpose. - Arithmetic operation on columns can't be used as the objectives of select statement. For example, `select min(2*a) from t;` is not allowed but `select 2*min(a) from t;` can be used instead. - Logical operation can be used in `WHERE` clause to filter numeric values, wildcard can be used to filter string values. @@ -318,13 +318,13 @@ Logical operations in below table can be used in the `where` clause to filter the resulting rows. - Operator `like` is used together with wildcards to match strings - '%' matches 0 or any number of characters, '\_' matches any single ASCII character. - `\_` is used to match the \_ in the string. - - The maximum length of wildcard string is 100 bytes from version 2.1.6.1 (before that the maximum length is 20 bytes). `maxWildCardsLength` in `taos.cfg` can be used to control this threshold. Too long wildcard string may slowdown the execution performance of `LIKE` operator. + - The maximum length of wildcard string is 100 bytes from version 2.1.6.1 (before that the maximum length is 20 bytes). `maxWildCardsLength` in `taos.cfg` can be used to control this threshold. A very long wildcard string may slow down the execution performance of the `LIKE` operator. - `AND` keyword can be used to filter multiple columns simultaneously. AND/OR operation can be performed on single or multiple columns from version 2.3.0.0. However, before 2.3.0.0 `OR` can't be used on multiple columns. - For timestamp column, only one condition can be used; for other columns or tags, `OR` keyword can be used to combine multiple logical operators. For example, `((value > 20 AND value < 30) OR (value < 12))`.
- From version 2.3.0.0, multiple conditions can be used on timestamp column, but the result set can only contain single time range. - From version 2.0.17.0, operator `BETWEEN AND` can be used in where clause, for example `WHERE col2 BETWEEN 1.5 AND 3.25` means the filter condition is equal to "1.5 ≤ col2 ≤ 3.25". -- From version 2.1.4.0, operator `IN` can be used in the where clause. For example, `WHERE city IN ('California.SanFrancisco', 'California.SanDiego')`. For bool type, both `{true, false}` and `{0, 1}` are allowed, but integers other than 0 or 1 are not allowed. FLOAT and DOUBLE types are impacted by floating precision, only values that match the condition within the tolerance will be selected. Non-primary key column of timestamp type can be used with `IN`. -- From version 2.3.0.0, regular expression is supported in the where clause with keyword `match` or `nmatch`, the regular expression is case insensitive. +- From version 2.1.4.0, operator `IN` can be used in the where clause. For example, `WHERE city IN ('California.SanFrancisco', 'California.SanDiego')`. For bool type, both `{true, false}` and `{0, 1}` are allowed, but integers other than 0 or 1 are not allowed. FLOAT and DOUBLE types are impacted by floating point precision errors. Only values that match the condition within the tolerance will be selected. Non-primary key column of timestamp type can be used with `IN`. +- From version 2.3.0.0, regular expression is supported in the where clause with keyword `match` or `nmatch`. The regular expression is case insensitive. ## Regular Expression @@ -364,7 +364,7 @@ FROM temp_STable t1, temp_STable t2 WHERE t1.ts = t2.ts AND t1.deviceid = t2.deviceid AND t1.status=0; ``` -Similary, join operation can be performed on the result set of multiple sub queries. +Similarly, join operations can be performed on the result set of multiple sub queries. :::note Restrictions on join operation: @@ -380,7 +380,7 @@ Restrictions on join operation: ## Nested Query -Nested query is also called sub query, that means in a single SQL statement the result of inner query can be used as the data source of the outer query. +Nested query is also called sub query. This means that in a single SQL statement the result of inner query can be used as the data source of the outer query. From 2.2.0.0, unassociated sub query can be used in the `FROM` clause. Unassociated means the sub query doesn't use the parameters in the parent query. More specifically, in the `tb_name_list` of `SELECT` statement, an independent SELECT statement can be used. So a complete nested query looks like: @@ -390,14 +390,14 @@ SELECT ... FROM (SELECT ... FROM ...) ...; :::info -- Only one layer of nesting is allowed, that means no sub query is allowed in a sub query -- The result set returned by the inner query will be used as a "virtual table" by the outer query, the "virtual table" can be renamed using `AS` keyword for easy reference in the outer query. +- Only one layer of nesting is allowed, that means no sub query is allowed within a sub query +- The result set returned by the inner query will be used as a "virtual table" by the outer query. The "virtual table" can be renamed using `AS` keyword for easy reference in the outer query. - Sub query is not allowed in continuous query. - JOIN operation is allowed between tables/STables inside both inner and outer queries. Join operation can be performed on the result set of the inner query. - UNION operation is not allowed in either inner query or outer query. 
-- The functionalities that can be used in the inner query is same as non-nested query. - - `ORDER BY` inside the inner query doesn't make any sense but will slow down the query performance significantly, so please avoid such usage. -- Compared to the non-nested query, the functionalities that can be used in the outer query have such restrictions as: +- The functions that can be used in the inner query are the same as those that can be used in a non-nested query. + - `ORDER BY` inside the inner query is unnecessary and will slow down the query performance significantly. It is best to avoid the use of `ORDER BY` inside the inner query. +- Compared to the non-nested query, the functionality that can be used in the outer query has the following restrictions: - Functions - If the result set returned by the inner query doesn't contain timestamp column, then functions relying on timestamp can't be used in the outer query, like `TOP`, `BOTTOM`, `FIRST`, `LAST`, `DIFF`. - Functions that need to scan the data twice can't be used in the outer query, like `STDDEV`, `PERCENTILE`. @@ -442,8 +442,8 @@ The sum of col1 and col2 for rows later than 2018-06-01 08:00:00.000 and whose c SELECT (col1 + col2) AS 'complex' FROM tb1 WHERE ts > '2018-06-01 08:00:00.000' AND col2 > 1.2 LIMIT 10 OFFSET 5; ``` -The rows in the past 10 minutes and whose col2 is bigger than 3.14 are selected and output to the result file `/home/testoutpu.csv` with below SQL statement: +The rows in the past 10 minutes and whose col2 is bigger than 3.14 are selected and output to the result file `/home/testoutput.csv` with below SQL statement: ```SQL -SELECT COUNT(*) FROM tb1 WHERE ts >= NOW - 10m AND col2 > 3.14 >> /home/testoutpu.csv; +SELECT COUNT(*) FROM tb1 WHERE ts >= NOW - 10m AND col2 > 3.14 >> /home/testoutput.csv; ``` diff --git a/docs-en/12-taos-sql/index.md b/docs-en/12-taos-sql/index.md index 39482d9786..33656338a7 100644 --- a/docs-en/12-taos-sql/index.md +++ b/docs-en/12-taos-sql/index.md @@ -7,8 +7,6 @@ This section explains the syntax of SQL to perform operations on databases, tabl TDengine SQL is the major interface for users to write data into or query from TDengine. For ease of use, the syntax is similar to that of standard SQL. However, please note that TDengine SQL is not standard SQL. For instance, TDengine doesn't provide a delete function for time series data and so corresponding statements are not provided in TDengine SQL. -TDengine SQL doesn't support abbreviation for keywords. For example "SELECT" cannot be abbreviated as "SEL". - Syntax Specifications used in this chapter: - The content inside <\> needs to be input by the user, excluding <\> itself. diff --git a/docs-en/14-reference/07-tdinsight/index.md b/docs-en/14-reference/07-tdinsight/index.md index e945d581c9..16bae615c0 100644 --- a/docs-en/14-reference/07-tdinsight/index.md +++ b/docs-en/14-reference/07-tdinsight/index.md @@ -61,7 +61,7 @@ sudo yum install \ ## Automated deployment of TDinsight -We provide an installation script [`TDinsight.sh`](https://github.com/taosdata/grafanaplugin/releases/latest/download/TDinsight.sh) script to allow users to configure the installation automatically and quickly. +We provide an installation script [`TDinsight.sh`](https://github.com/taosdata/grafanaplugin/releases/latest/download/TDinsight.sh) to allow users to configure the installation automatically and quickly. 
You can download the script via `wget` or other tools: @@ -300,7 +300,7 @@ This section contains the current information and status of the cluster, the ale ![TDengine Database TDinsight mnodes overview](./assets/TDinsight-3-mnodes.webp) -1. **MNodes Status**: a simple table view of `show mnodes`. 2. +1. **MNodes Status**: a simple table view of `show mnodes`. 2. **MNodes Number**: similar to `DNodes Number`, the number of MNodes changes. ### Request @@ -317,9 +317,9 @@ This section contains the current information and status of the cluster, the ale Database usage, repeated for each value of the variable `$database` i.e. multiple rows per database. -1. **STables**: number of super tables. 2. -2. **Total Tables**: number of all tables. 3. -3. **Sub Tables**: the number of all super table sub-tables. 4. +1. **STables**: number of super tables. +2. **Total Tables**: number of all tables. +3. **Sub Tables**: the number of all super table subtables. 4. **Tables**: graph of all normal table numbers over time. 5. **Tables Number Foreach VGroups**: The number of tables contained in each VGroups. @@ -330,18 +330,18 @@ Database usage, repeated for each value of the variable `$database` i.e. multipl Data node resource usage display with repeated multiple rows for the variable `$fqdn` i.e., each data node. Includes. 1. **Uptime**: the time elapsed since the dnode was created. -2. **Has MNodes?**: whether the current dnode is a mnode. 3. -3. **CPU Cores**: the number of CPU cores. 4. -4. **VNodes Number**: the number of VNodes in the current dnode. 5. -5. **VNodes Masters**: the number of vnodes in the master role. 6. +2. **Has MNodes?**: whether the current dnode is a mnode. +3. **CPU Cores**: the number of CPU cores. +4. **VNodes Number**: the number of VNodes in the current dnode. +5. **VNodes Masters**: the number of vnodes in the master role. 6. **Current CPU Usage of taosd**: CPU usage rate of taosd processes. 7. **Current Memory Usage of taosd**: memory usage of taosd processes. 8. **Disk Used**: The total disk usage percentage of the taosd data directory. -9. **CPU Usage**: Process and system CPU usage. 10. +9. **CPU Usage**: Process and system CPU usage. 10. **RAM Usage**: Time series view of RAM usage metrics. 11. **Disk Used**: Disks used at each level of multi-level storage (default is level0). 12. **Disk Increasing Rate per Minute**: Percentage increase or decrease in disk usage per minute. -13. **Disk IO**: Disk IO rate. 14. +13. **Disk IO**: Disk IO rate. 14. **Net IO**: Network IO, the aggregate network IO rate in addition to the local network. ### Login History @@ -376,7 +376,7 @@ TDinsight installed via the `TDinsight.sh` script can be cleaned up using the co To completely uninstall TDinsight during a manual installation, you need to clean up the following. 1. the TDinsight Dashboard in Grafana. -2. the Data Source in Grafana. 3. +2. the Data Source in Grafana. 3. remove the `tdengine-datasource` plugin from the plugin installation directory. ## Integrated Docker Example diff --git a/docs-en/20-third-party/01-grafana.mdx b/docs-en/20-third-party/01-grafana.mdx index ce45a12a04..dc2033ae6f 100644 --- a/docs-en/20-third-party/01-grafana.mdx +++ b/docs-en/20-third-party/01-grafana.mdx @@ -23,7 +23,7 @@ You can download The Grafana plugin for TDengine from = $from and ts < $to interval($interval)`, where, from, to and interval are built-in variables of the TDengine plugin, indicating the range and time interval of queries fetched from the Grafana plugin panel. 
In addition to the built-in variables, ` custom template variables are also supported. +- INPUT SQL: enter the statement to be queried (the result set of the SQL statement should be two columns and multiple rows), for example: `select avg(mem_system) from log.dn where ts >= $from and ts < $to interval($interval)`, where from, to and interval are built-in variables of the TDengine plugin, indicating the range and time interval of queries fetched from the Grafana plugin panel. In addition to the built-in variables, custom template variables are also supported. - ALIAS BY: This allows you to set the current query alias. - GENERATE SQL: Clicking this button will automatically replace the corresponding variables and generate the final executed statement. diff --git a/docs-en/20-third-party/09-emq-broker.md b/docs-en/20-third-party/09-emq-broker.md index ae393bb085..738372cabd 100644 --- a/docs-en/20-third-party/09-emq-broker.md +++ b/docs-en/20-third-party/09-emq-broker.md @@ -3,7 +3,7 @@ sidebar_label: EMQX Broker title: EMQX Broker writing --- -MQTT is a popular IoT data transfer protocol, [EMQX](https://github.com/emqx/emqx) is an open-source MQTT Broker software, without any code, only need to use "rules" in EMQX Dashboard to do simple configuration. You can write MQTT data directly to TDengine. EMQX supports saving data to TDengine by sending it to web services and provides a native TDengine driver for direct saving in the Enterprise Edition. Please refer to the [EMQX official documentation](https://www.emqx.io/docs/en/v4.4/rule/rule-engine.html) for details on how to use it. tdengine). +MQTT is a popular IoT data transfer protocol, and [EMQX](https://github.com/emqx/emqx) is an open-source MQTT Broker software. You can write MQTT data directly to TDengine without any code; you only need to use "rules" in the EMQX Dashboard to create a simple configuration. EMQX supports saving data to TDengine by sending it to web services and provides a native TDengine driver for direct saving in the Enterprise Edition. Please refer to the [EMQX official documentation](https://www.emqx.io/docs/en/v4.4/rule/rule-engine.html) for details on how to use it. ## Prerequisites diff --git a/docs-en/20-third-party/11-kafka.md b/docs-en/20-third-party/11-kafka.md index 155635c231..9c78a6645a 100644 --- a/docs-en/20-third-party/11-kafka.md +++ b/docs-en/20-third-party/11-kafka.md @@ -228,7 +228,7 @@ taos> select * from meters; Query OK, 4 row(s) in set (0.004208s) ``` -If you see the above data, the synchronization is successful. If not, check the logs of Kafka Connect. For detailed description of configuration parameters, see [Configuration Reference](#Configuration Reference). +If you see the above data, the synchronization is successful. If not, check the logs of Kafka Connect. For a detailed description of the configuration parameters, see [Configuration Reference](#configuration-reference). ## The use of TDengine Source Connector diff --git a/docs-examples/c/async_query_example.c b/docs-examples/c/async_query_example.c index 262757f02b..b370420b12 100644 --- a/docs-examples/c/async_query_example.c +++ b/docs-examples/c/async_query_example.c @@ -182,14 +182,14 @@ int main() { // query callback ...
// ts current voltage phase location groupid // numOfRow = 8 -// 1538548685000 10.300000 219 0.310000 beijing.chaoyang 2 -// 1538548695000 12.600000 218 0.330000 beijing.chaoyang 2 -// 1538548696800 12.300000 221 0.310000 beijing.chaoyang 2 -// 1538548696650 10.300000 218 0.250000 beijing.chaoyang 3 -// 1538548685500 11.800000 221 0.280000 beijing.haidian 2 -// 1538548696600 13.400000 223 0.290000 beijing.haidian 2 -// 1538548685000 10.800000 223 0.290000 beijing.haidian 3 -// 1538548686500 11.500000 221 0.350000 beijing.haidian 3 +// 1538548685500 11.800000 221 0.280000 california.losangeles 2 +// 1538548696600 13.400000 223 0.290000 california.losangeles 2 +// 1538548685000 10.800000 223 0.290000 california.losangeles 3 +// 1538548686500 11.500000 221 0.350000 california.losangeles 3 +// 1538548685000 10.300000 219 0.310000 california.sanfrancisco 2 +// 1538548695000 12.600000 218 0.330000 california.sanfrancisco 2 +// 1538548696800 12.300000 221 0.310000 california.sanfrancisco 2 +// 1538548696650 10.300000 218 0.250000 california.sanfrancisco 3 // numOfRow = 0 // no more data, close the connection. // ANCHOR_END: demo \ No newline at end of file diff --git a/docs-examples/csharp/AsyncQueryExample.cs b/docs-examples/csharp/AsyncQueryExample.cs index fe30d21efe..3dabbebd16 100644 --- a/docs-examples/csharp/AsyncQueryExample.cs +++ b/docs-examples/csharp/AsyncQueryExample.cs @@ -224,15 +224,15 @@ namespace TDengineExample } //output: -//Connect to TDengine success -//8 rows async retrieved +// Connect to TDengine success +// 8 rows async retrieved -//1538548685000 | 10.3 | 219 | 0.31 | beijing.chaoyang | 2 | -//1538548695000 | 12.6 | 218 | 0.33 | beijing.chaoyang | 2 | -//1538548696800 | 12.3 | 221 | 0.31 | beijing.chaoyang | 2 | -//1538548696650 | 10.3 | 218 | 0.25 | beijing.chaoyang | 3 | -//1538548685500 | 11.8 | 221 | 0.28 | beijing.haidian | 2 | -//1538548696600 | 13.4 | 223 | 0.29 | beijing.haidian | 2 | -//1538548685000 | 10.8 | 223 | 0.29 | beijing.haidian | 3 | -//1538548686500 | 11.5 | 221 | 0.35 | beijing.haidian | 3 | -//async retrieve complete. \ No newline at end of file +// 1538548685500 | 11.8 | 221 | 0.28 | california.losangeles | 2 | +// 1538548696600 | 13.4 | 223 | 0.29 | california.losangeles | 2 | +// 1538548685000 | 10.8 | 223 | 0.29 | california.losangeles | 3 | +// 1538548686500 | 11.5 | 221 | 0.35 | california.losangeles | 3 | +// 1538548685000 | 10.3 | 219 | 0.31 | california.sanfrancisco | 2 | +// 1538548695000 | 12.6 | 218 | 0.33 | california.sanfrancisco | 2 | +// 1538548696800 | 12.3 | 221 | 0.31 | california.sanfrancisco | 2 | +// 1538548696650 | 10.3 | 218 | 0.25 | california.sanfrancisco | 3 | +// async retrieve complete. \ No newline at end of file diff --git a/docs-examples/python/conn_native_pandas.py b/docs-examples/python/conn_native_pandas.py index 314759f766..56942ef570 100644 --- a/docs-examples/python/conn_native_pandas.py +++ b/docs-examples/python/conn_native_pandas.py @@ -13,7 +13,7 @@ print(df.head(3)) # output: # RangeIndex(start=0, stop=8, step=1) # -# ts current voltage phase location groupid -# 0 2018-10-03 14:38:05.000 10.3 219 0.31 beijing.chaoyang 2 -# 1 2018-10-03 14:38:15.000 12.6 218 0.33 beijing.chaoyang 2 -# 2 2018-10-03 14:38:16.800 12.3 221 0.31 beijing.chaoyang 2 +# ts current ... location groupid +# 0 2018-10-03 14:38:05.500 11.8 ... california.losangeles 2 +# 1 2018-10-03 14:38:16.600 13.4 ... california.losangeles 2 +# 2 2018-10-03 14:38:05.000 10.8 ... 
california.losangeles 3 diff --git a/docs-examples/python/conn_rest_pandas.py b/docs-examples/python/conn_rest_pandas.py index 143e4275fa..0164080cd5 100644 --- a/docs-examples/python/conn_rest_pandas.py +++ b/docs-examples/python/conn_rest_pandas.py @@ -11,9 +11,9 @@ print(type(df.ts[0])) print(df.head(3)) # output: -# # RangeIndex(start=0, stop=8, step=1) -# ts current ... location groupid -# 0 2018-10-03 14:38:05+08:00 10.3 ... beijing.chaoyang 2 -# 1 2018-10-03 14:38:15+08:00 12.6 ... beijing.chaoyang 2 -# 2 2018-10-03 14:38:16.800000+08:00 12.3 ... beijing.chaoyang 2 +# +# ts current ... location groupid +# 0 2018-10-03 06:38:05.500000+00:00 11.8 ... california.losangeles 2 +# 1 2018-10-03 06:38:16.600000+00:00 13.4 ... california.losangeles 2 +# 2 2018-10-03 06:38:05+00:00 10.8 ... california.losangeles 3 diff --git a/docs-examples/python/connect_rest_examples.py b/docs-examples/python/connect_rest_examples.py index 94e7d5f467..3303eb0e19 100644 --- a/docs-examples/python/connect_rest_examples.py +++ b/docs-examples/python/connect_rest_examples.py @@ -38,8 +38,7 @@ for row in data: # inserted row count: 8 # queried row count: 3 # ['ts', 'current', 'voltage', 'phase', 'location', 'groupid'] -# [datetime.datetime(2018, 10, 3, 14, 38, 5, tzinfo=datetime.timezone(datetime.timedelta(seconds=28800), '+08:00')), 10.3, 219, 0.31, 'beijing.chaoyang', 2] -# [datetime.datetime(2018, 10, 3, 14, 38, 15, tzinfo=datetime.timezone(datetime.timedelta(seconds=28800), '+08:00')), 12.6, 218, 0.33, 'beijing.chaoyang', 2] -# [datetime.datetime(2018, 10, 3, 14, 38, 16, 800000, tzinfo=datetime.timezone(datetime.timedelta(seconds=28800), '+08:00')), 12.3, 221, 0.31, 'beijing.chaoyang', 2] - +# [datetime.datetime(2018, 10, 3, 14, 38, 5, 500000, tzinfo=datetime.timezone(datetime.timedelta(seconds=28800), '+08:00')), 11.8, 221, 0.28, 'california.losangeles', 2] +# [datetime.datetime(2018, 10, 3, 14, 38, 16, 600000, tzinfo=datetime.timezone(datetime.timedelta(seconds=28800), '+08:00')), 13.4, 223, 0.29, 'california.losangeles', 2] +# [datetime.datetime(2018, 10, 3, 14, 38, 5, tzinfo=datetime.timezone(datetime.timedelta(seconds=28800), '+08:00')), 10.8, 223, 0.29, 'california.losangeles', 3] # ANCHOR_END: basic diff --git a/docs-examples/python/json_protocol_example.py b/docs-examples/python/json_protocol_example.py index bdf324f706..58b38f3ff6 100644 --- a/docs-examples/python/json_protocol_example.py +++ b/docs-examples/python/json_protocol_example.py @@ -5,9 +5,9 @@ from taos import SmlProtocol, SmlPrecision lines = [{"metric": "meters.current", "timestamp": 1648432611249, "value": 10.3, "tags": {"location": "California.SanFrancisco", "groupid": 2}}, {"metric": "meters.voltage", "timestamp": 1648432611249, "value": 219, - "tags": {"location": "California.LosAngeles", "groupid": 1}}, + "tags": {"location": "California.LosAngeles", "groupid": 1}}, {"metric": "meters.current", "timestamp": 1648432611250, "value": 12.6, - "tags": {"location": "California.SanFrancisco", "groupid": 2}}, + "tags": {"location": "California.SanFrancisco", "groupid": 2}}, {"metric": "meters.voltage", "timestamp": 1648432611250, "value": 221, "tags": {"location": "California.LosAngeles", "groupid": 1}}] diff --git a/docs-examples/python/query_example.py b/docs-examples/python/query_example.py index de5f26784c..8afd7f0735 100644 --- a/docs-examples/python/query_example.py +++ b/docs-examples/python/query_example.py @@ -12,10 +12,10 @@ def query_api_demo(conn: taos.TaosConnection): # field count: 7 -# meta of files[1]: {name: ts, type: 9, 
bytes: 8} +# meta of fields[1]: {name: ts, type: 9, bytes: 8} # ======================Iterate on result========================= -# ('d1001', datetime.datetime(2018, 10, 3, 14, 38, 5), 10.300000190734863, 219, 0.3100000023841858, 'California.SanFrancisco', 2) -# ('d1001', datetime.datetime(2018, 10, 3, 14, 38, 15), 12.600000381469727, 218, 0.33000001311302185, 'California.SanFrancisco', 2) +# ('d1003', datetime.datetime(2018, 10, 3, 14, 38, 5, 500000), 11.800000190734863, 221, 0.2800000011920929, 'california.losangeles', 2) +# ('d1003', datetime.datetime(2018, 10, 3, 14, 38, 16, 600000), 13.399999618530273, 223, 0.28999999165534973, 'california.losangeles', 2) # ANCHOR_END: iter # ANCHOR: fetch_all @@ -29,8 +29,8 @@ def fetch_all_demo(conn: taos.TaosConnection): # row count: 2 # ===============all data=================== -# [{'ts': datetime.datetime(2018, 10, 3, 14, 38, 5), 'current': 10.300000190734863}, -# {'ts': datetime.datetime(2018, 10, 3, 14, 38, 15), 'current': 12.600000381469727}] +# [{'ts': datetime.datetime(2018, 10, 3, 14, 38, 5, 500000), 'current': 11.800000190734863}, +# {'ts': datetime.datetime(2018, 10, 3, 14, 38, 16, 600000), 'current': 13.399999618530273}] # ANCHOR_END: fetch_all if __name__ == '__main__': diff --git a/include/common/tcommon.h b/include/common/tcommon.h index 45745403f3..88fa0e728f 100644 --- a/include/common/tcommon.h +++ b/include/common/tcommon.h @@ -105,12 +105,14 @@ typedef struct SColumnInfoData { } SColumnInfoData; typedef struct SQueryTableDataCond { - STimeWindow twindow; + //STimeWindow twindow; int32_t order; // desc|asc order to iterate the data block int32_t numOfCols; SColumnInfo *colList; bool loadExternalRows; // load external rows or not int32_t type; // data block load type: + int32_t numOfTWindows; + STimeWindow *twindows; } SQueryTableDataCond; void* blockDataDestroy(SSDataBlock* pBlock); diff --git a/include/dnode/mnode/mnode.h b/include/dnode/mnode/mnode.h index ddd6f1c05f..ab090940f2 100644 --- a/include/dnode/mnode/mnode.h +++ b/include/dnode/mnode/mnode.h @@ -81,7 +81,7 @@ int32_t mndGetLoad(SMnode *pMnode, SMnodeLoad *pLoad); * @param pMsg The request msg. * @return int32_t 0 for success, -1 for failure. 
*/ -int32_t mndProcessMsg(SRpcMsg *pMsg); +int32_t mndProcessRpcMsg(SRpcMsg *pMsg); int32_t mndProcessSyncMsg(SRpcMsg *pMsg); /** diff --git a/include/libs/catalog/catalog.h b/include/libs/catalog/catalog.h index 5b746015e3..8027b9394e 100644 --- a/include/libs/catalog/catalog.h +++ b/include/libs/catalog/catalog.h @@ -52,23 +52,31 @@ typedef struct SUserAuthInfo { AUTH_TYPE type; } SUserAuthInfo; +typedef struct SDbInfo { + int32_t vgVer; + int32_t tbNum; + int64_t dbId; +} SDbInfo; + typedef struct SCatalogReq { - SArray *pTableMeta; // element is SNAME SArray *pDbVgroup; // element is db full name + SArray *pDbCfg; // element is db full name + SArray *pDbInfo; // element is db full name + SArray *pTableMeta; // element is SNAME SArray *pTableHash; // element is SNAME SArray *pUdf; // element is udf name - SArray *pDbCfg; // element is db full name SArray *pIndex; // element is index name SArray *pUser; // element is SUserAuthInfo bool qNodeRequired; // valid qnode } SCatalogReq; typedef struct SMetaData { - SArray *pTableMeta; // SArray SArray *pDbVgroup; // SArray*> + SArray *pDbCfg; // SArray + SArray *pDbInfo; // SArray + SArray *pTableMeta; // SArray SArray *pTableHash; // SArray SArray *pUdfList; // SArray - SArray *pDbCfg; // SArray SArray *pIndex; // SArray SArray *pUser; // SArray SArray *pQnodeList; // SArray @@ -269,6 +277,8 @@ int32_t catalogChkAuth(SCatalog* pCtg, void *pRpc, const SEpSet* pMgmtEps, const int32_t catalogUpdateUserAuthInfo(SCatalog* pCtg, SGetUserAuthRsp* pAuth); +int32_t catalogUpdateVgEpSet(SCatalog* pCtg, const char* dbFName, int32_t vgId, SEpSet *epSet); + int32_t ctgdLaunchAsyncCall(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps, uint64_t reqId); diff --git a/include/libs/index/index.h b/include/libs/index/index.h index 05db99db0f..c3d31ffe38 100644 --- a/include/libs/index/index.h +++ b/include/libs/index/index.h @@ -192,11 +192,16 @@ void indexTermDestroy(SIndexTerm* p); void indexInit(); /* index filter */ +typedef struct SIndexMetaArg { + void* metaHandle; + uint64_t suid; +} SIndexMetaArg; + typedef enum { SFLT_NOT_INDEX, SFLT_COARSE_INDEX, SFLT_ACCURATE_INDEX } SIdxFltStatus; SIdxFltStatus idxGetFltStatus(SNode* pFilterNode); -int32_t doFilterTag(const SNode* pFilterNode, SArray* result); +int32_t doFilterTag(const SNode* pFilterNode, SIndexMetaArg* metaArg, SArray* result); /* * destory index env * diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h index 3ae2d18e5d..2648a468dd 100644 --- a/include/libs/nodes/plannodes.h +++ b/include/libs/nodes/plannodes.h @@ -56,6 +56,9 @@ typedef struct SScanLogicNode { int8_t intervalUnit; int8_t slidingUnit; SNode* pTagCond; + int8_t triggerType; + int64_t watermark; + int16_t tsColId; } SScanLogicNode; typedef struct SJoinLogicNode { @@ -216,6 +219,9 @@ typedef struct STableScanPhysiNode { int64_t sliding; int8_t intervalUnit; int8_t slidingUnit; + int8_t triggerType; + int64_t watermark; + int16_t tsColId; } STableScanPhysiNode; typedef STableScanPhysiNode STableSeqScanPhysiNode; diff --git a/include/libs/qcom/query.h b/include/libs/qcom/query.h index a30f3be7a1..296b18e8de 100644 --- a/include/libs/qcom/query.h +++ b/include/libs/qcom/query.h @@ -43,6 +43,12 @@ typedef enum { TASK_TYPE_TEMP, } ETaskType; +typedef enum { + TARGET_TYPE_MNODE = 1, + TARGET_TYPE_VNODE, + TARGET_TYPE_OTHER, +} ETargetType; + typedef struct STableComInfo { uint8_t numOfTags; // the number of tags in schema uint8_t precision; // the number of precision @@ -126,11 +132,18 @@ typedef struct SDataBuf { 
void* handle; } SDataBuf; +typedef struct STargetInfo { + ETargetType type; + char dbFName[TSDB_DB_FNAME_LEN]; // used to update db's vgroup epset + int32_t vgId; +} STargetInfo; + typedef int32_t (*__async_send_cb_fn_t)(void* param, const SDataBuf* pMsg, int32_t code); typedef int32_t (*__async_exec_fn_t)(void* param); typedef struct SMsgSendInfo { __async_send_cb_fn_t fp; // async callback function + STargetInfo target; // for update epset void* param; uint64_t requestId; uint64_t requestObjRefId; diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index 362df7b447..0c5c72906b 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -729,23 +729,55 @@ static void destroySendMsgInfo(SMsgSendInfo* pMsgBody) { taosMemoryFreeClear(pMsgBody); } +void updateTargetEpSet(SMsgSendInfo* pSendInfo, STscObj* pTscObj, SRpcMsg* pMsg, SEpSet* pEpSet) { + if (NULL == pEpSet) { + return; + } + + switch (pSendInfo->target.type) { + case TARGET_TYPE_MNODE: + if (NULL == pTscObj) { + tscError("mnode epset changed but not able to update it, reqObjRefId:%" PRIx64, pSendInfo->requestObjRefId); + return; + } + + updateEpSet_s(&pTscObj->pAppInfo->mgmtEp, pEpSet); + break; + case TARGET_TYPE_VNODE: { + if (NULL == pTscObj) { + tscError("vnode epset changed but not able to update it, reqObjRefId:%" PRIx64, pSendInfo->requestObjRefId); + return; + } + + SCatalog* pCatalog = NULL; + int32_t code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog); + if (code != TSDB_CODE_SUCCESS) { + tscError("fail to get catalog handle, clusterId:%" PRIx64 ", error %s", pTscObj->pAppInfo->clusterId, tstrerror(code)); + return; + } + + catalogUpdateVgEpSet(pCatalog, pSendInfo->target.dbFName, pSendInfo->target.vgId, pEpSet); + break; + } + default: + tscDebug("epset changed, not updated, msgType %s", TMSG_INFO(pMsg->msgType)); + break; + } +} + + void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) { SMsgSendInfo* pSendInfo = (SMsgSendInfo*)pMsg->info.ahandle; assert(pMsg->info.ahandle != NULL); + SRequestObj* pRequest = NULL; + STscObj* pTscObj = NULL; if (pSendInfo->requestObjRefId != 0) { SRequestObj* pRequest = (SRequestObj*)taosAcquireRef(clientReqRefPool, pSendInfo->requestObjRefId); assert(pRequest->self == pSendInfo->requestObjRefId); pRequest->metric.rsp = taosGetTimestampUs(); - - //STscObj* pTscObj = pRequest->pTscObj; - //if (pEpSet) { - // if (!isEpsetEqual(&pTscObj->pAppInfo->mgmtEp.epSet, pEpSet)) { - // updateEpSet_s(&pTscObj->pAppInfo->mgmtEp, pEpSet); - // } - //} - + pTscObj = pRequest->pTscObj; /* * There is not response callback function for submit response. * The actual inserted number of points is the first number. 
@@ -762,6 +794,8 @@ void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) { taosReleaseRef(clientReqRefPool, pSendInfo->requestObjRefId); } + updateTargetEpSet(pSendInfo, pTscObj, pMsg, pEpSet); + SDataBuf buf = {.len = pMsg->contLen, .pData = NULL, .handle = pMsg->info.handle}; if (pMsg->contLen > 0) { diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 0f34d52d35..33dee016d6 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -1153,7 +1153,9 @@ void colInfoDataCleanup(SColumnInfoData* pColumn, uint32_t numOfRows) { if (IS_VAR_DATA_TYPE(pColumn->info.type)) { pColumn->varmeta.length = 0; } else { - memset(pColumn->nullbitmap, 0, BitmapLen(numOfRows)); + if (pColumn->nullbitmap != NULL) { + memset(pColumn->nullbitmap, 0, BitmapLen(numOfRows)); + } } } diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index d0a2ddd9bb..141ec4f03b 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -293,7 +293,7 @@ int32_t taosAddClientLogCfg(SConfig *pCfg) { if (cfgAddInt32(pCfg, "jniDebugFlag", jniDebugFlag, 0, 255, 1) != 0) return -1; if (cfgAddInt32(pCfg, "simDebugFlag", 143, 0, 255, 1) != 0) return -1; if (cfgAddInt32(pCfg, "debugFlag", 0, 0, 255, 1) != 0) return -1; - if (cfgAddInt32(pCfg, "idxDebugFlag", 0, 0, 255, 1) != 0) return -1; + if (cfgAddInt32(pCfg, "idxDebugFlag", idxDebugFlag, 0, 255, 1) != 0) return -1; return 0; } diff --git a/source/common/src/trow.c b/source/common/src/trow.c index 4d0846f6c2..cc18240325 100644 --- a/source/common/src/trow.c +++ b/source/common/src/trow.c @@ -1191,9 +1191,9 @@ bool tdGetTpRowDataOfCol(STSRowIter *pIter, col_type_t colType, int32_t offset, } static FORCE_INLINE int32_t compareKvRowColId(const void *key1, const void *key2) { - if (*(int16_t *)key1 > ((SColIdx *)key2)->colId) { + if (*(col_id_t *)key1 > ((SKvRowIdx *)key2)->colId) { return 1; - } else if (*(int16_t *)key1 < ((SColIdx *)key2)->colId) { + } else if (*(col_id_t *)key1 < ((SKvRowIdx *)key2)->colId) { return -1; } else { return 0; diff --git a/source/common/src/ttime.c b/source/common/src/ttime.c index 38ad948981..685ee16893 100644 --- a/source/common/src/ttime.c +++ b/source/common/src/ttime.c @@ -528,14 +528,14 @@ int32_t convertStringToTimestamp(int16_t type, char *inputData, int64_t timePrec } taosMemoryFree(newColData); } else if (type == TSDB_DATA_TYPE_NCHAR) { - newColData = taosMemoryCalloc(1, charLen / TSDB_NCHAR_SIZE + 1); + newColData = taosMemoryCalloc(1, charLen + TSDB_NCHAR_SIZE); int len = taosUcs4ToMbs((TdUcs4 *)varDataVal(inputData), charLen, newColData); if (len < 0){ taosMemoryFree(newColData); return TSDB_CODE_FAILED; } newColData[len] = 0; - bool ret = taosParseTime(newColData, timeVal, len + 1, (int32_t)timePrec, tsDaylight); + int32_t ret = taosParseTime(newColData, timeVal, len + 1, (int32_t)timePrec, tsDaylight); if (ret != TSDB_CODE_SUCCESS) { taosMemoryFree(newColData); return ret; diff --git a/source/dnode/mgmt/mgmt_mnode/src/mmWorker.c b/source/dnode/mgmt/mgmt_mnode/src/mmWorker.c index 85120102bc..1de9875d06 100644 --- a/source/dnode/mgmt/mgmt_mnode/src/mmWorker.c +++ b/source/dnode/mgmt/mgmt_mnode/src/mmWorker.c @@ -40,7 +40,7 @@ static void mmProcessQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) { break; default: pMsg->info.node = pMgmt->pMnode; - code = mndProcessMsg(pMsg); + code = mndProcessRpcMsg(pMsg); } if (IsReq(pMsg) && pMsg->info.handle != NULL && code != TSDB_CODE_ACTION_IN_PROGRESS) { diff --git a/source/dnode/mnode/impl/inc/mndInt.h 
b/source/dnode/mnode/impl/inc/mndInt.h index cec49a4cbe..6661347e42 100644 --- a/source/dnode/mnode/impl/inc/mndInt.h +++ b/source/dnode/mnode/impl/inc/mndInt.h @@ -75,13 +75,12 @@ typedef struct { } STelemMgmt; typedef struct { - SWal *pWal; - sem_t syncSem; - int64_t sync; - bool standby; - bool restored; - int32_t errCode; - int32_t transId; + SWal *pWal; + sem_t syncSem; + int64_t sync; + bool standby; + int32_t errCode; + int32_t transId; } SSyncMgmt; typedef struct { @@ -90,34 +89,45 @@ typedef struct { } SGrantInfo; typedef struct SMnode { - int32_t selfDnodeId; - int64_t clusterId; - TdThread thread; - bool deploy; - bool stopped; - int8_t replica; - int8_t selfIndex; - SReplica replicas[TSDB_MAX_REPLICA]; - char *path; - int64_t checkTime; - SSdb *pSdb; - SMgmtWrapper *pWrapper; - SArray *pSteps; - SQHandle *pQuery; - SShowMgmt showMgmt; - SProfileMgmt profileMgmt; - STelemMgmt telemMgmt; - SSyncMgmt syncMgmt; - SHashObj *infosMeta; - SHashObj *perfsMeta; - SGrantInfo grant; - MndMsgFp msgFp[TDMT_MAX]; - SMsgCb msgCb; + int32_t selfDnodeId; + int64_t clusterId; + TdThread thread; + TdThreadRwlock lock; + int32_t rpcRef; + int32_t syncRef; + bool stopped; + bool restored; + bool deploy; + int8_t replica; + int8_t selfIndex; + SReplica replicas[TSDB_MAX_REPLICA]; + char *path; + int64_t checkTime; + SSdb *pSdb; + SArray *pSteps; + SQHandle *pQuery; + SHashObj *infosMeta; + SHashObj *perfsMeta; + SShowMgmt showMgmt; + SProfileMgmt profileMgmt; + STelemMgmt telemMgmt; + SSyncMgmt syncMgmt; + SGrantInfo grant; + MndMsgFp msgFp[TDMT_MAX]; + SMsgCb msgCb; } SMnode; void mndSetMsgHandle(SMnode *pMnode, tmsg_t msgType, MndMsgFp fp); int64_t mndGenerateUid(char *name, int32_t len); +int32_t mndAcquireRpcRef(SMnode *pMnode); +void mndReleaseRpcRef(SMnode *pMnode); +void mndSetRestore(SMnode *pMnode, bool restored); +void mndSetStop(SMnode *pMnode); +bool mndGetStop(SMnode *pMnode); +int32_t mndAcquireSyncRef(SMnode *pMnode); +void mndReleaseSyncRef(SMnode *pMnode); + #ifdef __cplusplus } #endif diff --git a/source/dnode/mnode/impl/src/mnode.c b/source/dnode/mnode/impl/src/mndMain.c similarity index 83% rename from source/dnode/mnode/impl/src/mnode.c rename to source/dnode/mnode/impl/src/mndMain.c index 5458bc5126..995fe83cc5 100644 --- a/source/dnode/mnode/impl/src/mnode.c +++ b/source/dnode/mnode/impl/src/mndMain.c @@ -85,7 +85,7 @@ static void *mndThreadFp(void *param) { while (1) { lastTime++; taosMsleep(100); - if (pMnode->stopped) break; + if (mndGetStop(pMnode)) break; if (lastTime % (tsTransPullupInterval * 10) == 0) { mndPullupTrans(pMnode); @@ -118,7 +118,6 @@ static int32_t mndInitTimer(SMnode *pMnode) { } static void mndCleanupTimer(SMnode *pMnode) { - pMnode->stopped = true; if (taosCheckPthreadValid(pMnode->thread)) { taosThreadJoin(pMnode->thread, NULL); taosThreadClear(&pMnode->thread); @@ -335,15 +334,19 @@ void mndClose(SMnode *pMnode) { int32_t mndStart(SMnode *pMnode) { mndSyncStart(pMnode); if (pMnode->deploy) { - if (sdbDeploy(pMnode->pSdb) != 0) return -1; - pMnode->syncMgmt.restored = true; + if (sdbDeploy(pMnode->pSdb) != 0) { + mError("failed to deploy sdb while start mnode"); + return -1; + } + mndSetRestore(pMnode, true); } return mndInitTimer(pMnode); } void mndStop(SMnode *pMnode) { + mndSetStop(pMnode); mndSyncStop(pMnode); - return mndCleanupTimer(pMnode); + mndCleanupTimer(pMnode); } int32_t mndProcessSyncMsg(SRpcMsg *pMsg) { @@ -362,6 +365,11 @@ int32_t mndProcessSyncMsg(SRpcMsg *pMsg) { return TAOS_SYNC_PROPOSE_OTHER_ERROR; } + if (mndAcquireSyncRef(pMnode) 
!= 0) { + mError("failed to process sync msg:%p type:%s since %s", pMsg, TMSG_INFO(pMsg->msgType), terrstr()); + return TAOS_SYNC_PROPOSE_OTHER_ERROR; + } + char logBuf[512]; char *syncNodeStr = sync2SimpleStr(pMgmt->sync); snprintf(logBuf, sizeof(logBuf), "==vnodeProcessSyncReq== msgType:%d, syncNode: %s", pMsg->msgType, syncNodeStr); @@ -405,59 +413,45 @@ int32_t mndProcessSyncMsg(SRpcMsg *pMsg) { code = TAOS_SYNC_PROPOSE_OTHER_ERROR; } + mndReleaseSyncRef(pMnode); return code; } -static int32_t mndCheckMnodeMaster(SRpcMsg *pMsg) { - if (!IsReq(pMsg)) return 0; - if (mndIsMaster(pMsg->info.node)) return 0; +static int32_t mndCheckMnodeState(SRpcMsg *pMsg) { + if (mndAcquireRpcRef(pMsg->info.node) == 0) return 0; - if (pMsg->msgType == TDMT_MND_MQ_TIMER || pMsg->msgType == TDMT_MND_TELEM_TIMER || - pMsg->msgType == TDMT_MND_TRANS_TIMER) { - return -1; - } - mError("msg:%p, failed to check master since %s, app:%p type:%s", pMsg, terrstr(), pMsg->info.ahandle, - TMSG_INFO(pMsg->msgType)); + if (IsReq(pMsg) && pMsg->msgType != TDMT_MND_MQ_TIMER && pMsg->msgType != TDMT_MND_TELEM_TIMER && + pMsg->msgType != TDMT_MND_TRANS_TIMER) { + mError("msg:%p, failed to check mnode state since %s, app:%p type:%s", pMsg, terrstr(), pMsg->info.ahandle, + TMSG_INFO(pMsg->msgType)); - SEpSet epSet = {0}; - mndGetMnodeEpSet(pMsg->info.node, &epSet); + SEpSet epSet = {0}; + mndGetMnodeEpSet(pMsg->info.node, &epSet); -#if 0 - mTrace("msg:%p, is redirected, num:%d use:%d", pMsg, epSet.numOfEps, epSet.inUse); - for (int32_t i = 0; i < epSet.numOfEps; ++i) { - mTrace("mnode index:%d %s:%u", i, epSet.eps[i].fqdn, epSet.eps[i].port); - if (strcmp(epSet.eps[i].fqdn, tsLocalFqdn) == 0 && epSet.eps[i].port == tsServerPort) { - epSet.inUse = (i + 1) % epSet.numOfEps; + int32_t contLen = tSerializeSEpSet(NULL, 0, &epSet); + pMsg->info.rsp = rpcMallocCont(contLen); + if (pMsg->info.rsp != NULL) { + tSerializeSEpSet(pMsg->info.rsp, contLen, &epSet); + pMsg->info.rspLen = contLen; + terrno = TSDB_CODE_RPC_REDIRECT; + } else { + terrno = TSDB_CODE_OUT_OF_MEMORY; } } -#endif - - int32_t contLen = tSerializeSEpSet(NULL, 0, &epSet); - pMsg->info.rsp = rpcMallocCont(contLen); - if (pMsg->info.rsp != NULL) { - tSerializeSEpSet(pMsg->info.rsp, contLen, &epSet); - pMsg->info.rspLen = contLen; - terrno = TSDB_CODE_RPC_REDIRECT; - } else { - terrno = TSDB_CODE_OUT_OF_MEMORY; - } return -1; } -static int32_t mndCheckRequestValid(SRpcMsg *pMsg) { +static int32_t mndCheckMsgContent(SRpcMsg *pMsg) { if (!IsReq(pMsg)) return 0; if (pMsg->contLen != 0 && pMsg->pCont != NULL) return 0; - mError("msg:%p, failed to valid request, app:%p type:%s", pMsg, pMsg->info.ahandle, TMSG_INFO(pMsg->msgType)); + mError("msg:%p, failed to check msg content, app:%p type:%s", pMsg, pMsg->info.ahandle, TMSG_INFO(pMsg->msgType)); terrno = TSDB_CODE_INVALID_MSG_LEN; return -1; } -int32_t mndProcessMsg(SRpcMsg *pMsg) { - if (mndCheckMnodeMaster(pMsg) != 0) return -1; - if (mndCheckRequestValid(pMsg) != 0) return -1; - +int32_t mndProcessRpcMsg(SRpcMsg *pMsg) { SMnode *pMnode = pMsg->info.node; MndMsgFp fp = pMnode->msgFp[TMSG_INDEX(pMsg->msgType)]; if (fp == NULL) { @@ -466,8 +460,13 @@ int32_t mndProcessMsg(SRpcMsg *pMsg) { return -1; } - mTrace("msg:%p, will be processed in mnode, app:%p type:%s", pMsg, pMsg->info.ahandle, TMSG_INFO(pMsg->msgType)); + if (mndCheckMsgContent(pMsg) != 0) return -1; + if (mndCheckMnodeState(pMsg) != 0) return -1; + + mTrace("msg:%p, start to process in mnode, app:%p type:%s", pMsg, pMsg->info.ahandle, TMSG_INFO(pMsg->msgType)); 
int32_t code = (*fp)(pMsg); + mndReleaseRpcRef(pMnode); + if (code == TSDB_CODE_ACTION_IN_PROGRESS) { mTrace("msg:%p, won't response immediately since in progress", pMsg); } else if (code == 0) { @@ -476,6 +475,7 @@ int32_t mndProcessMsg(SRpcMsg *pMsg) { mError("msg:%p, failed to process since %s, app:%p type:%s", pMsg, terrstr(), pMsg->info.ahandle, TMSG_INFO(pMsg->msgType)); } + return code; } @@ -502,7 +502,7 @@ int64_t mndGenerateUid(char *name, int32_t len) { int32_t mndGetMonitorInfo(SMnode *pMnode, SMonClusterInfo *pClusterInfo, SMonVgroupInfo *pVgroupInfo, SMonGrantInfo *pGrantInfo) { - if (!mndIsMaster(pMnode)) return -1; + if (mndAcquireRpcRef(pMnode) != 0) return -1; SSdb *pSdb = pMnode->pSdb; int64_t ms = taosGetTimestampMs(); @@ -511,6 +511,7 @@ int32_t mndGetMonitorInfo(SMnode *pMnode, SMonClusterInfo *pClusterInfo, SMonVgr pClusterInfo->mnodes = taosArrayInit(sdbGetSize(pSdb, SDB_MNODE), sizeof(SMonMnodeDesc)); pVgroupInfo->vgroups = taosArrayInit(sdbGetSize(pSdb, SDB_VGROUP), sizeof(SMonVgroupDesc)); if (pClusterInfo->dnodes == NULL || pClusterInfo->mnodes == NULL || pVgroupInfo->vgroups == NULL) { + mndReleaseRpcRef(pMnode); return -1; } @@ -605,6 +606,7 @@ int32_t mndGetMonitorInfo(SMnode *pMnode, SMonClusterInfo *pClusterInfo, SMonVgr pGrantInfo->timeseries_total = INT32_MAX; } + mndReleaseRpcRef(pMnode); return 0; } @@ -612,3 +614,76 @@ int32_t mndGetLoad(SMnode *pMnode, SMnodeLoad *pLoad) { pLoad->syncState = syncGetMyRole(pMnode->syncMgmt.sync); return 0; } + +int32_t mndAcquireRpcRef(SMnode *pMnode) { + int32_t code = 0; + taosThreadRwlockRdlock(&pMnode->lock); + if (pMnode->stopped) { + terrno = TSDB_CODE_APP_NOT_READY; + code = -1; + } else if (!mndIsMaster(pMnode)) { + code = -1; + } else { + int32_t ref = atomic_add_fetch_32(&pMnode->rpcRef, 1); + mTrace("mnode rpc is acquired, ref:%d", ref); + } + taosThreadRwlockUnlock(&pMnode->lock); + return code; +} + +void mndReleaseRpcRef(SMnode *pMnode) { + taosThreadRwlockRdlock(&pMnode->lock); + int32_t ref = atomic_sub_fetch_32(&pMnode->rpcRef, 1); + mTrace("mnode rpc is released, ref:%d", ref); + taosThreadRwlockUnlock(&pMnode->lock); +} + +void mndSetRestore(SMnode *pMnode, bool restored) { + if (restored) { + taosThreadRwlockWrlock(&pMnode->lock); + pMnode->restored = true; + taosThreadRwlockUnlock(&pMnode->lock); + mTrace("mnode set restored:%d", restored); + } else { + taosThreadRwlockWrlock(&pMnode->lock); + pMnode->restored = false; + taosThreadRwlockUnlock(&pMnode->lock); + mTrace("mnode set restored:%d", restored); + while (1) { + if (pMnode->rpcRef <= 0) break; + taosMsleep(3); + } + } +} + +bool mndGetRestored(SMnode *pMnode) { return pMnode->restored; } + +void mndSetStop(SMnode *pMnode) { + taosThreadRwlockWrlock(&pMnode->lock); + pMnode->stopped = true; + taosThreadRwlockUnlock(&pMnode->lock); + mTrace("mnode set stopped"); +} + +bool mndGetStop(SMnode *pMnode) { return pMnode->stopped; } + +int32_t mndAcquireSyncRef(SMnode *pMnode) { + int32_t code = 0; + taosThreadRwlockRdlock(&pMnode->lock); + if (pMnode->stopped) { + terrno = TSDB_CODE_APP_NOT_READY; + code = -1; + } else { + int32_t ref = atomic_add_fetch_32(&pMnode->syncRef, 1); + mTrace("mnode sync is acquired, ref:%d", ref); + } + taosThreadRwlockUnlock(&pMnode->lock); + return code; +} + +void mndReleaseSyncRef(SMnode *pMnode) { + taosThreadRwlockRdlock(&pMnode->lock); + int32_t ref = atomic_sub_fetch_32(&pMnode->syncRef, 1); + mTrace("mnode sync is released, ref:%d", ref); + taosThreadRwlockUnlock(&pMnode->lock); +} \ No newline at end of file 
diff --git a/source/dnode/mnode/impl/src/mndSubscribe.c b/source/dnode/mnode/impl/src/mndSubscribe.c index 17cf5d43b5..3f3f4f5b5d 100644 --- a/source/dnode/mnode/impl/src/mndSubscribe.c +++ b/source/dnode/mnode/impl/src/mndSubscribe.c @@ -940,7 +940,7 @@ static int32_t mndRetrieveSubscribe(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock } // do not show for cleared subscription -#if 0 +#if 1 int32_t sz = taosArrayGetSize(pSub->unassignedVgs); for (int32_t i = 0; i < sz; i++) { SMqVgEp *pVgEp = taosArrayGetP(pSub->unassignedVgs, i); diff --git a/source/dnode/mnode/impl/src/mndSync.c b/source/dnode/mnode/impl/src/mndSync.c index b53b3003c1..c6ab916ee1 100644 --- a/source/dnode/mnode/impl/src/mndSync.c +++ b/source/dnode/mnode/impl/src/mndSync.c @@ -17,12 +17,12 @@ #include "mndSync.h" #include "mndTrans.h" -int32_t mndSyncEqMsg(const SMsgCb *msgcb, SRpcMsg *pMsg) { +int32_t mndSyncEqMsg(const SMsgCb *msgcb, SRpcMsg *pMsg) { SMsgHead *pHead = pMsg->pCont; pHead->contLen = htonl(pHead->contLen); pHead->vgId = htonl(pHead->vgId); - return tmsgPutToQueue(msgcb, SYNC_QUEUE, pMsg); + return tmsgPutToQueue(msgcb, SYNC_QUEUE, pMsg); } int32_t mndSyncSendMsg(const SEpSet *pEpSet, SRpcMsg *pMsg) { return tmsgSendReq(pEpSet, pMsg); } @@ -32,7 +32,7 @@ void mndSyncCommitMsg(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbM SSyncMgmt *pMgmt = &pMnode->syncMgmt; SSdbRaw *pRaw = pMsg->pCont; - int32_t transId = sdbGetIdFromRaw(pRaw); + int32_t transId = sdbGetIdFromRaw(pMnode->pSdb, pRaw); pMgmt->errCode = cbMeta.code; mTrace("trans:%d, is proposed, savedTransId:%d code:0x%x, ver:%" PRId64 " term:%" PRId64 " role:%s raw:%p", transId, pMgmt->transId, cbMeta.code, cbMeta.index, cbMeta.term, syncStr(cbMeta.state), pRaw); @@ -63,28 +63,41 @@ void mndRestoreFinish(struct SSyncFSM *pFsm) { if (!pMnode->deploy) { mInfo("mnode sync restore finished"); mndTransPullup(pMnode); - pMnode->syncMgmt.restored = true; + mndSetRestore(pMnode, true); } } -int32_t mndSnapshotRead(struct SSyncFSM* pFsm, const SSnapshot* pSnapshot, void** ppIter, char** ppBuf, int32_t* len) { - /* +int32_t mndSnapshotRead(struct SSyncFSM *pFsm, const SSnapshot *pSnapshot, void **ppIter, char **ppBuf, int32_t *len) { SMnode *pMnode = pFsm->data; - SSdbIter *pIter; - if (iter == NULL) { - pIter = sdbIterInit(pMnode->sdb) + mInfo("start to read snapshot from sdb"); + + int32_t code = sdbReadSnapshot(pMnode->pSdb, (SSdbIter **)ppIter, ppBuf, len); + if (code != 0) { + mError("failed to read snapshot from sdb since %s", terrstr()); } else { - pIter = iter; + if (*ppIter == NULL) { + mInfo("successfully read snapshot from sdb"); + } } - */ - return 0; + return code; } -int32_t mndSnapshotApply(struct SSyncFSM* pFsm, const SSnapshot* pSnapshot, char* pBuf, int32_t len) { +int32_t mndSnapshotApply(struct SSyncFSM *pFsm, const SSnapshot *pSnapshot, char *pBuf, int32_t len) { SMnode *pMnode = pFsm->data; - sdbWrite(pMnode->pSdb, (SSdbRaw*)pBuf); - return 0; + mndSetRestore(pMnode, false); + mInfo("start to apply snapshot to sdb, len:%d", len); + + int32_t code = sdbApplySnapshot(pMnode->pSdb, pBuf, len); + if (code != 0) { + mError("failed to apply snapshot to sdb, len:%d", len); + } else { + mInfo("successfully applied snapshot to sdb, len:%d", len); + mndSetRestore(pMnode, true); + } + + // taosMemoryFree(pBuf); + return code; } void mndReConfig(struct SSyncFSM *pFsm, SSyncCfg newCfg, SReConfigCbMeta cbMeta) { @@ -116,7 +129,7 @@ SSyncFSM *mndSyncMakeFsm(SMnode *pMnode) { pFsm->FpSnapshotRead = mndSnapshotRead; pFsm->FpSnapshotApply = 
mndSnapshotApply; pFsm->FpReConfigCb = mndReConfig; - + return pFsm; } @@ -150,8 +163,7 @@ int32_t mndInitSync(SMnode *pMnode) { SSyncCfg *pCfg = &syncInfo.syncCfg; pCfg->replicaNum = pMnode->replica; pCfg->myIndex = pMnode->selfIndex; - mInfo("start to open mnode sync, replica:%d myindex:%d standby:%d", pCfg->replicaNum, pCfg->myIndex, - pMgmt->standby); + mInfo("start to open mnode sync, replica:%d myindex:%d standby:%d", pCfg->replicaNum, pCfg->myIndex, pMgmt->standby); for (int32_t i = 0; i < pMnode->replica; ++i) { SNodeInfo *pNode = &pCfg->nodeInfo[i]; tstrncpy(pNode->nodeFqdn, pMnode->replicas[i].fqdn, sizeof(pNode->nodeFqdn)); @@ -238,7 +250,7 @@ bool mndIsMaster(SMnode *pMnode) { return false; } - if (!pMgmt->restored) { + if (!pMnode->restored) { terrno = TSDB_CODE_APP_NOT_READY; return false; } diff --git a/source/dnode/mnode/sdb/inc/sdb.h b/source/dnode/mnode/sdb/inc/sdb.h index 3932defd8d..411d4c59ea 100644 --- a/source/dnode/mnode/sdb/inc/sdb.h +++ b/source/dnode/mnode/sdb/inc/sdb.h @@ -166,7 +166,6 @@ typedef struct SSdbRow { typedef struct SSdb { SMnode *pMnode; char *currDir; - char *syncDir; char *tmpDir; int64_t lastCommitVer; int64_t curVer; @@ -182,11 +181,12 @@ typedef struct SSdb { SdbDeployFp deployFps[SDB_MAX]; SdbEncodeFp encodeFps[SDB_MAX]; SdbDecodeFp decodeFps[SDB_MAX]; + TdThreadMutex filelock; } SSdb; typedef struct SSdbIter { TdFilePtr file; - int64_t readlen; + int64_t total; } SSdbIter; typedef struct { @@ -380,13 +380,12 @@ SSdbRow *sdbAllocRow(int32_t objSize); void *sdbGetRowObj(SSdbRow *pRow); void sdbFreeRow(SSdb *pSdb, SSdbRow *pRow, bool callFunc); -SSdbIter *sdbIterInit(SSdb *pSdb); -SSdbIter *sdbIterRead(SSdb *pSdb, SSdbIter *iter, char **ppBuf, int32_t *len); +int32_t sdbReadSnapshot(SSdb *pSdb, SSdbIter **ppIter, char **ppBuf, int32_t *len); +int32_t sdbApplySnapshot(SSdb *pSdb, char *pBuf, int32_t len); const char *sdbTableName(ESdbType type); void sdbPrintOper(SSdb *pSdb, SSdbRow *pRow, const char *oper); - -int32_t sdbGetIdFromRaw(SSdbRaw *pRaw); +int32_t sdbGetIdFromRaw(SSdb *pSdb, SSdbRaw *pRaw); #ifdef __cplusplus } diff --git a/source/dnode/mnode/sdb/inc/sdbInt.h b/source/dnode/mnode/sdb/inc/sdbInt.h deleted file mode 100644 index c49d6e8fb2..0000000000 --- a/source/dnode/mnode/sdb/inc/sdbInt.h +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -#ifndef _TD_SDB_INT_H_ -#define _TD_SDB_INT_H_ - -#include "os.h" - -#include "sdb.h" - -#ifdef __cplusplus -extern "C" { -#endif - -// clang-format off -#define mFatal(...) { if (mDebugFlag & DEBUG_FATAL) { taosPrintLog("MND FATAL ", DEBUG_FATAL, 255, __VA_ARGS__); }} -#define mError(...) { if (mDebugFlag & DEBUG_ERROR) { taosPrintLog("MND ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }} -#define mWarn(...) { if (mDebugFlag & DEBUG_WARN) { taosPrintLog("MND WARN ", DEBUG_WARN, 255, __VA_ARGS__); }} -#define mInfo(...) { if (mDebugFlag & DEBUG_INFO) { taosPrintLog("MND ", DEBUG_INFO, 255, __VA_ARGS__); }} -#define mDebug(...) 
{ if (mDebugFlag & DEBUG_DEBUG) { taosPrintLog("MND ", DEBUG_DEBUG, mDebugFlag, __VA_ARGS__); }} -#define mTrace(...) { if (mDebugFlag & DEBUG_TRACE) { taosPrintLog("MND ", DEBUG_TRACE, mDebugFlag, __VA_ARGS__); }} -// clang-format on - -typedef struct SSdbRaw { - int8_t type; - int8_t status; - int8_t sver; - int8_t reserved; - int32_t dataLen; - char pData[]; -} SSdbRaw; - -typedef struct SSdbRow { - ESdbType type; - ESdbStatus status; - int32_t refCount; - char pObj[]; -} SSdbRow; - -const char *sdbTableName(ESdbType type); -void sdbPrintOper(SSdb *pSdb, SSdbRow *pRow, const char *oper); - -void sdbFreeRow(SSdb *pSdb, SSdbRow *pRow, bool callFunc); - -#ifdef __cplusplus -} -#endif - -#endif /*_TD_SDB_INT_H_*/ diff --git a/source/dnode/mnode/sdb/src/sdb.c b/source/dnode/mnode/sdb/src/sdb.c index d289e30d7b..aef3476440 100644 --- a/source/dnode/mnode/sdb/src/sdb.c +++ b/source/dnode/mnode/sdb/src/sdb.c @@ -56,6 +56,7 @@ SSdb *sdbInit(SSdbOpt *pOption) { pSdb->curTerm = -1; pSdb->lastCommitVer = -1; pSdb->pMnode = pOption->pMnode; + taosThreadMutexInit(&pSdb->filelock, NULL); mDebug("sdb init successfully"); return pSdb; } @@ -69,10 +70,6 @@ void sdbCleanup(SSdb *pSdb) { taosMemoryFreeClear(pSdb->currDir); } - if (pSdb->syncDir != NULL) { - taosMemoryFreeClear(pSdb->syncDir); - } - if (pSdb->tmpDir != NULL) { taosMemoryFreeClear(pSdb->tmpDir); } @@ -104,6 +101,7 @@ void sdbCleanup(SSdb *pSdb) { mDebug("sdb table:%s is cleaned up", sdbTableName(i)); } + taosThreadMutexDestroy(&pSdb->filelock); taosMemoryFree(pSdb); mDebug("sdb is cleaned up"); } diff --git a/source/dnode/mnode/sdb/src/sdbFile.c b/source/dnode/mnode/sdb/src/sdbFile.c index 25cda19956..b2dcbd68e3 100644 --- a/source/dnode/mnode/sdb/src/sdbFile.c +++ b/source/dnode/mnode/sdb/src/sdbFile.c @@ -22,13 +22,14 @@ #define SDB_RESERVE_SIZE 512 #define SDB_FILE_VER 1 -static int32_t sdbRunDeployFp(SSdb *pSdb) { +static int32_t sdbDeployData(SSdb *pSdb) { mDebug("start to deploy sdb"); for (int32_t i = SDB_MAX - 1; i >= 0; --i) { SdbDeployFp fp = pSdb->deployFps[i]; if (fp == NULL) continue; + mDebug("start to deploy sdb:%s", sdbTableName(i)); if ((*fp)(pSdb->pMnode) != 0) { mError("failed to deploy sdb:%s since %s", sdbTableName(i), terrstr()); return -1; @@ -39,6 +40,42 @@ static int32_t sdbRunDeployFp(SSdb *pSdb) { return 0; } +static void sdbResetData(SSdb *pSdb) { + mDebug("start to reset sdb"); + + for (ESdbType i = 0; i < SDB_MAX; ++i) { + SHashObj *hash = pSdb->hashObjs[i]; + if (hash == NULL) continue; + + SSdbRow **ppRow = taosHashIterate(hash, NULL); + while (ppRow != NULL) { + SSdbRow *pRow = *ppRow; + if (pRow == NULL) { + ppRow = taosHashIterate(hash, ppRow); // advance before skipping, otherwise this loop never terminates + continue; + } + + sdbFreeRow(pSdb, pRow, true); + ppRow = taosHashIterate(hash, ppRow); + } + } + + for (ESdbType i = 0; i < SDB_MAX; ++i) { + SHashObj *hash = pSdb->hashObjs[i]; + if (hash == NULL) continue; + + taosHashClear(pSdb->hashObjs[i]); + pSdb->tableVer[i] = 0; + pSdb->maxId[i] = 0; + mDebug("sdb:%s is reset", sdbTableName(i)); + } + + pSdb->curVer = -1; + pSdb->curTerm = -1; + pSdb->lastCommitVer = -1; + mDebug("sdb reset successfully"); +} + static int32_t sdbReadFileHead(SSdb *pSdb, TdFilePtr pFile) { int64_t sver = 0; int32_t ret = taosReadFile(pFile, &sver, sizeof(int64_t)); @@ -169,11 +203,15 @@ static int32_t sdbWriteFileHead(SSdb *pSdb, TdFilePtr pFile) { return 0; } -int32_t sdbReadFile(SSdb *pSdb) { +static int32_t sdbReadFileImp(SSdb *pSdb) { int64_t offset = 0; int32_t code = 0; int32_t readLen = 0; int64_t ret = 0; + char file[PATH_MAX] = {0}; + + snprintf(file, sizeof(file), 
"%s%ssdb.data", pSdb->currDir, TD_DIRSEP); + mDebug("start to read file:%s", file); SSdbRaw *pRaw = taosMemoryMalloc(WAL_MAX_SIZE + 100); if (pRaw == NULL) { @@ -182,10 +220,6 @@ int32_t sdbReadFile(SSdb *pSdb) { return -1; } - char file[PATH_MAX] = {0}; - snprintf(file, sizeof(file), "%s%ssdb.data", pSdb->currDir, TD_DIRSEP); - mDebug("start to read file:%s", file); - TdFilePtr pFile = taosOpenFile(file, TD_FILE_READ); if (pFile == NULL) { taosMemoryFree(pRaw); @@ -196,8 +230,6 @@ if (sdbReadFileHead(pSdb, pFile) != 0) { mError("failed to read file:%s head since %s", file, terrstr()); - pSdb->curVer = -1; - pSdb->curTerm = -1; taosMemoryFree(pRaw); taosCloseFile(&pFile); return -1; @@ -264,6 +296,20 @@ _OVER: return code; } +int32_t sdbReadFile(SSdb *pSdb) { + taosThreadMutexLock(&pSdb->filelock); + + sdbResetData(pSdb); + int32_t code = sdbReadFileImp(pSdb); + if (code != 0) { + mError("failed to read sdb since %s", terrstr()); + sdbResetData(pSdb); + } + + taosThreadMutexUnlock(&pSdb->filelock); + return code; +} + static int32_t sdbWriteFileImp(SSdb *pSdb) { int32_t code = 0; @@ -378,32 +424,41 @@ int32_t sdbWriteFile(SSdb *pSdb) { return 0; } - return sdbWriteFileImp(pSdb); + taosThreadMutexLock(&pSdb->filelock); + int32_t code = sdbWriteFileImp(pSdb); + if (code != 0) { + mError("failed to write sdb since %s", terrstr()); + } + taosThreadMutexUnlock(&pSdb->filelock); + return code; } int32_t sdbDeploy(SSdb *pSdb) { - if (sdbRunDeployFp(pSdb) != 0) { + if (sdbDeployData(pSdb) != 0) { return -1; } - if (sdbWriteFileImp(pSdb) != 0) { + if (sdbWriteFile(pSdb) != 0) { return -1; } return 0; } -SSdbIter *sdbIterInit(SSdb *pSdb) { +static SSdbIter *sdbOpenIter(SSdb *pSdb) { char datafile[PATH_MAX] = {0}; char tmpfile[PATH_MAX] = {0}; snprintf(datafile, sizeof(datafile), "%s%ssdb.data", pSdb->currDir, TD_DIRSEP); - snprintf(tmpfile, sizeof(datafile), "%s%ssdb.data", pSdb->tmpDir, TD_DIRSEP); + snprintf(tmpfile, sizeof(tmpfile), "%s%ssdb.data", pSdb->tmpDir, TD_DIRSEP); + taosThreadMutexLock(&pSdb->filelock); if (taosCopyFile(datafile, tmpfile) != 0) { + taosThreadMutexUnlock(&pSdb->filelock); terrno = TAOS_SYSTEM_ERROR(errno); mError("failed to copy file %s to %s since %s", datafile, tmpfile, terrstr()); return NULL; } + taosThreadMutexUnlock(&pSdb->filelock); SSdbIter *pIter = taosMemoryCalloc(1, sizeof(SSdbIter)); if (pIter == NULL) { @@ -414,44 +469,144 @@ SSdbIter *sdbIterInit(SSdb *pSdb) { pIter->file = taosOpenFile(tmpfile, TD_FILE_READ); if (pIter->file == NULL) { terrno = TAOS_SYSTEM_ERROR(errno); - mError("failed to read snapshot file:%s since %s", tmpfile, terrstr()); + mError("failed to read file:%s since %s", tmpfile, terrstr()); taosMemoryFree(pIter); return NULL; } - mDebug("start to read snapshot file:%s, iter:%p", tmpfile, pIter); return pIter; } -SSdbIter *sdbIterRead(SSdb *pSdb, SSdbIter *pIter, char **ppBuf, int32_t *buflen) { - const int32_t maxlen = 100; +static void sdbCloseIter(SSdb *pSdb, SSdbIter *pIter) { + if (pIter == NULL) return; + if (pIter->file != NULL) { + taosCloseFile(&pIter->file); + } - char *pBuf = taosMemoryCalloc(1, maxlen); + char tmpfile[PATH_MAX] = {0}; + snprintf(tmpfile, sizeof(tmpfile), "%s%ssdb.data", pSdb->tmpDir, TD_DIRSEP); + taosRemoveFile(tmpfile); + + mInfo("sdbiter:%p, is closed", pIter); // log before freeing so pIter is still valid when its value is printed + taosMemoryFree(pIter); +} + +static SSdbIter *sdbGetIter(SSdb *pSdb, SSdbIter **ppIter) { + SSdbIter *pIter = NULL; + if (ppIter != NULL) pIter = *ppIter; + + if (pIter == NULL) { + pIter = sdbOpenIter(pSdb); + if 
(pIter != NULL) { + mInfo("sdbiter:%p, is created to read snapshot", pIter); + *ppIter = pIter; + } else { + mError("failed to create sdbiter to read snapshot since %s", terrstr()); + *ppIter = NULL; + return NULL; + } + } else { + mInfo("sdbiter:%p, continue to read snapshot, total:%" PRId64, pIter, pIter->total); + } + + return pIter; +} + +int32_t sdbReadSnapshot(SSdb *pSdb, SSdbIter **ppIter, char **ppBuf, int32_t *len) { + SSdbIter *pIter = sdbGetIter(pSdb, ppIter); + if (pIter == NULL) return -1; + + int32_t maxlen = 100; + char *pBuf = taosMemoryCalloc(1, maxlen); if (pBuf == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; - return NULL; + sdbCloseIter(pSdb, pIter); + return -1; } int32_t readlen = taosReadFile(pIter->file, pBuf, maxlen); - if (readlen == 0) { - mTrace("read snapshot to the end, readlen:%" PRId64, pIter->readlen); - taosMemoryFree(pBuf); - taosCloseFile(&pIter->file); - taosMemoryFree(pIter); - pIter = NULL; - } else if (readlen < 0) { + if (readlen < 0 || (readlen == 0 && errno != 0)) { terrno = TAOS_SYSTEM_ERROR(errno); - mError("failed to read snapshot since %s, readlen:%" PRId64, terrstr(), pIter->readlen); + mError("sdbiter:%p, failed to read snapshot since %s, total:%" PRId64, pIter, terrstr(), pIter->total); + *ppBuf = NULL; + *len = 0; + *ppIter = NULL; + sdbCloseIter(pSdb, pIter); taosMemoryFree(pBuf); - taosCloseFile(&pIter->file); - taosMemoryFree(pIter); - pIter = NULL; - } else { - pIter->readlen += readlen; - mTrace("read snapshot, readlen:%" PRId64, pIter->readlen); + return -1; + } else if (readlen == 0) { + mInfo("sdbiter:%p, read snapshot to the end, total:%" PRId64, pIter, pIter->total); + *ppBuf = NULL; + *len = 0; + *ppIter = NULL; + sdbCloseIter(pSdb, pIter); + taosMemoryFree(pBuf); + return 0; + } else if ((readlen < maxlen && errno != 0) || readlen == maxlen) { + pIter->total += readlen; + mInfo("sdbiter:%p, read:%d bytes from snapshot, total:%" PRId64, pIter, readlen, pIter->total); *ppBuf = pBuf; - *buflen = readlen; + *len = readlen; + return 0; + } else if (readlen < maxlen && errno == 0) { + mInfo("sdbiter:%p, read snapshot to the end, total:%" PRId64, pIter, pIter->total); + *ppBuf = pBuf; + *len = readlen; + *ppIter = NULL; + sdbCloseIter(pSdb, pIter); + return 0; + } else { + // impossible + mError("sdbiter:%p, read:%d bytes from snapshot, total:%" PRId64, pIter, readlen, pIter->total); + *ppBuf = NULL; + *len = 0; + *ppIter = NULL; + sdbCloseIter(pSdb, pIter); + taosMemoryFree(pBuf); + return -1; + } +} + +int32_t sdbApplySnapshot(SSdb *pSdb, char *pBuf, int32_t len) { + char datafile[PATH_MAX] = {0}; + char tmpfile[PATH_MAX] = {0}; + snprintf(datafile, sizeof(datafile), "%s%ssdb.data", pSdb->currDir, TD_DIRSEP); + snprintf(tmpfile, sizeof(tmpfile), "%s%ssdb.data", pSdb->tmpDir, TD_DIRSEP); + + TdFilePtr pFile = taosOpenFile(tmpfile, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC); + if (pFile == NULL) { + terrno = TAOS_SYSTEM_ERROR(errno); + mError("failed to write %s since %s", tmpfile, terrstr()); + return -1; } - return pIter; -} + int32_t writelen = taosWriteFile(pFile, pBuf, len); + if (writelen != len) { + terrno = TAOS_SYSTEM_ERROR(errno); + mError("failed to write %s since %s", tmpfile, terrstr()); + taosCloseFile(&pFile); + return -1; + } + + if (taosFsyncFile(pFile) != 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + mError("failed to fsync %s since %s", tmpfile, terrstr()); + taosCloseFile(&pFile); + return -1; + } + + (void)taosCloseFile(&pFile); + + if (taosRenameFile(tmpfile, datafile) != 0) { + terrno = TAOS_SYSTEM_ERROR(errno); 
+ mError("failed to rename file %s to %s since %s", tmpfile, datafile, terrstr()); + return -1; + } + + if (sdbReadFile(pSdb) != 0) { + mError("failed to read from %s since %s", datafile, terrstr()); + return -1; + } + + return 0; +} \ No newline at end of file diff --git a/source/dnode/mnode/sdb/src/sdbRaw.c b/source/dnode/mnode/sdb/src/sdbRaw.c index 4b61ebb627..90643a54a9 100644 --- a/source/dnode/mnode/sdb/src/sdbRaw.c +++ b/source/dnode/mnode/sdb/src/sdbRaw.c @@ -16,9 +16,14 @@ #define _DEFAULT_SOURCE #include "sdb.h" -int32_t sdbGetIdFromRaw(SSdbRaw *pRaw) { - int32_t id = *((int32_t *)(pRaw->pData)); - return id; +int32_t sdbGetIdFromRaw(SSdb *pSdb, SSdbRaw *pRaw) { + EKeyType keytype = pSdb->keyTypes[pRaw->type]; + if (keytype == SDB_KEY_INT32) { + int32_t id = *((int32_t *)(pRaw->pData)); + return id; + } else { + return -2; + } } SSdbRaw *sdbAllocRaw(ESdbType type, int8_t sver, int32_t dataLen) { diff --git a/source/dnode/vnode/inc/vnode.h b/source/dnode/vnode/inc/vnode.h index 2b713ff980..e4343e3bbf 100644 --- a/source/dnode/vnode/inc/vnode.h +++ b/source/dnode/vnode/inc/vnode.h @@ -105,13 +105,15 @@ tsdbReaderT tsdbQueryCacheLast(SVnode *pVnode, SQueryTableDataCond *pCond, STab void *pMemRef); int32_t tsdbGetFileBlocksDistInfo(tsdbReaderT *pReader, STableBlockDistInfo *pTableBlockInfo); bool isTsdbCacheLastRow(tsdbReaderT *pReader); -int32_t tsdbGetAllTableList(SMeta* pMeta, uint64_t uid, SArray* list); +int32_t tsdbGetAllTableList(SMeta *pMeta, uint64_t uid, SArray *list); +void * tsdbGetIdx(SMeta *pMeta); int64_t tsdbGetNumOfRowsInMemTable(tsdbReaderT *pHandle); -bool tsdbNextDataBlock(tsdbReaderT pTsdbReadHandle); -void tsdbRetrieveDataBlockInfo(tsdbReaderT *pTsdbReadHandle, SDataBlockInfo *pBlockInfo); + +bool tsdbNextDataBlock(tsdbReaderT pTsdbReadHandle); +void tsdbRetrieveDataBlockInfo(tsdbReaderT *pTsdbReadHandle, SDataBlockInfo *pBlockInfo); int32_t tsdbRetrieveDataBlockStatisInfo(tsdbReaderT *pTsdbReadHandle, SColumnDataAgg ***pBlockStatis, bool *allHave); SArray *tsdbRetrieveDataBlock(tsdbReaderT *pTsdbReadHandle, SArray *pColumnIdList); -void tsdbResetReadHandle(tsdbReaderT queryHandle, SQueryTableDataCond *pCond); +void tsdbResetReadHandle(tsdbReaderT queryHandle, SQueryTableDataCond *pCond, int32_t tWinIdx); void tsdbCleanupReadHandle(tsdbReaderT queryHandle); // tq @@ -174,7 +176,7 @@ struct SMetaEntry { int64_t version; int8_t type; tb_uid_t uid; - char *name; + char * name; union { struct { SSchemaWrapper schemaRow; @@ -202,17 +204,17 @@ struct SMetaEntry { struct SMetaReader { int32_t flags; - SMeta *pMeta; + SMeta * pMeta; SDecoder coder; SMetaEntry me; - void *pBuf; + void * pBuf; int32_t szBuf; }; struct SMTbCursor { - TBC *pDbc; - void *pKey; - void *pVal; + TBC * pDbc; + void * pKey; + void * pVal; int32_t kLen; int32_t vLen; SMetaReader mr; diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h index ba25c5e286..0e67d9e426 100644 --- a/source/dnode/vnode/src/inc/vnodeInt.h +++ b/source/dnode/vnode/src/inc/vnodeInt.h @@ -103,6 +103,7 @@ SArray* metaGetSmaTbUids(SMeta* pMeta); int32_t metaSnapshotReaderOpen(SMeta* pMeta, SMetaSnapshotReader** ppReader, int64_t sver, int64_t ever); int32_t metaSnapshotReaderClose(SMetaSnapshotReader* pReader); int32_t metaSnapshotRead(SMetaSnapshotReader* pReader, void** ppData, uint32_t* nData); +void* metaGetIdx(SMeta* pMeta); int32_t metaCreateTSma(SMeta* pMeta, int64_t version, SSmaCfg* pCfg); int32_t metaDropTSma(SMeta* pMeta, int64_t indexUid); diff --git 
a/source/dnode/vnode/src/meta/metaQuery.c b/source/dnode/vnode/src/meta/metaQuery.c index 184b640bdd..605e804933 100644 --- a/source/dnode/vnode/src/meta/metaQuery.c +++ b/source/dnode/vnode/src/meta/metaQuery.c @@ -155,44 +155,52 @@ int metaTbCursorNext(SMTbCursor *pTbCur) { } SSchemaWrapper *metaGetTableSchema(SMeta *pMeta, tb_uid_t uid, int32_t sver, bool isinline) { - void *pKey = NULL; - void *pVal = NULL; - int kLen = 0; - int vLen = 0; - int ret; - SSkmDbKey skmDbKey; - SSchemaWrapper *pSW = NULL; - SSchema *pSchema = NULL; - void *pBuf; - SDecoder coder = {0}; + void *pData = NULL; + int nData = 0; + int64_t version; + SSchemaWrapper schema = {0}; + SSchemaWrapper *pSchema = NULL; + SDecoder dc = {0}; - // fetch - skmDbKey.uid = uid; - skmDbKey.sver = sver; - pKey = &skmDbKey; - kLen = sizeof(skmDbKey); metaRLock(pMeta); - ret = tdbTbGet(pMeta->pSkmDb, pKey, kLen, &pVal, &vLen); - metaULock(pMeta); - if (ret < 0) { - return NULL; + if (sver < 0) { + if (tdbTbGet(pMeta->pUidIdx, &uid, sizeof(uid), &pData, &nData) < 0) { + goto _err; + } + + version = *(int64_t *)pData; + + tdbTbGet(pMeta->pTbDb, &(STbDbKey){.uid = uid, .version = version}, sizeof(STbDbKey), &pData, &nData); + + SMetaEntry me = {0}; + tDecoderInit(&dc, pData, nData); + metaDecodeEntry(&dc, &me); + if (me.type == TSDB_SUPER_TABLE) { + pSchema = tCloneSSchemaWrapper(&me.stbEntry.schemaRow); + } else if (me.type == TSDB_NORMAL_TABLE) { + } else { + ASSERT(0); + } + tDecoderClear(&dc); + } else { + if (tdbTbGet(pMeta->pSkmDb, &(SSkmDbKey){.uid = uid, .sver = sver}, sizeof(SSkmDbKey), &pData, &nData) < 0) { + goto _err; + } + + tDecoderInit(&dc, pData, nData); + tDecodeSSchemaWrapper(&dc, &schema); + pSchema = tCloneSSchemaWrapper(&schema); + tDecoderClear(&dc); } - // decode - pBuf = pVal; - pSW = taosMemoryMalloc(sizeof(SSchemaWrapper)); + metaULock(pMeta); + tdbFree(pData); + return pSchema; - tDecoderInit(&coder, pVal, vLen); - tDecodeSSchemaWrapper(&coder, pSW); - pSchema = taosMemoryMalloc(sizeof(SSchema) * pSW->nCols); - memcpy(pSchema, pSW->pSchema, sizeof(SSchema) * pSW->nCols); - tDecoderClear(&coder); - - pSW->pSchema = pSchema; - - tdbFree(pVal); - - return pSW; +_err: + metaULock(pMeta); + tdbFree(pData); + return NULL; } struct SMCtbCursor { diff --git a/source/dnode/vnode/src/meta/metaTable.c b/source/dnode/vnode/src/meta/metaTable.c index 7182f496c4..f610f18126 100644 --- a/source/dnode/vnode/src/meta/metaTable.c +++ b/source/dnode/vnode/src/meta/metaTable.c @@ -31,9 +31,9 @@ int metaCreateSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) { int vLen = 0; const void *pKey = NULL; const void *pVal = NULL; - void *pBuf = NULL; + void * pBuf = NULL; int32_t szBuf = 0; - void *p = NULL; + void * p = NULL; SMetaReader mr = {0}; // validate req @@ -87,7 +87,7 @@ int metaDropSTable(SMeta *pMeta, int64_t verison, SVDropStbReq *pReq) { } // drop all child tables - TBC *pCtbIdxc = NULL; + TBC * pCtbIdxc = NULL; SArray *pArray = taosArrayInit(8, sizeof(tb_uid_t)); tdbTbcOpen(pMeta->pCtbIdx, &pCtbIdxc, &pMeta->txn); @@ -142,8 +142,8 @@ _exit: int metaAlterSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) { SMetaEntry oStbEntry = {0}; SMetaEntry nStbEntry = {0}; - TBC *pUidIdxc = NULL; - TBC *pTbDbc = NULL; + TBC * pUidIdxc = NULL; + TBC * pTbDbc = NULL; const void *pData; int nData; int64_t oversion; @@ -262,7 +262,7 @@ _err: } int metaDropTable(SMeta *pMeta, int64_t version, SVDropTbReq *pReq, SArray *tbUids) { - void *pData = NULL; + void * pData = NULL; int nData = 0; int rc = 0; tb_uid_t uid; @@ 
-288,7 +288,7 @@ int metaDropTable(SMeta *pMeta, int64_t version, SVDropTbReq *pReq, SArray *tbUi } static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type) { - void *pData = NULL; + void * pData = NULL; int nData = 0; int rc = 0; int64_t version; @@ -324,14 +324,14 @@ static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type) { } static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAlterTbReq) { - void *pVal = NULL; + void * pVal = NULL; int nVal = 0; - const void *pData = NULL; + const void * pData = NULL; int nData = 0; int ret = 0; tb_uid_t uid; int64_t oversion; - SSchema *pColumn = NULL; + SSchema * pColumn = NULL; SMetaEntry entry = {0}; SSchemaWrapper *pSchema; int c; @@ -479,7 +479,7 @@ _err: static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pAlterTbReq) { SMetaEntry ctbEntry = {0}; SMetaEntry stbEntry = {0}; - void *pVal = NULL; + void * pVal = NULL; int nVal = 0; int ret; int c; @@ -510,7 +510,7 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA oversion = *(int64_t *)pData; // search table.db - TBC *pTbDbc = NULL; + TBC * pTbDbc = NULL; SDecoder dc1 = {0}; SDecoder dc2 = {0}; @@ -534,7 +534,7 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA metaDecodeEntry(&dc2, &stbEntry); SSchemaWrapper *pTagSchema = &stbEntry.stbEntry.schemaTag; - SSchema *pColumn = NULL; + SSchema * pColumn = NULL; int32_t iCol = 0; for (;;) { pColumn = NULL; @@ -639,8 +639,8 @@ int metaAlterTable(SMeta *pMeta, int64_t version, SVAlterTbReq *pReq) { static int metaSaveToTbDb(SMeta *pMeta, const SMetaEntry *pME) { STbDbKey tbDbKey; - void *pKey = NULL; - void *pVal = NULL; + void * pKey = NULL; + void * pVal = NULL; int kLen = 0; int vLen = 0; SEncoder coder = {0}; @@ -755,14 +755,14 @@ static void metaDestroyTagIdxKey(STagIdxKey *pTagIdxKey) { } static int metaUpdateTagIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry) { - void *pData = NULL; + void * pData = NULL; int nData = 0; STbDbKey tbDbKey = {0}; SMetaEntry stbEntry = {0}; - STagIdxKey *pTagIdxKey = NULL; + STagIdxKey * pTagIdxKey = NULL; int32_t nTagIdxKey; const SSchema *pTagColumn; // = &stbEntry.stbEntry.schema.pSchema[0]; - const void *pTagData = NULL; // + const void * pTagData = NULL; // SDecoder dc = {0}; // get super table @@ -804,7 +804,7 @@ static int metaUpdateTagIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry) { static int metaSaveToSkmDb(SMeta *pMeta, const SMetaEntry *pME) { SEncoder coder = {0}; - void *pVal = NULL; + void * pVal = NULL; int vLen = 0; int rcode = 0; SSkmDbKey skmDbKey = {0}; @@ -880,3 +880,11 @@ _err: metaULock(pMeta); return -1; } +// refactor later +void *metaGetIdx(SMeta *pMeta) { +#ifdef USE_INVERTED_INDEX + return pMeta->pTagIvtIdx; +#else + return pMeta->pTagIdx; +#endif +} diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index 192016166a..96ce6e8eee 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -235,6 +235,15 @@ int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd) { } } } + while (1) { + pIter = taosHashIterate(pTq->pStreamTasks, pIter); + if (pIter == NULL) break; + SStreamTask* pTask = (SStreamTask*)pIter; + if (pTask->inputType == STREAM_INPUT__DATA_SUBMIT) { + int32_t code = qUpdateQualifiedTableId(pTask->exec.executor, tbUidList, isAdd); + ASSERT(code == 0); + } + } return 0; } diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index 
c0b97f7536..fbfa70c117 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -13,6 +13,7 @@ * along with this program. If not, see . */ +#include "vnode.h" #include "tsdb.h" #define EXTRA_BYTES 2 @@ -140,12 +141,6 @@ typedef struct STsdbReadHandle { STSchema* pSchema; } STsdbReadHandle; -typedef struct STableGroupSupporter { - int32_t numOfCols; - SColIndex* pCols; - SSchema* pTagSchema; -} STableGroupSupporter; - static STimeWindow updateLastrowForEachGroup(STableListInfo* pList); static int32_t checkForCachedLastRow(STsdbReadHandle* pTsdbReadHandle, STableListInfo* pList); static int32_t checkForCachedLast(STsdbReadHandle* pTsdbReadHandle); @@ -211,12 +206,6 @@ int64_t tsdbGetNumOfRowsInMemTable(tsdbReaderT* pHandle) { return rows; } - // STableData* pMem = NULL; - // STableData* pIMem = NULL; - - // SMemTable* pMemT = pMemRef->snapshot.mem; - // SMemTable* pIMemT = pMemRef->snapshot.imem; - size_t size = taosArrayGetSize(pTsdbReadHandle->pTableCheckInfo); for (int32_t i = 0; i < size; ++i) { STableCheckInfo* pCheckInfo = taosArrayGet(pTsdbReadHandle->pTableCheckInfo, i); @@ -259,8 +248,8 @@ static SArray* createCheckInfoFromTableGroup(STsdbReadHandle* pTsdbReadHandle, S } taosArrayPush(pTableCheckInfo, &info); - tsdbDebug("%p check table uid:%" PRId64 " from lastKey:%" PRId64 " %s", pTsdbReadHandle, info.tableId, - info.lastKey, pTsdbReadHandle->idStr); + tsdbDebug("%p check table uid:%" PRId64 " from lastKey:%" PRId64 " %s", pTsdbReadHandle, info.tableId, info.lastKey, + pTsdbReadHandle->idStr); } // TODO group table according to the tag value. @@ -317,28 +306,28 @@ static int64_t getEarliestValidTimestamp(STsdb* pTsdb) { return now - (tsTickPerMin[pCfg->precision] * pCfg->keep2) + 1; // needs to add one tick } -static void setQueryTimewindow(STsdbReadHandle* pTsdbReadHandle, SQueryTableDataCond* pCond) { - pTsdbReadHandle->window = pCond->twindow; +static void setQueryTimewindow(STsdbReadHandle* pTsdbReadHandle, SQueryTableDataCond* pCond, int32_t tWinIdx) { + pTsdbReadHandle->window = pCond->twindows[tWinIdx]; bool updateTs = false; int64_t startTs = getEarliestValidTimestamp(pTsdbReadHandle->pTsdb); if (ASCENDING_TRAVERSE(pTsdbReadHandle->order)) { if (startTs > pTsdbReadHandle->window.skey) { pTsdbReadHandle->window.skey = startTs; - pCond->twindow.skey = startTs; + pCond->twindows[tWinIdx].skey = startTs; updateTs = true; } } else { if (startTs > pTsdbReadHandle->window.ekey) { pTsdbReadHandle->window.ekey = startTs; - pCond->twindow.ekey = startTs; + pCond->twindows[tWinIdx].ekey = startTs; updateTs = true; } } if (updateTs) { tsdbDebug("%p update the query time window, old:%" PRId64 " - %" PRId64 ", new:%" PRId64 " - %" PRId64 ", %s", - pTsdbReadHandle, pCond->twindow.skey, pCond->twindow.ekey, pTsdbReadHandle->window.skey, + pTsdbReadHandle, pCond->twindows[tWinIdx].skey, pCond->twindows[tWinIdx].ekey, pTsdbReadHandle->window.skey, pTsdbReadHandle->window.ekey, pTsdbReadHandle->idStr); } } @@ -363,13 +352,16 @@ static STsdb* getTsdbByRetentions(SVnode* pVnode, STsdbReadHandle* pReadHandle, } if (level == TSDB_RETENTION_L0) { - tsdbDebug("vgId:%d read handle %p rsma level %d is selected to query", TD_VID(pVnode), pReadHandle, TSDB_RETENTION_L0); + tsdbDebug("vgId:%d read handle %p rsma level %d is selected to query", TD_VID(pVnode), pReadHandle, + TSDB_RETENTION_L0); return VND_RSMA0(pVnode); } else if (level == TSDB_RETENTION_L1) { - tsdbDebug("vgId:%d read handle %p rsma level %d is selected to query", TD_VID(pVnode), pReadHandle, 
TSDB_RETENTION_L1); + tsdbDebug("vgId:%d read handle %p rsma level %d is selected to query", TD_VID(pVnode), pReadHandle, + TSDB_RETENTION_L1); return VND_RSMA1(pVnode); } else { - tsdbDebug("vgId:%d read handle %p rsma level %d is selected to query", TD_VID(pVnode), pReadHandle, TSDB_RETENTION_L2); + tsdbDebug("vgId:%d read handle %p rsma level %d is selected to query", TD_VID(pVnode), pReadHandle, + TSDB_RETENTION_L2); return VND_RSMA2(pVnode); } } @@ -382,7 +374,7 @@ static STsdbReadHandle* tsdbQueryTablesImpl(SVnode* pVnode, SQueryTableDataCond* goto _end; } - STsdb* pTsdb = getTsdbByRetentions(pVnode, pReadHandle, pCond->twindow.skey, pVnode->config.tsdbCfg.retentions); + STsdb* pTsdb = getTsdbByRetentions(pVnode, pReadHandle, pCond->twindows[0].skey, pVnode->config.tsdbCfg.retentions); pReadHandle->order = pCond->order; pReadHandle->pTsdb = pTsdb; @@ -408,11 +400,11 @@ static STsdbReadHandle* tsdbQueryTablesImpl(SVnode* pVnode, SQueryTableDataCond* } assert(pCond != NULL); - setQueryTimewindow(pReadHandle, pCond); + setQueryTimewindow(pReadHandle, pCond, 0); if (pCond->numOfCols > 0) { int32_t rowLen = 0; - for(int32_t i = 0; i < pCond->numOfCols; ++i) { + for (int32_t i = 0; i < pCond->numOfCols; ++i) { rowLen += pCond->colList[i].bytes; } @@ -447,10 +439,10 @@ static STsdbReadHandle* tsdbQueryTablesImpl(SVnode* pVnode, SQueryTableDataCond* } pReadHandle->suppInfo.defaultLoadColumn = getDefaultLoadColumns(pReadHandle, true); - pReadHandle->suppInfo.slotIds = - taosMemoryMalloc(sizeof(int32_t) * taosArrayGetSize(pReadHandle->suppInfo.defaultLoadColumn)); - pReadHandle->suppInfo.plist = - taosMemoryCalloc(taosArrayGetSize(pReadHandle->suppInfo.defaultLoadColumn), POINTER_BYTES); + + size_t size = taosArrayGetSize(pReadHandle->suppInfo.defaultLoadColumn); + pReadHandle->suppInfo.slotIds = taosMemoryCalloc(size, sizeof(int32_t)); + pReadHandle->suppInfo.plist = taosMemoryCalloc(size, POINTER_BYTES); } pReadHandle->pDataCols = tdNewDataCols(1000, pVnode->config.tsdbCfg.maxRows); @@ -471,6 +463,39 @@ _end: return NULL; } +static int32_t setCurrentSchema(SVnode* pVnode, STsdbReadHandle* pTsdbReadHandle) { + STableCheckInfo* pCheckInfo = taosArrayGet(pTsdbReadHandle->pTableCheckInfo, 0); + + int32_t sversion = 1; + + SMetaReader mr = {0}; + metaReaderInit(&mr, pVnode->pMeta, 0); + int32_t code = metaGetTableEntryByUid(&mr, pCheckInfo->tableId); + if (code != TSDB_CODE_SUCCESS) { + terrno = TSDB_CODE_TDB_INVALID_TABLE_ID; + metaReaderClear(&mr); + return terrno; + } + + if (mr.me.type == TSDB_CHILD_TABLE) { + tb_uid_t suid = mr.me.ctbEntry.suid; + code = metaGetTableEntryByUid(&mr, suid); + if (code != TSDB_CODE_SUCCESS) { + terrno = TSDB_CODE_TDB_INVALID_TABLE_ID; + metaReaderClear(&mr); + return terrno; + } + sversion = mr.me.stbEntry.schemaRow.version; + } else { + ASSERT(mr.me.type == TSDB_NORMAL_TABLE); + sversion = mr.me.ntbEntry.schemaRow.version; + } + + metaReaderClear(&mr); + pTsdbReadHandle->pSchema = metaGetTbTSchema(pVnode->pMeta, pCheckInfo->tableId, sversion); + return TSDB_CODE_SUCCESS; +} + tsdbReaderT* tsdbQueryTables(SVnode* pVnode, SQueryTableDataCond* pCond, STableListInfo* tableList, uint64_t qId, uint64_t taskId) { STsdbReadHandle* pTsdbReadHandle = tsdbQueryTablesImpl(pVnode, pCond, qId, taskId); @@ -490,9 +515,12 @@ tsdbReaderT* tsdbQueryTables(SVnode* pVnode, SQueryTableDataCond* pCond, STableL return NULL; } - STableCheckInfo* pCheckInfo = taosArrayGet(pTsdbReadHandle->pTableCheckInfo, 0); + int32_t code = setCurrentSchema(pVnode, pTsdbReadHandle); + if (code != 
TSDB_CODE_SUCCESS) { + terrno = code; + return NULL; + } - pTsdbReadHandle->pSchema = metaGetTbTSchema(pVnode->pMeta, pCheckInfo->tableId, 1); int32_t numOfCols = taosArrayGetSize(pTsdbReadHandle->suppInfo.defaultLoadColumn); int16_t* ids = pTsdbReadHandle->suppInfo.defaultLoadColumn->pData; @@ -520,7 +548,7 @@ tsdbReaderT* tsdbQueryTables(SVnode* pVnode, SQueryTableDataCond* pCond, STableL return (tsdbReaderT)pTsdbReadHandle; } -void tsdbResetReadHandle(tsdbReaderT queryHandle, SQueryTableDataCond* pCond) { +void tsdbResetReadHandle(tsdbReaderT queryHandle, SQueryTableDataCond* pCond, int32_t tWinIdx) { STsdbReadHandle* pTsdbReadHandle = queryHandle; if (emptyQueryTimewindow(pTsdbReadHandle)) { @@ -533,7 +561,7 @@ void tsdbResetReadHandle(tsdbReaderT queryHandle, SQueryTableDataCond* pCond) { } pTsdbReadHandle->order = pCond->order; - pTsdbReadHandle->window = pCond->twindow; + setQueryTimewindow(pTsdbReadHandle, pCond, tWinIdx); pTsdbReadHandle->type = TSDB_QUERY_TYPE_ALL; pTsdbReadHandle->cur.fid = -1; pTsdbReadHandle->cur.win = TSWINDOW_INITIALIZER; @@ -558,11 +586,11 @@ void tsdbResetReadHandle(tsdbReaderT queryHandle, SQueryTableDataCond* pCond) { resetCheckInfo(pTsdbReadHandle); } -void tsdbResetQueryHandleForNewTable(tsdbReaderT queryHandle, SQueryTableDataCond* pCond, STableListInfo* tableList) { +void tsdbResetQueryHandleForNewTable(tsdbReaderT queryHandle, SQueryTableDataCond* pCond, STableListInfo* tableList, int32_t tWinIdx) { STsdbReadHandle* pTsdbReadHandle = queryHandle; pTsdbReadHandle->order = pCond->order; - pTsdbReadHandle->window = pCond->twindow; + pTsdbReadHandle->window = pCond->twindows[tWinIdx]; pTsdbReadHandle->type = TSDB_QUERY_TYPE_ALL; pTsdbReadHandle->cur.fid = -1; pTsdbReadHandle->cur.win = TSWINDOW_INITIALIZER; @@ -602,7 +630,7 @@ void tsdbResetQueryHandleForNewTable(tsdbReaderT queryHandle, SQueryTableDataCon tsdbReaderT tsdbQueryLastRow(SVnode* pVnode, SQueryTableDataCond* pCond, STableListInfo* pList, uint64_t qId, uint64_t taskId) { - pCond->twindow = updateLastrowForEachGroup(pList); + pCond->twindows[0] = updateLastrowForEachGroup(pList); // no qualified table if (taosArrayGetSize(pList->pTableList) == 0) { @@ -620,7 +648,7 @@ tsdbReaderT tsdbQueryLastRow(SVnode* pVnode, SQueryTableDataCond* pCond, STableL return NULL; } - assert(pCond->order == TSDB_ORDER_ASC && pCond->twindow.skey <= pCond->twindow.ekey); + assert(pCond->order == TSDB_ORDER_ASC && pCond->twindows[0].skey <= pCond->twindows[0].ekey); if (pTsdbReadHandle->cachelastrow) { pTsdbReadHandle->type = TSDB_QUERY_TYPE_LAST; } @@ -660,7 +688,7 @@ SArray* tsdbGetQueriedTableList(tsdbReaderT* pHandle) { } // leave only one table for each group -//static STableGroupInfo* trimTableGroup(STimeWindow* window, STableGroupInfo* pGroupList) { +// static STableGroupInfo* trimTableGroup(STimeWindow* window, STableGroupInfo* pGroupList) { // assert(pGroupList); // size_t numOfGroup = taosArrayGetSize(pGroupList->pGroupList); // @@ -692,7 +720,7 @@ SArray* tsdbGetQueriedTableList(tsdbReaderT* pHandle) { // return pNew; //} -//tsdbReaderT tsdbQueryRowsInExternalWindow(SVnode* pVnode, SQueryTableDataCond* pCond, STableGroupInfo* groupList, +// tsdbReaderT tsdbQueryRowsInExternalWindow(SVnode* pVnode, SQueryTableDataCond* pCond, STableGroupInfo* groupList, // uint64_t qId, uint64_t taskId) { // STableGroupInfo* pNew = trimTableGroup(&pCond->twindow, groupList); // @@ -1299,7 +1327,6 @@ static int32_t handleDataMergeIfNeeded(STsdbReadHandle* pTsdbReadHandle, SBlock* if ((ascScan && (key != TSKEY_INITIAL_VAL && 
key <= binfo.window.ekey)) || (!ascScan && (key != TSKEY_INITIAL_VAL && key >= binfo.window.skey))) { - bool cacheDataInFileBlockHole = (ascScan && (key != TSKEY_INITIAL_VAL && key < binfo.window.skey)) || (!ascScan && (key != TSKEY_INITIAL_VAL && key > binfo.window.ekey)); if (cacheDataInFileBlockHole) { @@ -1342,7 +1369,7 @@ static int32_t handleDataMergeIfNeeded(STsdbReadHandle* pTsdbReadHandle, SBlock* pTsdbReadHandle->realNumOfRows = binfo.rows; cur->rows = binfo.rows; - cur->win = binfo.window; + cur->win = binfo.window; cur->mixBlock = false; cur->blockCompleted = true; @@ -1353,9 +1380,9 @@ static int32_t handleDataMergeIfNeeded(STsdbReadHandle* pTsdbReadHandle, SBlock* cur->lastKey = binfo.window.skey - 1; cur->pos = -1; } - } else { // partially copy to dest buffer + } else { // partially copy to dest buffer // make sure to only load once - bool firstTimeExtract = ((cur->pos == 0 && ascScan) || (cur->pos == binfo.rows -1 && (!ascScan))); + bool firstTimeExtract = ((cur->pos == 0 && ascScan) || (cur->pos == binfo.rows - 1 && (!ascScan))); if (pTsdbReadHandle->outputCapacity < binfo.rows && firstTimeExtract) { code = doLoadFileDataBlock(pTsdbReadHandle, pBlock, pCheckInfo, cur->slot); if (code != TSDB_CODE_SUCCESS) { @@ -1864,7 +1891,7 @@ static void copyAllRemainRowsFromFileBlock(STsdbReadHandle* pTsdbReadHandle, STa bool ascScan = ASCENDING_TRAVERSE(pTsdbReadHandle->order); - int32_t step = ascScan? 1 : -1; + int32_t step = ascScan ? 1 : -1; int32_t start = cur->pos; int32_t end = endPos; @@ -1879,8 +1906,8 @@ static void copyAllRemainRowsFromFileBlock(STsdbReadHandle* pTsdbReadHandle, STa // the time window should always be ascending order: skey <= ekey cur->win = (STimeWindow){.skey = tsArray[start], .ekey = tsArray[end]}; cur->mixBlock = (numOfRows != pBlockInfo->rows); - cur->lastKey = tsArray[endPos] + step; - cur->blockCompleted = (ascScan? (endPos == pBlockInfo->rows - 1):(endPos == 0)); + cur->lastKey = tsArray[endPos] + step; + cur->blockCompleted = (ascScan ? (endPos == pBlockInfo->rows - 1) : (endPos == 0)); // The value of pos may be -1 or pBlockInfo->rows, and it is invalid in both cases. int32_t pos = endPos + step; @@ -1896,7 +1923,7 @@ int32_t getEndPosInDataBlock(STsdbReadHandle* pTsdbReadHandle, SDataBlockInfo* p // NOTE: reverse the order to find the end position in data block int32_t endPos = -1; bool ascScan = ASCENDING_TRAVERSE(pTsdbReadHandle->order); - int32_t order = ascScan? TSDB_ORDER_DESC : TSDB_ORDER_ASC; + int32_t order = ascScan ? TSDB_ORDER_DESC : TSDB_ORDER_ASC; SQueryFilePos* cur = &pTsdbReadHandle->cur; SDataCols* pCols = pTsdbReadHandle->rhelper.pDCols[0]; @@ -1956,7 +1983,7 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf assert(pCols->numOfRows == pBlock->numOfRows && tsArray[0] == pBlock->keyFirst && tsArray[pBlock->numOfRows - 1] == pBlock->keyLast); - bool ascScan = ASCENDING_TRAVERSE(pTsdbReadHandle->order); + bool ascScan = ASCENDING_TRAVERSE(pTsdbReadHandle->order); int32_t step = ascScan ? 
1 : -1; // for search the endPos, so the order needs to reverse @@ -1967,8 +1994,9 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf STimeWindow* pWin = &blockInfo.window; tsdbDebug("%p uid:%" PRIu64 " start merge data block, file block range:%" PRIu64 "-%" PRIu64 - " rows:%d, start:%d, end:%d, %s", pTsdbReadHandle, pCheckInfo->tableId, pWin->skey, pWin->ekey, blockInfo.rows, - cur->pos, endPos, pTsdbReadHandle->idStr); + " rows:%d, start:%d, end:%d, %s", + pTsdbReadHandle, pCheckInfo->tableId, pWin->skey, pWin->ekey, blockInfo.rows, cur->pos, endPos, + pTsdbReadHandle->idStr); // compared with the data from in-memory buffer, to generate the correct timestamp array list int32_t numOfRows = 0; @@ -2087,8 +2115,9 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf } // still assign data into current row - numOfRows += mergeTwoRowFromMem(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, &curRow, row1, row2, numOfCols, - pCheckInfo->tableId, pSchema1, pSchema2, pCfg->update, &lastKeyAppend); + numOfRows += + mergeTwoRowFromMem(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, &curRow, row1, row2, numOfCols, + pCheckInfo->tableId, pSchema1, pSchema2, pCfg->update, &lastKeyAppend); if (cur->win.skey == TSKEY_INITIAL_VAL) { cur->win.skey = key; @@ -2153,8 +2182,7 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf * if cache is empty, load remain file block data. In contrast, if there are remain data in cache, do NOT * copy them all to result buffer, since it may be overlapped with file data block. */ - if (node == NULL || - ((TD_ROW_KEY((STSRow*)SL_GET_NODE_DATA(node)) > pTsdbReadHandle->window.ekey) && ascScan) || + if (node == NULL || ((TD_ROW_KEY((STSRow*)SL_GET_NODE_DATA(node)) > pTsdbReadHandle->window.ekey) && ascScan) || ((TD_ROW_KEY((STSRow*)SL_GET_NODE_DATA(node)) < pTsdbReadHandle->window.ekey) && !ascScan)) { // no data in cache or data in cache is greater than the ekey of time window, load data from file block if (cur->win.skey == TSKEY_INITIAL_VAL) { @@ -2175,7 +2203,7 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf } cur->blockCompleted = (((pos > endPos || cur->lastKey > pTsdbReadHandle->window.ekey) && ascScan) || - ((pos < endPos || cur->lastKey < pTsdbReadHandle->window.ekey) && !ascScan)); + ((pos < endPos || cur->lastKey < pTsdbReadHandle->window.ekey) && !ascScan)); if (!ascScan) { TSWAP(cur->win.skey, cur->win.ekey); @@ -2794,6 +2822,12 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int return numOfRows; } +void* tsdbGetIdx(SMeta* pMeta) { + if (pMeta == NULL) { + return NULL; + } + return metaGetIdx(pMeta); +} int32_t tsdbGetAllTableList(SMeta* pMeta, uint64_t uid, SArray* list) { SMCtbCursor* pCur = metaOpenCtbCursor(pMeta, uid); @@ -3382,65 +3416,65 @@ int32_t checkForCachedLast(STsdbReadHandle* pTsdbReadHandle) { STimeWindow updateLastrowForEachGroup(STableListInfo* pList) { STimeWindow window = {INT64_MAX, INT64_MIN}; -// int32_t totalNumOfTable = 0; -// SArray* emptyGroup = taosArrayInit(16, sizeof(int32_t)); -// -// // NOTE: starts from the buffer in case of descending timestamp order check data blocks -// size_t numOfGroups = taosArrayGetSize(groupList->pGroupList); -// for (int32_t j = 0; j < numOfGroups; ++j) { -// SArray* pGroup = taosArrayGetP(groupList->pGroupList, j); -// TSKEY key = TSKEY_INITIAL_VAL; -// -// STableKeyInfo keyInfo = {0}; -// -// size_t numOfTables = taosArrayGetSize(pGroup); -// 
for (int32_t i = 0; i < numOfTables; ++i) { -// STableKeyInfo* pInfo = (STableKeyInfo*)taosArrayGet(pGroup, i); -// -// // if the lastKey equals to INT64_MIN, there is no data in this table -// TSKEY lastKey = 0; //((STable*)(pInfo->pTable))->lastKey; -// if (key < lastKey) { -// key = lastKey; -// -// // keyInfo.pTable = pInfo->pTable; -// keyInfo.lastKey = key; -// pInfo->lastKey = key; -// -// if (key < window.skey) { -// window.skey = key; -// } -// -// if (key > window.ekey) { -// window.ekey = key; -// } -// } -// } -// -// // more than one table in each group, only one table left for each group -// // if (keyInfo.pTable != NULL) { -// // totalNumOfTable++; -// // if (taosArrayGetSize(pGroup) == 1) { -// // // do nothing -// // } else { -// // taosArrayClear(pGroup); -// // taosArrayPush(pGroup, &keyInfo); -// // } -// // } else { // mark all the empty groups, and remove it later -// // taosArrayDestroy(pGroup); -// // taosArrayPush(emptyGroup, &j); -// // } -// } -// -// // window does not being updated, so set the original -// if (window.skey == INT64_MAX && window.ekey == INT64_MIN) { -// window = TSWINDOW_INITIALIZER; -// assert(totalNumOfTable == 0 && taosArrayGetSize(groupList->pGroupList) == numOfGroups); -// } -// -// taosArrayRemoveBatch(groupList->pGroupList, TARRAY_GET_START(emptyGroup), (int32_t)taosArrayGetSize(emptyGroup)); -// taosArrayDestroy(emptyGroup); -// -// groupList->numOfTables = totalNumOfTable; + // int32_t totalNumOfTable = 0; + // SArray* emptyGroup = taosArrayInit(16, sizeof(int32_t)); + // + // // NOTE: starts from the buffer in case of descending timestamp order check data blocks + // size_t numOfGroups = taosArrayGetSize(groupList->pGroupList); + // for (int32_t j = 0; j < numOfGroups; ++j) { + // SArray* pGroup = taosArrayGetP(groupList->pGroupList, j); + // TSKEY key = TSKEY_INITIAL_VAL; + // + // STableKeyInfo keyInfo = {0}; + // + // size_t numOfTables = taosArrayGetSize(pGroup); + // for (int32_t i = 0; i < numOfTables; ++i) { + // STableKeyInfo* pInfo = (STableKeyInfo*)taosArrayGet(pGroup, i); + // + // // if the lastKey equals to INT64_MIN, there is no data in this table + // TSKEY lastKey = 0; //((STable*)(pInfo->pTable))->lastKey; + // if (key < lastKey) { + // key = lastKey; + // + // // keyInfo.pTable = pInfo->pTable; + // keyInfo.lastKey = key; + // pInfo->lastKey = key; + // + // if (key < window.skey) { + // window.skey = key; + // } + // + // if (key > window.ekey) { + // window.ekey = key; + // } + // } + // } + // + // // more than one table in each group, only one table left for each group + // // if (keyInfo.pTable != NULL) { + // // totalNumOfTable++; + // // if (taosArrayGetSize(pGroup) == 1) { + // // // do nothing + // // } else { + // // taosArrayClear(pGroup); + // // taosArrayPush(pGroup, &keyInfo); + // // } + // // } else { // mark all the empty groups, and remove it later + // // taosArrayDestroy(pGroup); + // // taosArrayPush(emptyGroup, &j); + // // } + // } + // + // // window does not being updated, so set the original + // if (window.skey == INT64_MAX && window.ekey == INT64_MIN) { + // window = TSWINDOW_INITIALIZER; + // assert(totalNumOfTable == 0 && taosArrayGetSize(groupList->pGroupList) == numOfGroups); + // } + // + // taosArrayRemoveBatch(groupList->pGroupList, TARRAY_GET_START(emptyGroup), (int32_t)taosArrayGetSize(emptyGroup)); + // taosArrayDestroy(emptyGroup); + // + // groupList->numOfTables = totalNumOfTable; return window; } @@ -3471,7 +3505,6 @@ void tsdbRetrieveDataBlockInfo(tsdbReaderT* 
pTsdbReadHandle, SDataBlockInfo* pDa pDataBlockInfo->rows = cur->rows; pDataBlockInfo->window = cur->win; - // ASSERT(pDataBlockInfo->numOfCols >= (int32_t)(QH_GET_NUM_OF_COLS(pHandle)); } /* @@ -3537,9 +3570,9 @@ int32_t tsdbRetrieveDataBlockStatisInfo(tsdbReaderT* pTsdbReadHandle, SColumnDat if (IS_BSMA_ON(&(pHandle->pSchema->columns[slotIds[i]]))) { if (pHandle->suppInfo.pstatis[i].numOfNull == -1) { // set the column data are all NULL pHandle->suppInfo.pstatis[i].numOfNull = pBlockInfo->compBlock->numOfRows; - } else { - pHandle->suppInfo.plist[i] = &pHandle->suppInfo.pstatis[i]; } + + pHandle->suppInfo.plist[i] = &pHandle->suppInfo.pstatis[i]; } else { *allHave = false; } @@ -3588,108 +3621,6 @@ SArray* tsdbRetrieveDataBlock(tsdbReaderT* pTsdbReadHandle, SArray* pIdList) { } } } -#if 0 -void filterPrepare(void* expr, void* param) { - tExprNode* pExpr = (tExprNode*)expr; - if (pExpr->_node.info != NULL) { - return; - } - - pExpr->_node.info = taosMemoryCalloc(1, sizeof(tQueryInfo)); - - STSchema* pTSSchema = (STSchema*) param; - tQueryInfo* pInfo = pExpr->_node.info; - tVariant* pCond = pExpr->_node.pRight->pVal; - SSchema* pSchema = pExpr->_node.pLeft->pSchema; - - pInfo->sch = *pSchema; - pInfo->optr = pExpr->_node.optr; - pInfo->compare = getComparFunc(pInfo->sch.type, pInfo->optr); - pInfo->indexed = pTSSchema->columns->colId == pInfo->sch.colId; - - if (pInfo->optr == TSDB_RELATION_IN) { - int dummy = -1; - SHashObj *pObj = NULL; - if (pInfo->sch.colId == TSDB_TBNAME_COLUMN_INDEX) { - pObj = taosHashInit(256, taosGetDefaultHashFunction(pInfo->sch.type), true, false); - SArray *arr = (SArray *)(pCond->arr); - for (size_t i = 0; i < taosArrayGetSize(arr); i++) { - char* p = taosArrayGetP(arr, i); - strntolower_s(varDataVal(p), varDataVal(p), varDataLen(p)); - taosHashPut(pObj, varDataVal(p), varDataLen(p), &dummy, sizeof(dummy)); - } - } else { - buildFilterSetFromBinary((void **)&pObj, pCond->pz, pCond->nLen); - } - pInfo->q = (char *)pObj; - } else if (pCond != NULL) { - uint32_t size = pCond->nLen * TSDB_NCHAR_SIZE; - if (size < (uint32_t)pSchema->bytes) { - size = pSchema->bytes; - } - // to make sure tonchar does not cause invalid write, since the '\0' needs at least sizeof(TdUcs4) space. 
- pInfo->q = taosMemoryCalloc(1, size + TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE); - tVariantDump(pCond, pInfo->q, pSchema->type, true); - } -} - -#endif - -static int32_t tableGroupComparFn(const void* p1, const void* p2, const void* param) { -#if 0 - STableGroupSupporter* pTableGroupSupp = (STableGroupSupporter*) param; - STable* pTable1 = ((STableKeyInfo*) p1)->uid; - STable* pTable2 = ((STableKeyInfo*) p2)->uid; - - for (int32_t i = 0; i < pTableGroupSupp->numOfCols; ++i) { - SColIndex* pColIndex = &pTableGroupSupp->pCols[i]; - int32_t colIndex = pColIndex->colIndex; - - assert(colIndex >= TSDB_TBNAME_COLUMN_INDEX); - - char * f1 = NULL; - char * f2 = NULL; - int32_t type = 0; - int32_t bytes = 0; - - if (colIndex == TSDB_TBNAME_COLUMN_INDEX) { - f1 = (char*) TABLE_NAME(pTable1); - f2 = (char*) TABLE_NAME(pTable2); - type = TSDB_DATA_TYPE_BINARY; - bytes = tGetTbnameColumnSchema()->bytes; - } else { - if (pTableGroupSupp->pTagSchema && colIndex < pTableGroupSupp->pTagSchema->numOfCols) { - STColumn* pCol = schemaColAt(pTableGroupSupp->pTagSchema, colIndex); - bytes = pCol->bytes; - type = pCol->type; - f1 = tdGetKVRowValOfCol(pTable1->tagVal, pCol->colId); - f2 = tdGetKVRowValOfCol(pTable2->tagVal, pCol->colId); - } - } - - // this tags value may be NULL - if (f1 == NULL && f2 == NULL) { - continue; - } - - if (f1 == NULL) { - return -1; - } - - if (f2 == NULL) { - return 1; - } - - int32_t ret = doCompare(f1, f2, type, bytes); - if (ret == 0) { - continue; - } else { - return ret; - } - } -#endif - return 0; -} static int tsdbCheckInfoCompar(const void* key1, const void* key2) { if (((STableCheckInfo*)key1)->tableId < ((STableCheckInfo*)key2)->tableId) { @@ -3702,170 +3633,6 @@ static int tsdbCheckInfoCompar(const void* key1, const void* key2) { } } -void createTableGroupImpl(SArray* pGroups, SArray* pTableList, size_t numOfTables, TSKEY skey, - STableGroupSupporter* pSupp, __ext_compar_fn_t compareFn) { - STable* pTable = taosArrayGetP(pTableList, 0); - SArray* g = taosArrayInit(16, sizeof(STableKeyInfo)); - - STableKeyInfo info = {.lastKey = skey}; - taosArrayPush(g, &info); - - for (int32_t i = 1; i < numOfTables; ++i) { - STable** prev = taosArrayGet(pTableList, i - 1); - STable** p = taosArrayGet(pTableList, i); - - int32_t ret = compareFn(prev, p, pSupp); - assert(ret == 0 || ret == -1); - - if (ret == 0) { - STableKeyInfo info1 = {.lastKey = skey}; - taosArrayPush(g, &info1); - } else { - taosArrayPush(pGroups, &g); // current group is ended, start a new group - g = taosArrayInit(16, sizeof(STableKeyInfo)); - - STableKeyInfo info1 = {.lastKey = skey}; - taosArrayPush(g, &info1); - } - } - - taosArrayPush(pGroups, &g); -} - -SArray* createTableGroup(SArray* pTableList, SSchemaWrapper* pTagSchema, SColIndex* pCols, int32_t numOfOrderCols, - TSKEY skey) { - assert(pTableList != NULL); - SArray* pTableGroup = taosArrayInit(1, POINTER_BYTES); - - size_t size = taosArrayGetSize(pTableList); - if (size == 0) { - tsdbDebug("no qualified tables"); - return pTableGroup; - } - - if (numOfOrderCols == 0 || size == 1) { // no group by tags clause or only one table - SArray* sa = taosArrayDup(pTableList); - if (sa == NULL) { - taosArrayDestroy(pTableGroup); - return NULL; - } - - taosArrayPush(pTableGroup, &sa); - tsdbDebug("all %" PRIzu " tables belong to one group", size); - } else { - STableGroupSupporter sup = {0}; - sup.numOfCols = numOfOrderCols; - sup.pTagSchema = pTagSchema->pSchema; - sup.pCols = pCols; - - taosqsort(pTableList->pData, size, sizeof(STableKeyInfo), &sup, 
tableGroupComparFn); - createTableGroupImpl(pTableGroup, pTableList, size, skey, &sup, tableGroupComparFn); - } - - return pTableGroup; -} - -// static bool tableFilterFp(const void* pNode, void* param) { -// tQueryInfo* pInfo = (tQueryInfo*) param; -// -// STable* pTable = (STable*)(SL_GET_NODE_DATA((SSkipListNode*)pNode)); -// -// char* val = NULL; -// if (pInfo->sch.colId == TSDB_TBNAME_COLUMN_INDEX) { -// val = (char*) TABLE_NAME(pTable); -// } else { -// val = tdGetKVRowValOfCol(pTable->tagVal, pInfo->sch.colId); -// } -// -// if (pInfo->optr == TSDB_RELATION_ISNULL || pInfo->optr == TSDB_RELATION_NOTNULL) { -// if (pInfo->optr == TSDB_RELATION_ISNULL) { -// return (val == NULL) || isNull(val, pInfo->sch.type); -// } else if (pInfo->optr == TSDB_RELATION_NOTNULL) { -// return (val != NULL) && (!isNull(val, pInfo->sch.type)); -// } -// } else if (pInfo->optr == TSDB_RELATION_IN) { -// int type = pInfo->sch.type; -// if (type == TSDB_DATA_TYPE_BOOL || IS_SIGNED_NUMERIC_TYPE(type) || type == TSDB_DATA_TYPE_TIMESTAMP) { -// int64_t v; -// GET_TYPED_DATA(v, int64_t, pInfo->sch.type, val); -// return NULL != taosHashGet((SHashObj *)pInfo->q, (char *)&v, sizeof(v)); -// } else if (IS_UNSIGNED_NUMERIC_TYPE(type)) { -// uint64_t v; -// GET_TYPED_DATA(v, uint64_t, pInfo->sch.type, val); -// return NULL != taosHashGet((SHashObj *)pInfo->q, (char *)&v, sizeof(v)); -// } -// else if (type == TSDB_DATA_TYPE_DOUBLE || type == TSDB_DATA_TYPE_FLOAT) { -// double v; -// GET_TYPED_DATA(v, double, pInfo->sch.type, val); -// return NULL != taosHashGet((SHashObj *)pInfo->q, (char *)&v, sizeof(v)); -// } else if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR){ -// return NULL != taosHashGet((SHashObj *)pInfo->q, varDataVal(val), varDataLen(val)); -// } -// -// } -// -// int32_t ret = 0; -// if (val == NULL) { //the val is possible to be null, so check it out carefully -// ret = -1; // val is missing in table tags value pairs -// } else { -// ret = pInfo->compare(val, pInfo->q); -// } -// -// switch (pInfo->optr) { -// case TSDB_RELATION_EQUAL: { -// return ret == 0; -// } -// case TSDB_RELATION_NOT_EQUAL: { -// return ret != 0; -// } -// case TSDB_RELATION_GREATER_EQUAL: { -// return ret >= 0; -// } -// case TSDB_RELATION_GREATER: { -// return ret > 0; -// } -// case TSDB_RELATION_LESS_EQUAL: { -// return ret <= 0; -// } -// case TSDB_RELATION_LESS: { -// return ret < 0; -// } -// case TSDB_RELATION_LIKE: { -// return ret == 0; -// } -// case TSDB_RELATION_MATCH: { -// return ret == 0; -// } -// case TSDB_RELATION_NMATCH: { -// return ret == 0; -// } -// case TSDB_RELATION_IN: { -// return ret == 1; -// } -// -// default: -// assert(false); -// } -// -// return true; -//} - -// static void getTableListfromSkipList(tExprNode *pExpr, SSkipList *pSkipList, SArray *result, SExprTraverseSupp -// *param); - -// static int32_t doQueryTableList(STable* pSTable, SArray* pRes, tExprNode* pExpr) { -// // // query according to the expression tree -// SExprTraverseSupp supp = { -// .nodeFilterFn = (__result_filter_fn_t)tableFilterFp, -// .setupInfoFn = filterPrepare, -// .pExtInfo = pSTable->tagSchema, -// }; -// -// getTableListfromSkipList(pExpr, pSTable->pIndex, pRes, &supp); -// tExprTreeDestroy(pExpr, destroyHelper); -// return TSDB_CODE_SUCCESS; -//} - static void* doFreeColumnInfoData(SArray* pColumnInfoData) { if (pColumnInfoData == NULL) { return NULL; @@ -3934,263 +3701,3 @@ void tsdbCleanupReadHandle(tsdbReaderT queryHandle) { taosMemoryFreeClear(pTsdbReadHandle); } - -#if 0 - -static void 
applyFilterToSkipListNode(SSkipList *pSkipList, tExprNode *pExpr, SArray *pResult, SExprTraverseSupp *param) { - SSkipListIterator* iter = tSkipListCreateIter(pSkipList); - - // Scan each node in the skiplist by using iterator - while (tSkipListIterNext(iter)) { - SSkipListNode *pNode = tSkipListIterGet(iter); - if (exprTreeApplyFilter(pExpr, pNode, param)) { - taosArrayPush(pResult, &(SL_GET_NODE_DATA(pNode))); - } - } - - tSkipListDestroyIter(iter); -} - -typedef struct { - char* v; - int32_t optr; -} SEndPoint; - -typedef struct { - SEndPoint* start; - SEndPoint* end; -} SQueryCond; - -// todo check for malloc failure -static int32_t setQueryCond(tQueryInfo *queryColInfo, SQueryCond* pCond) { - int32_t optr = queryColInfo->optr; - - if (optr == TSDB_RELATION_GREATER || optr == TSDB_RELATION_GREATER_EQUAL || - optr == TSDB_RELATION_EQUAL || optr == TSDB_RELATION_NOT_EQUAL) { - pCond->start = taosMemoryCalloc(1, sizeof(SEndPoint)); - pCond->start->optr = queryColInfo->optr; - pCond->start->v = queryColInfo->q; - } else if (optr == TSDB_RELATION_LESS || optr == TSDB_RELATION_LESS_EQUAL) { - pCond->end = taosMemoryCalloc(1, sizeof(SEndPoint)); - pCond->end->optr = queryColInfo->optr; - pCond->end->v = queryColInfo->q; - } else if (optr == TSDB_RELATION_IN) { - pCond->start = taosMemoryCalloc(1, sizeof(SEndPoint)); - pCond->start->optr = queryColInfo->optr; - pCond->start->v = queryColInfo->q; - } else if (optr == TSDB_RELATION_LIKE) { - assert(0); - } else if (optr == TSDB_RELATION_MATCH) { - assert(0); - } else if (optr == TSDB_RELATION_NMATCH) { - assert(0); - } - - return TSDB_CODE_SUCCESS; -} - -static void queryIndexedColumn(SSkipList* pSkipList, tQueryInfo* pQueryInfo, SArray* result) { - SSkipListIterator* iter = NULL; - - SQueryCond cond = {0}; - if (setQueryCond(pQueryInfo, &cond) != TSDB_CODE_SUCCESS) { - //todo handle error - } - - if (cond.start != NULL) { - iter = tSkipListCreateIterFromVal(pSkipList, (char*) cond.start->v, pSkipList->type, TSDB_ORDER_ASC); - } else { - iter = tSkipListCreateIterFromVal(pSkipList, (char*)(cond.end ? 
cond.end->v: NULL), pSkipList->type, TSDB_ORDER_DESC); - } - - if (cond.start != NULL) { - int32_t optr = cond.start->optr; - - if (optr == TSDB_RELATION_EQUAL) { // equals - while(tSkipListIterNext(iter)) { - SSkipListNode* pNode = tSkipListIterGet(iter); - - int32_t ret = pQueryInfo->compare(SL_GET_NODE_KEY(pSkipList, pNode), cond.start->v); - if (ret != 0) { - break; - } - - STableKeyInfo info = {.pTable = (void*)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; - taosArrayPush(result, &info); - } - } else if (optr == TSDB_RELATION_GREATER || optr == TSDB_RELATION_GREATER_EQUAL) { // greater equal - bool comp = true; - int32_t ret = 0; - - while(tSkipListIterNext(iter)) { - SSkipListNode* pNode = tSkipListIterGet(iter); - - if (comp) { - ret = pQueryInfo->compare(SL_GET_NODE_KEY(pSkipList, pNode), cond.start->v); - assert(ret >= 0); - } - - if (ret == 0 && optr == TSDB_RELATION_GREATER) { - continue; - } else { - STableKeyInfo info = {.pTable = (void*)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; - taosArrayPush(result, &info); - comp = false; - } - } - } else if (optr == TSDB_RELATION_NOT_EQUAL) { // not equal - bool comp = true; - - while(tSkipListIterNext(iter)) { - SSkipListNode* pNode = tSkipListIterGet(iter); - comp = comp && (pQueryInfo->compare(SL_GET_NODE_KEY(pSkipList, pNode), cond.start->v) == 0); - if (comp) { - continue; - } - - STableKeyInfo info = {.pTable = (void*)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; - taosArrayPush(result, &info); - } - - tSkipListDestroyIter(iter); - - comp = true; - iter = tSkipListCreateIterFromVal(pSkipList, (char*) cond.start->v, pSkipList->type, TSDB_ORDER_DESC); - while(tSkipListIterNext(iter)) { - SSkipListNode* pNode = tSkipListIterGet(iter); - comp = comp && (pQueryInfo->compare(SL_GET_NODE_KEY(pSkipList, pNode), cond.start->v) == 0); - if (comp) { - continue; - } - - STableKeyInfo info = {.pTable = (void*)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; - taosArrayPush(result, &info); - } - - } else if (optr == TSDB_RELATION_IN) { - while(tSkipListIterNext(iter)) { - SSkipListNode* pNode = tSkipListIterGet(iter); - - int32_t ret = pQueryInfo->compare(SL_GET_NODE_KEY(pSkipList, pNode), cond.start->v); - if (ret != 0) { - break; - } - - STableKeyInfo info = {.pTable = (void*)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; - taosArrayPush(result, &info); - } - - } else { - assert(0); - } - } else { - int32_t optr = cond.end ? 
cond.end->optr : TSDB_RELATION_INVALID; - if (optr == TSDB_RELATION_LESS || optr == TSDB_RELATION_LESS_EQUAL) { - bool comp = true; - int32_t ret = 0; - - while (tSkipListIterNext(iter)) { - SSkipListNode *pNode = tSkipListIterGet(iter); - - if (comp) { - ret = pQueryInfo->compare(SL_GET_NODE_KEY(pSkipList, pNode), cond.end->v); - assert(ret <= 0); - } - - if (ret == 0 && optr == TSDB_RELATION_LESS) { - continue; - } else { - STableKeyInfo info = {.pTable = (void *)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; - taosArrayPush(result, &info); - comp = false; // no need to compare anymore - } - } - } else { - assert(pQueryInfo->optr == TSDB_RELATION_ISNULL || pQueryInfo->optr == TSDB_RELATION_NOTNULL); - - while (tSkipListIterNext(iter)) { - SSkipListNode *pNode = tSkipListIterGet(iter); - - bool isnull = isNull(SL_GET_NODE_KEY(pSkipList, pNode), pQueryInfo->sch.type); - if ((pQueryInfo->optr == TSDB_RELATION_ISNULL && isnull) || - (pQueryInfo->optr == TSDB_RELATION_NOTNULL && (!isnull))) { - STableKeyInfo info = {.pTable = (void *)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; - taosArrayPush(result, &info); - } - } - } - } - - taosMemoryFree(cond.start); - taosMemoryFree(cond.end); - tSkipListDestroyIter(iter); -} - -static void queryIndexlessColumn(SSkipList* pSkipList, tQueryInfo* pQueryInfo, SArray* res, __result_filter_fn_t filterFp) { - SSkipListIterator* iter = tSkipListCreateIter(pSkipList); - - while (tSkipListIterNext(iter)) { - bool addToResult = false; - - SSkipListNode *pNode = tSkipListIterGet(iter); - - char *pData = SL_GET_NODE_DATA(pNode); - tstr *name = (tstr*) tsdbGetTableName((void*) pData); - - // todo speed up by using hash - if (pQueryInfo->sch.colId == TSDB_TBNAME_COLUMN_INDEX) { - if (pQueryInfo->optr == TSDB_RELATION_IN) { - addToResult = pQueryInfo->compare(name, pQueryInfo->q); - } else if (pQueryInfo->optr == TSDB_RELATION_LIKE || - pQueryInfo->optr == TSDB_RELATION_MATCH || - pQueryInfo->optr == TSDB_RELATION_NMATCH) { - addToResult = !pQueryInfo->compare(name, pQueryInfo->q); - } - } else { - addToResult = filterFp(pNode, pQueryInfo); - } - - if (addToResult) { - STableKeyInfo info = {.pTable = (void*)pData, .lastKey = TSKEY_INITIAL_VAL}; - taosArrayPush(res, &info); - } - } - - tSkipListDestroyIter(iter); -} - -// Apply the filter expression to each node in the skiplist to acquire the qualified nodes in skip list -//void getTableListfromSkipList(tExprNode *pExpr, SSkipList *pSkipList, SArray *result, SExprTraverseSupp *param) { -// if (pExpr == NULL) { -// return; -// } -// -// tExprNode *pLeft = pExpr->_node.pLeft; -// tExprNode *pRight = pExpr->_node.pRight; -// -// // column project -// if (pLeft->nodeType != TSQL_NODE_EXPR && pRight->nodeType != TSQL_NODE_EXPR) { -// assert(pLeft->nodeType == TSQL_NODE_COL && (pRight->nodeType == TSQL_NODE_VALUE || pRight->nodeType == TSQL_NODE_DUMMY)); -// -// param->setupInfoFn(pExpr, param->pExtInfo); -// -// tQueryInfo *pQueryInfo = pExpr->_node.info; -// if (pQueryInfo->indexed && (pQueryInfo->optr != TSDB_RELATION_LIKE -// && pQueryInfo->optr != TSDB_RELATION_MATCH && pQueryInfo->optr != TSDB_RELATION_NMATCH -// && pQueryInfo->optr != TSDB_RELATION_IN)) { -// queryIndexedColumn(pSkipList, pQueryInfo, result); -// } else { -// queryIndexlessColumn(pSkipList, pQueryInfo, result, param->nodeFilterFn); -// } -// -// return; -// } -// -// // The value of hasPK is always 0. 
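// With hasPK always 0 on both operands, the assert below merely confirms that
// no primary-key predicate reached this point. Leaf comparisons were already
// dispatched above: an indexed column with an order-preserving operator goes
// through queryIndexedColumn (seek, then a bounded walk), while LIKE/MATCH/
// NMATCH/IN and unindexed columns go through queryIndexlessColumn (a full walk
// with the filter callback). Interior AND/OR nodes land here and are resolved
// by applyFilterToSkipListNode, which applies the whole expression tree to
// every skiplist node once.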
-// uint8_t weight = pLeft->_node.hasPK + pRight->_node.hasPK; -// assert(weight == 0 && pSkipList != NULL && taosArrayGetSize(result) == 0); -// -// //apply the hierarchical filter expression to every node in skiplist to find the qualified nodes -// applyFilterToSkipListNode(pSkipList, pExpr, result, param); -//} -#endif diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index 40f75804dd..621ba11872 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -678,6 +678,7 @@ static int vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq, in int32_t nRows; int32_t tsize, ret; SEncoder encoder = {0}; + SArray *newTbUids = NULL; terrno = TSDB_CODE_SUCCESS; pRsp->code = 0; @@ -698,6 +699,7 @@ static int vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq, in } submitRsp.pArray = taosArrayInit(pSubmitReq->numOfBlocks, sizeof(SSubmitBlkRsp)); + newTbUids = taosArrayInit(pSubmitReq->numOfBlocks, sizeof(int64_t)); if (!submitRsp.pArray) { pRsp->code = TSDB_CODE_OUT_OF_MEMORY; goto _exit; @@ -727,6 +729,7 @@ static int vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq, in goto _exit; } } + taosArrayPush(newTbUids, &createTbReq.uid); submitBlkRsp.uid = createTbReq.uid; submitBlkRsp.tblFName = taosMemoryMalloc(strlen(pVnode->config.dbname) + strlen(createTbReq.name) + 2); @@ -754,8 +757,10 @@ static int vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq, in submitRsp.affectedRows += submitBlkRsp.affectedRows; taosArrayPush(submitRsp.pArray, &submitBlkRsp); } + tqUpdateTbUidList(pVnode->pTq, newTbUids, true); _exit: + taosArrayDestroy(newTbUids); tEncodeSize(tEncodeSSubmitRsp, &submitRsp, tsize, ret); pRsp->pCont = rpcMallocCont(tsize); pRsp->contLen = tsize; diff --git a/source/libs/catalog/inc/catalogInt.h b/source/libs/catalog/inc/catalogInt.h index 40bd3659a3..230949ab7f 100644 --- a/source/libs/catalog/inc/catalogInt.h +++ b/source/libs/catalog/inc/catalogInt.h @@ -49,19 +49,21 @@ enum { }; enum { - CTG_ACT_UPDATE_VG = 0, - CTG_ACT_UPDATE_TBL, - CTG_ACT_REMOVE_DB, - CTG_ACT_REMOVE_STB, - CTG_ACT_REMOVE_TBL, - CTG_ACT_UPDATE_USER, - CTG_ACT_MAX + CTG_OP_UPDATE_VGROUP = 0, + CTG_OP_UPDATE_TB_META, + CTG_OP_DROP_DB_CACHE, + CTG_OP_DROP_STB_META, + CTG_OP_DROP_TB_META, + CTG_OP_UPDATE_USER, + CTG_OP_UPDATE_VG_EPSET, + CTG_OP_MAX }; typedef enum { CTG_TASK_GET_QNODE = 0, CTG_TASK_GET_DB_VGROUP, CTG_TASK_GET_DB_CFG, + CTG_TASK_GET_DB_INFO, CTG_TASK_GET_TB_META, CTG_TASK_GET_TB_HASH, CTG_TASK_GET_INDEX, @@ -98,6 +100,10 @@ typedef struct SCtgDbCfgCtx { char dbFName[TSDB_DB_FNAME_LEN]; } SCtgDbCfgCtx; +typedef struct SCtgDbInfoCtx { + char dbFName[TSDB_DB_FNAME_LEN]; +} SCtgDbInfoCtx; + typedef struct SCtgTbHashCtx { char dbFName[TSDB_DB_FNAME_LEN]; SName* pName; @@ -182,6 +188,7 @@ typedef struct SCtgJob { int32_t dbCfgNum; int32_t indexNum; int32_t userNum; + int32_t dbInfoNum; } SCtgJob; typedef struct SCtgMsgCtx { @@ -285,16 +292,22 @@ typedef struct SCtgUpdateUserMsg { SGetUserAuthRsp userAuth; } SCtgUpdateUserMsg; +typedef struct SCtgUpdateEpsetMsg { + SCatalog* pCtg; + char dbFName[TSDB_DB_FNAME_LEN]; + int32_t vgId; + SEpSet epSet; +} SCtgUpdateEpsetMsg; -typedef struct SCtgMetaAction { - int32_t act; +typedef struct SCtgCacheOperation { + int32_t opId; void *data; bool syncReq; uint64_t seqId; -} SCtgMetaAction; +} SCtgCacheOperation; typedef struct SCtgQNode { - SCtgMetaAction action; + SCtgCacheOperation op; struct SCtgQNode *next; } SCtgQNode; @@ -321,13 
+334,13 @@ typedef struct SCatalogMgmt { } SCatalogMgmt; typedef uint32_t (*tableNameHashFp)(const char *, uint32_t); -typedef int32_t (*ctgActFunc)(SCtgMetaAction *); +typedef int32_t (*ctgOpFunc)(SCtgCacheOperation *); -typedef struct SCtgAction { - int32_t actId; +typedef struct SCtgOperation { + int32_t opId; char name[32]; - ctgActFunc func; -} SCtgAction; + ctgOpFunc func; +} SCtgOperation; #define CTG_QUEUE_ADD() atomic_add_fetch_64(&gCtgMgmt.queue.qRemainNum, 1) #define CTG_QUEUE_SUB() atomic_sub_fetch_64(&gCtgMgmt.queue.qRemainNum, 1) @@ -435,12 +448,13 @@ int32_t ctgdShowCacheInfo(void); int32_t ctgRemoveTbMetaFromCache(SCatalog* pCtg, SName* pTableName, bool syncReq); int32_t ctgGetTbMetaFromCache(CTG_PARAMS, SCtgTbMetaCtx* ctx, STableMeta** pTableMeta); -int32_t ctgActUpdateVg(SCtgMetaAction *action); -int32_t ctgActUpdateTb(SCtgMetaAction *action); -int32_t ctgActRemoveDB(SCtgMetaAction *action); -int32_t ctgActRemoveStb(SCtgMetaAction *action); -int32_t ctgActRemoveTb(SCtgMetaAction *action); -int32_t ctgActUpdateUser(SCtgMetaAction *action); +int32_t ctgOpUpdateVgroup(SCtgCacheOperation *action); +int32_t ctgOpUpdateTbMeta(SCtgCacheOperation *action); +int32_t ctgOpDropDbCache(SCtgCacheOperation *action); +int32_t ctgOpDropStbMeta(SCtgCacheOperation *action); +int32_t ctgOpDropTbMeta(SCtgCacheOperation *action); +int32_t ctgOpUpdateUser(SCtgCacheOperation *action); +int32_t ctgOpUpdateEpset(SCtgCacheOperation *operation); int32_t ctgAcquireVgInfoFromCache(SCatalog* pCtg, const char *dbFName, SCtgDBCache **pCache); void ctgReleaseDBCache(SCatalog *pCtg, SCtgDBCache *dbCache); void ctgReleaseVgInfo(SCtgDBCache *dbCache); @@ -449,12 +463,13 @@ int32_t ctgTbMetaExistInCache(SCatalog* pCtg, char *dbFName, char* tbName, int32 int32_t ctgReadTbMetaFromCache(SCatalog* pCtg, SCtgTbMetaCtx* ctx, STableMeta** pTableMeta); int32_t ctgReadTbVerFromCache(SCatalog *pCtg, const SName *pTableName, int32_t *sver, int32_t *tver, int32_t *tbType, uint64_t *suid, char *stbName); int32_t ctgChkAuthFromCache(SCatalog* pCtg, const char* user, const char* dbFName, AUTH_TYPE type, bool *inCache, bool *pass); -int32_t ctgPutRmDBToQueue(SCatalog* pCtg, const char *dbFName, int64_t dbId); -int32_t ctgPutRmStbToQueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, const char *stbName, uint64_t suid, bool syncReq); -int32_t ctgPutRmTbToQueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, const char *tbName, bool syncReq); -int32_t ctgPutUpdateVgToQueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, SDBVgInfo* dbInfo, bool syncReq); -int32_t ctgPutUpdateTbToQueue(SCatalog* pCtg, STableMetaOutput *output, bool syncReq); -int32_t ctgPutUpdateUserToQueue(SCatalog* pCtg, SGetUserAuthRsp *pAuth, bool syncReq); +int32_t ctgDropDbCacheEnqueue(SCatalog* pCtg, const char *dbFName, int64_t dbId); +int32_t ctgDropStbMetaEnqueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, const char *stbName, uint64_t suid, bool syncReq); +int32_t ctgDropTbMetaEnqueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, const char *tbName, bool syncReq); +int32_t ctgUpdateVgroupEnqueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, SDBVgInfo* dbInfo, bool syncReq); +int32_t ctgUpdateTbMetaEnqueue(SCatalog* pCtg, STableMetaOutput *output, bool syncReq); +int32_t ctgUpdateUserEnqueue(SCatalog* pCtg, SGetUserAuthRsp *pAuth, bool syncReq); +int32_t ctgUpdateVgEpsetEnqueue(SCatalog* pCtg, char *dbFName, int32_t vgId, SEpSet* pEpSet); int32_t ctgMetaRentInit(SCtgRentMgmt *mgmt, uint32_t rentSec, int8_t type); int32_t 
ctgMetaRentAdd(SCtgRentMgmt *mgmt, void *meta, int64_t id, int32_t size); int32_t ctgMetaRentGet(SCtgRentMgmt *mgmt, void **res, uint32_t *num, int32_t size); diff --git a/source/libs/catalog/src/catalog.c b/source/libs/catalog/src/catalog.c index 1cfc498c25..6519440dad 100644 --- a/source/libs/catalog/src/catalog.c +++ b/source/libs/catalog/src/catalog.c @@ -41,9 +41,9 @@ int32_t ctgRemoveTbMetaFromCache(SCatalog* pCtg, SName* pTableName, bool syncReq tNameGetFullDbName(pTableName, dbFName); if (TSDB_SUPER_TABLE == tblMeta->tableType) { - CTG_ERR_JRET(ctgPutRmStbToQueue(pCtg, dbFName, tbCtx.tbInfo.dbId, pTableName->tname, tblMeta->suid, syncReq)); + CTG_ERR_JRET(ctgDropStbMetaEnqueue(pCtg, dbFName, tbCtx.tbInfo.dbId, pTableName->tname, tblMeta->suid, syncReq)); } else { - CTG_ERR_JRET(ctgPutRmTbToQueue(pCtg, dbFName, tbCtx.tbInfo.dbId, pTableName->tname, syncReq)); + CTG_ERR_JRET(ctgDropTbMetaEnqueue(pCtg, dbFName, tbCtx.tbInfo.dbId, pTableName->tname, syncReq)); } _return: @@ -72,7 +72,7 @@ int32_t ctgGetDBVgInfo(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps, con CTG_ERR_JRET(ctgCloneVgInfo(DbOut.dbVgroup, pInfo)); - CTG_ERR_RET(ctgPutUpdateVgToQueue(pCtg, dbFName, DbOut.dbId, DbOut.dbVgroup, false)); + CTG_ERR_RET(ctgUpdateVgroupEnqueue(pCtg, dbFName, DbOut.dbId, DbOut.dbVgroup, false)); return TSDB_CODE_SUCCESS; @@ -108,13 +108,13 @@ int32_t ctgRefreshDBVgInfo(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps, if (code) { if (CTG_DB_NOT_EXIST(code) && (NULL != dbCache)) { ctgDebug("db no longer exist, dbFName:%s, dbId:%" PRIx64, input.db, input.dbId); - ctgPutRmDBToQueue(pCtg, input.db, input.dbId); + ctgDropDbCacheEnqueue(pCtg, input.db, input.dbId); } CTG_ERR_RET(code); } - CTG_ERR_RET(ctgPutUpdateVgToQueue(pCtg, dbFName, DbOut.dbId, DbOut.dbVgroup, true)); + CTG_ERR_RET(ctgUpdateVgroupEnqueue(pCtg, dbFName, DbOut.dbId, DbOut.dbVgroup, true)); return TSDB_CODE_SUCCESS; } @@ -201,7 +201,7 @@ int32_t ctgRefreshTbMeta(CTG_PARAMS, SCtgTbMetaCtx* ctx, STableMetaOutput **pOut CTG_ERR_JRET(ctgCloneMetaOutput(output, pOutput)); } - CTG_ERR_JRET(ctgPutUpdateTbToQueue(pCtg, output, syncReq)); + CTG_ERR_JRET(ctgUpdateTbMetaEnqueue(pCtg, output, syncReq)); return TSDB_CODE_SUCCESS; @@ -298,9 +298,9 @@ _return: } if (TSDB_SUPER_TABLE == ctx->tbInfo.tbType) { - ctgPutRmStbToQueue(pCtg, dbFName, ctx->tbInfo.dbId, ctx->pName->tname, ctx->tbInfo.suid, false); + ctgDropStbMetaEnqueue(pCtg, dbFName, ctx->tbInfo.dbId, ctx->pName->tname, ctx->tbInfo.suid, false); } else { - ctgPutRmTbToQueue(pCtg, dbFName, ctx->tbInfo.dbId, ctx->pName->tname, false); + ctgDropTbMetaEnqueue(pCtg, dbFName, ctx->tbInfo.dbId, ctx->pName->tname, false); } } @@ -348,7 +348,7 @@ int32_t ctgChkAuth(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps, const c _return: - ctgPutUpdateUserToQueue(pCtg, &authRsp, false); + ctgUpdateUserEnqueue(pCtg, &authRsp, false); return TSDB_CODE_SUCCESS; } @@ -670,7 +670,7 @@ int32_t catalogUpdateDBVgInfo(SCatalog* pCtg, const char* dbFName, uint64_t dbId CTG_ERR_JRET(TSDB_CODE_CTG_INVALID_INPUT); } - code = ctgPutUpdateVgToQueue(pCtg, dbFName, dbId, dbInfo, false); + code = ctgUpdateVgroupEnqueue(pCtg, dbFName, dbId, dbInfo, false); _return: @@ -691,7 +691,7 @@ int32_t catalogRemoveDB(SCatalog* pCtg, const char* dbFName, uint64_t dbId) { CTG_API_LEAVE(TSDB_CODE_SUCCESS); } - CTG_ERR_JRET(ctgPutRmDBToQueue(pCtg, dbFName, dbId)); + CTG_ERR_JRET(ctgDropDbCacheEnqueue(pCtg, dbFName, dbId)); CTG_API_LEAVE(TSDB_CODE_SUCCESS); @@ -701,7 +701,19 @@ _return: } int32_t 
catalogUpdateVgEpSet(SCatalog* pCtg, const char* dbFName, int32_t vgId, SEpSet *epSet) { - return 0; + CTG_API_ENTER(); + + int32_t code = 0; + + if (NULL == pCtg || NULL == dbFName || NULL == epSet) { + CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT); + } + + CTG_ERR_JRET(ctgUpdateVgEpsetEnqueue(pCtg, (char*)dbFName, vgId, epSet)); + +_return: + + CTG_API_LEAVE(code); } int32_t catalogRemoveTableMeta(SCatalog* pCtg, SName* pTableName) { @@ -738,7 +750,7 @@ int32_t catalogRemoveStbMeta(SCatalog* pCtg, const char* dbFName, uint64_t dbId, CTG_API_LEAVE(TSDB_CODE_SUCCESS); } - CTG_ERR_JRET(ctgPutRmStbToQueue(pCtg, dbFName, dbId, stbName, suid, true)); + CTG_ERR_JRET(ctgDropStbMetaEnqueue(pCtg, dbFName, dbId, stbName, suid, true)); CTG_API_LEAVE(TSDB_CODE_SUCCESS); @@ -791,7 +803,7 @@ int32_t catalogUpdateSTableMeta(SCatalog* pCtg, STableMetaRsp *rspMsg) { CTG_ERR_JRET(queryCreateTableMetaFromMsg(rspMsg, true, &output->tbMeta)); - CTG_ERR_JRET(ctgPutUpdateTbToQueue(pCtg, output, false)); + CTG_ERR_JRET(ctgUpdateTbMetaEnqueue(pCtg, output, false)); CTG_API_LEAVE(code); @@ -1152,7 +1164,7 @@ int32_t catalogUpdateUserAuthInfo(SCatalog* pCtg, SGetUserAuthRsp* pAuth) { CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT); } - CTG_API_LEAVE(ctgPutUpdateUserToQueue(pCtg, pAuth, false)); + CTG_API_LEAVE(ctgUpdateUserEnqueue(pCtg, pAuth, false)); } diff --git a/source/libs/catalog/src/ctgAsync.c b/source/libs/catalog/src/ctgAsync.c index 0341c3638b..eb84bf00a4 100644 --- a/source/libs/catalog/src/ctgAsync.c +++ b/source/libs/catalog/src/ctgAsync.c @@ -95,6 +95,30 @@ int32_t ctgInitGetDbCfgTask(SCtgJob *pJob, int32_t taskIdx, char *dbFName) { return TSDB_CODE_SUCCESS; } +int32_t ctgInitGetDbInfoTask(SCtgJob *pJob, int32_t taskIdx, char *dbFName) { + SCtgTask task = {0}; + + task.type = CTG_TASK_GET_DB_INFO; + task.taskId = taskIdx; + task.pJob = pJob; + + task.taskCtx = taosMemoryCalloc(1, sizeof(SCtgDbInfoCtx)); + if (NULL == task.taskCtx) { + CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); + } + + SCtgDbInfoCtx* ctx = task.taskCtx; + + memcpy(ctx->dbFName, dbFName, sizeof(ctx->dbFName)); + + taosArrayPush(pJob->pTasks, &task); + + qDebug("QID:%" PRIx64 " task %d type %d initialized, dbFName:%s", pJob->queryId, taskIdx, task.type, dbFName); + + return TSDB_CODE_SUCCESS; +} + + int32_t ctgInitGetTbHashTask(SCtgJob *pJob, int32_t taskIdx, SName *name) { SCtgTask task = {0}; @@ -219,8 +243,9 @@ int32_t ctgInitJob(CTG_PARAMS, SCtgJob** job, uint64_t reqId, const SCatalogReq* int32_t dbCfgNum = (int32_t)taosArrayGetSize(pReq->pDbCfg); int32_t indexNum = (int32_t)taosArrayGetSize(pReq->pIndex); int32_t userNum = (int32_t)taosArrayGetSize(pReq->pUser); + int32_t dbInfoNum = (int32_t)taosArrayGetSize(pReq->pDbInfo); - int32_t taskNum = tbMetaNum + dbVgNum + udfNum + tbHashNum + qnodeNum + dbCfgNum + indexNum + userNum; + int32_t taskNum = tbMetaNum + dbVgNum + udfNum + tbHashNum + qnodeNum + dbCfgNum + indexNum + userNum + dbInfoNum; if (taskNum <= 0) { ctgError("empty input for job, taskNum:%d", taskNum); CTG_ERR_RET(TSDB_CODE_CTG_INVALID_INPUT); @@ -249,6 +274,7 @@ int32_t ctgInitJob(CTG_PARAMS, SCtgJob** job, uint64_t reqId, const SCatalogReq* pJob->dbCfgNum = dbCfgNum; pJob->indexNum = indexNum; pJob->userNum = userNum; + pJob->dbInfoNum = dbInfoNum; pJob->pTasks = taosArrayInit(taskNum, sizeof(SCtgTask)); @@ -268,6 +294,11 @@ int32_t ctgInitJob(CTG_PARAMS, SCtgJob** job, uint64_t reqId, const SCatalogReq* CTG_ERR_JRET(ctgInitGetDbCfgTask(pJob, taskIdx++, dbFName)); } + for (int32_t i = 0; i < dbInfoNum; ++i) { + char *dbFName = 
taosArrayGet(pReq->pDbInfo, i); + CTG_ERR_JRET(ctgInitGetDbInfoTask(pJob, taskIdx++, dbFName)); + } + for (int32_t i = 0; i < tbMetaNum; ++i) { SName *name = taosArrayGet(pReq->pTableMeta, i); CTG_ERR_JRET(ctgInitGetTbMetaTask(pJob, taskIdx++, name)); @@ -395,6 +426,20 @@ int32_t ctgDumpDbCfgRes(SCtgTask* pTask) { return TSDB_CODE_SUCCESS; } +int32_t ctgDumpDbInfoRes(SCtgTask* pTask) { + SCtgJob* pJob = pTask->pJob; + if (NULL == pJob->jobRes.pDbInfo) { + pJob->jobRes.pDbInfo = taosArrayInit(pJob->dbInfoNum, sizeof(SDbInfo)); + if (NULL == pJob->jobRes.pDbInfo) { + CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); + } + } + + taosArrayPush(pJob->jobRes.pDbInfo, pTask->res); + + return TSDB_CODE_SUCCESS; +} + int32_t ctgDumpUdfRes(SCtgTask* pTask) { SCtgJob* pJob = pTask->pJob; if (NULL == pJob->jobRes.pUdfList) { @@ -620,7 +665,7 @@ int32_t ctgHandleGetDbVgRsp(SCtgTask* pTask, int32_t reqType, const SDataBuf *pM CTG_ERR_JRET(ctgGenerateVgList(pCtg, pOut->dbVgroup->vgHash, (SArray**)&pTask->res)); - CTG_ERR_JRET(ctgPutUpdateVgToQueue(pCtg, ctx->dbFName, pOut->dbId, pOut->dbVgroup, false)); + CTG_ERR_JRET(ctgUpdateVgroupEnqueue(pCtg, ctx->dbFName, pOut->dbId, pOut->dbVgroup, false)); pOut->dbVgroup = NULL; break; @@ -659,7 +704,7 @@ int32_t ctgHandleGetTbHashRsp(SCtgTask* pTask, int32_t reqType, const SDataBuf * CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, pOut->dbVgroup, ctx->pName, (SVgroupInfo*)pTask->res)); - CTG_ERR_JRET(ctgPutUpdateVgToQueue(pCtg, ctx->dbFName, pOut->dbId, pOut->dbVgroup, false)); + CTG_ERR_JRET(ctgUpdateVgroupEnqueue(pCtg, ctx->dbFName, pOut->dbId, pOut->dbVgroup, false)); pOut->dbVgroup = NULL; break; @@ -691,6 +736,11 @@ _return: CTG_RET(code); } +int32_t ctgHandleGetDbInfoRsp(SCtgTask* pTask, int32_t reqType, const SDataBuf *pMsg, int32_t rspCode) { + CTG_RET(TSDB_CODE_APP_ERROR); +} + + int32_t ctgHandleGetQnodeRsp(SCtgTask* pTask, int32_t reqType, const SDataBuf *pMsg, int32_t rspCode) { int32_t code = 0; CTG_ERR_JRET(ctgProcessRspMsg(pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target)); @@ -769,7 +819,7 @@ _return: } } - ctgPutUpdateUserToQueue(pCtg, pOut, false); + ctgUpdateUserEnqueue(pCtg, pOut, false); taosMemoryFreeClear(pTask->msgCtx.out); ctgHandleTaskEnd(pTask, code); @@ -933,6 +983,41 @@ int32_t ctgLaunchGetDbCfgTask(SCtgTask *pTask) { return TSDB_CODE_SUCCESS; } +int32_t ctgLaunchGetDbInfoTask(SCtgTask *pTask) { + int32_t code = 0; + SCatalog* pCtg = pTask->pJob->pCtg; + void *pTrans = pTask->pJob->pTrans; + const SEpSet* pMgmtEps = &pTask->pJob->pMgmtEps; + SCtgDBCache *dbCache = NULL; + SCtgDbInfoCtx* pCtx = (SCtgDbInfoCtx*)pTask->taskCtx; + + pTask->res = taosMemoryCalloc(1, sizeof(SDbInfo)); + if (NULL == pTask->res) { + CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); + } + + SDbInfo* pInfo = (SDbInfo*)pTask->res; + CTG_ERR_RET(ctgAcquireVgInfoFromCache(pCtg, pCtx->dbFName, &dbCache)); + if (NULL != dbCache) { + pInfo->vgVer = dbCache->vgInfo->vgVersion; + pInfo->dbId = dbCache->dbId; + pInfo->tbNum = dbCache->vgInfo->numOfTable; + } else { + pInfo->vgVer = CTG_DEFAULT_INVALID_VERSION; + } + + CTG_ERR_JRET(ctgHandleTaskEnd(pTask, 0)); + +_return: + + if (dbCache) { + ctgReleaseVgInfo(dbCache); + ctgReleaseDBCache(pCtg, dbCache); + } + + CTG_RET(code); +} + int32_t ctgLaunchGetIndexTask(SCtgTask *pTask) { SCatalog* pCtg = pTask->pJob->pCtg; void *pTrans = pTask->pJob->pTrans; @@ -992,6 +1077,7 @@ SCtgAsyncFps gCtgAsyncFps[] = { {ctgLaunchGetQnodeTask, ctgHandleGetQnodeRsp, ctgDumpQnodeRes}, {ctgLaunchGetDbVgTask, ctgHandleGetDbVgRsp, 
ctgDumpDbVgRes}, {ctgLaunchGetDbCfgTask, ctgHandleGetDbCfgRsp, ctgDumpDbCfgRes}, + {ctgLaunchGetDbInfoTask, ctgHandleGetDbInfoRsp, ctgDumpDbInfoRes}, {ctgLaunchGetTbMetaTask, ctgHandleGetTbMetaRsp, ctgDumpTbMetaRes}, {ctgLaunchGetTbHashTask, ctgHandleGetTbHashRsp, ctgDumpTbHashRes}, {ctgLaunchGetIndexTask, ctgHandleGetIndexRsp, ctgDumpIndexRes}, diff --git a/source/libs/catalog/src/ctgCache.c b/source/libs/catalog/src/ctgCache.c index 9161c7cb32..d1e2056bec 100644 --- a/source/libs/catalog/src/ctgCache.c +++ b/source/libs/catalog/src/ctgCache.c @@ -19,37 +19,43 @@ #include "catalogInt.h" #include "systable.h" -SCtgAction gCtgAction[CTG_ACT_MAX] = { +SCtgOperation gCtgCacheOperation[CTG_OP_MAX] = { { - CTG_ACT_UPDATE_VG, + CTG_OP_UPDATE_VGROUP, "update vgInfo", - ctgActUpdateVg + ctgOpUpdateVgroup }, { - CTG_ACT_UPDATE_TBL, + CTG_OP_UPDATE_TB_META, "update tbMeta", - ctgActUpdateTb + ctgOpUpdateTbMeta }, { - CTG_ACT_REMOVE_DB, - "remove DB", - ctgActRemoveDB + CTG_OP_DROP_DB_CACHE, + "drop DB", + ctgOpDropDbCache }, { - CTG_ACT_REMOVE_STB, - "remove stbMeta", - ctgActRemoveStb + CTG_OP_DROP_STB_META, + "drop stbMeta", + ctgOpDropStbMeta }, { - CTG_ACT_REMOVE_TBL, - "remove tbMeta", - ctgActRemoveTb + CTG_OP_DROP_TB_META, + "drop tbMeta", + ctgOpDropTbMeta }, { - CTG_ACT_UPDATE_USER, + CTG_OP_UPDATE_USER, "update user", - ctgActUpdateUser + ctgOpUpdateUser + }, + { + CTG_OP_UPDATE_VG_EPSET, + "update epset", + ctgOpUpdateEpset } + }; @@ -405,7 +411,7 @@ int32_t ctgReadTbVerFromCache(SCatalog *pCtg, const SName *pTableName, int32_t * } -int32_t ctgGetTbTypeFromCache(SCatalog* pCtg, const char* dbFName, const char *tableName, int32_t *tbType) { +int32_t ctgReadTbTypeFromCache(SCatalog* pCtg, const char* dbFName, const char *tableName, int32_t *tbType) { if (NULL == pCtg->dbCache) { ctgWarn("empty db cache, dbFName:%s, tbName:%s", dbFName, tableName); return TSDB_CODE_SUCCESS; @@ -491,7 +497,7 @@ _return: } -void ctgWaitAction(SCtgMetaAction *action) { +void ctgWaitOpDone(SCtgCacheOperation *action) { while (true) { tsem_wait(&gCtgMgmt.queue.rspSem); @@ -509,7 +515,7 @@ void ctgWaitAction(SCtgMetaAction *action) { } } -void ctgPopAction(SCtgMetaAction **action) { +void ctgDequeue(SCtgCacheOperation **op) { SCtgQNode *orig = gCtgMgmt.queue.head; SCtgQNode *node = gCtgMgmt.queue.head->next; @@ -519,20 +525,20 @@ void ctgPopAction(SCtgMetaAction **action) { taosMemoryFreeClear(orig); - *action = &node->action; + *op = &node->op; } -int32_t ctgPushAction(SCatalog* pCtg, SCtgMetaAction *action) { +int32_t ctgEnqueue(SCatalog* pCtg, SCtgCacheOperation *operation) { SCtgQNode *node = taosMemoryCalloc(1, sizeof(SCtgQNode)); if (NULL == node) { qError("calloc %d failed", (int32_t)sizeof(SCtgQNode)); CTG_RET(TSDB_CODE_CTG_MEM_ERROR); } - action->seqId = atomic_add_fetch_64(&gCtgMgmt.queue.seqId, 1); + operation->seqId = atomic_add_fetch_64(&gCtgMgmt.queue.seqId, 1); - node->action = *action; + node->op = *operation; CTG_LOCK(CTG_WRITE, &gCtgMgmt.queue.qlock); gCtgMgmt.queue.tail->next = node; @@ -544,19 +550,19 @@ int32_t ctgPushAction(SCatalog* pCtg, SCtgMetaAction *action) { tsem_post(&gCtgMgmt.queue.reqSem); - ctgDebug("action [%s] added into queue", gCtgAction[action->act].name); + ctgDebug("action [%s] added into queue", gCtgCacheOperation[operation->opId].name); - if (action->syncReq) { - ctgWaitAction(action); + if (operation->syncReq) { + ctgWaitOpDone(operation); } return TSDB_CODE_SUCCESS; } -int32_t ctgPutRmDBToQueue(SCatalog* pCtg, const char *dbFName, int64_t dbId) { +int32_t 
ctgDropDbCacheEnqueue(SCatalog* pCtg, const char *dbFName, int64_t dbId) { int32_t code = 0; - SCtgMetaAction action= {.act = CTG_ACT_REMOVE_DB}; + SCtgCacheOperation action= {.opId = CTG_OP_DROP_DB_CACHE}; SCtgRemoveDBMsg *msg = taosMemoryMalloc(sizeof(SCtgRemoveDBMsg)); if (NULL == msg) { ctgError("malloc %d failed", (int32_t)sizeof(SCtgRemoveDBMsg)); @@ -574,7 +580,7 @@ int32_t ctgPutRmDBToQueue(SCatalog* pCtg, const char *dbFName, int64_t dbId) { action.data = msg; - CTG_ERR_JRET(ctgPushAction(pCtg, &action)); + CTG_ERR_JRET(ctgEnqueue(pCtg, &action)); return TSDB_CODE_SUCCESS; @@ -585,9 +591,9 @@ _return: } -int32_t ctgPutRmStbToQueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, const char *stbName, uint64_t suid, bool syncReq) { +int32_t ctgDropStbMetaEnqueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, const char *stbName, uint64_t suid, bool syncReq) { int32_t code = 0; - SCtgMetaAction action= {.act = CTG_ACT_REMOVE_STB, .syncReq = syncReq}; + SCtgCacheOperation action= {.opId = CTG_OP_DROP_STB_META, .syncReq = syncReq}; SCtgRemoveStbMsg *msg = taosMemoryMalloc(sizeof(SCtgRemoveStbMsg)); if (NULL == msg) { ctgError("malloc %d failed", (int32_t)sizeof(SCtgRemoveStbMsg)); @@ -602,7 +608,7 @@ int32_t ctgPutRmStbToQueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, co action.data = msg; - CTG_ERR_JRET(ctgPushAction(pCtg, &action)); + CTG_ERR_JRET(ctgEnqueue(pCtg, &action)); return TSDB_CODE_SUCCESS; @@ -614,9 +620,9 @@ _return: -int32_t ctgPutRmTbToQueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, const char *tbName, bool syncReq) { +int32_t ctgDropTbMetaEnqueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, const char *tbName, bool syncReq) { int32_t code = 0; - SCtgMetaAction action= {.act = CTG_ACT_REMOVE_TBL, .syncReq = syncReq}; + SCtgCacheOperation action= {.opId = CTG_OP_DROP_TB_META, .syncReq = syncReq}; SCtgRemoveTblMsg *msg = taosMemoryMalloc(sizeof(SCtgRemoveTblMsg)); if (NULL == msg) { ctgError("malloc %d failed", (int32_t)sizeof(SCtgRemoveTblMsg)); @@ -630,7 +636,7 @@ int32_t ctgPutRmTbToQueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, con action.data = msg; - CTG_ERR_JRET(ctgPushAction(pCtg, &action)); + CTG_ERR_JRET(ctgEnqueue(pCtg, &action)); return TSDB_CODE_SUCCESS; @@ -640,9 +646,9 @@ _return: CTG_RET(code); } -int32_t ctgPutUpdateVgToQueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, SDBVgInfo* dbInfo, bool syncReq) { +int32_t ctgUpdateVgroupEnqueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, SDBVgInfo* dbInfo, bool syncReq) { int32_t code = 0; - SCtgMetaAction action= {.act = CTG_ACT_UPDATE_VG, .syncReq = syncReq}; + SCtgCacheOperation action= {.opId = CTG_OP_UPDATE_VGROUP, .syncReq = syncReq}; SCtgUpdateVgMsg *msg = taosMemoryMalloc(sizeof(SCtgUpdateVgMsg)); if (NULL == msg) { ctgError("malloc %d failed", (int32_t)sizeof(SCtgUpdateVgMsg)); @@ -662,7 +668,7 @@ int32_t ctgPutUpdateVgToQueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, action.data = msg; - CTG_ERR_JRET(ctgPushAction(pCtg, &action)); + CTG_ERR_JRET(ctgEnqueue(pCtg, &action)); return TSDB_CODE_SUCCESS; @@ -673,9 +679,9 @@ _return: CTG_RET(code); } -int32_t ctgPutUpdateTbToQueue(SCatalog* pCtg, STableMetaOutput *output, bool syncReq) { +int32_t ctgUpdateTbMetaEnqueue(SCatalog* pCtg, STableMetaOutput *output, bool syncReq) { int32_t code = 0; - SCtgMetaAction action= {.act = CTG_ACT_UPDATE_TBL, .syncReq = syncReq}; + SCtgCacheOperation action= {.opId = CTG_OP_UPDATE_TB_META, .syncReq = syncReq}; SCtgUpdateTblMsg *msg = 
taosMemoryMalloc(sizeof(SCtgUpdateTblMsg)); if (NULL == msg) { ctgError("malloc %d failed", (int32_t)sizeof(SCtgUpdateTblMsg)); @@ -692,7 +698,7 @@ int32_t ctgPutUpdateTbToQueue(SCatalog* pCtg, STableMetaOutput *output, bool syn action.data = msg; - CTG_ERR_JRET(ctgPushAction(pCtg, &action)); + CTG_ERR_JRET(ctgEnqueue(pCtg, &action)); return TSDB_CODE_SUCCESS; @@ -703,9 +709,38 @@ _return: CTG_RET(code); } -int32_t ctgPutUpdateUserToQueue(SCatalog* pCtg, SGetUserAuthRsp *pAuth, bool syncReq) { +int32_t ctgUpdateVgEpsetEnqueue(SCatalog* pCtg, char *dbFName, int32_t vgId, SEpSet* pEpSet) { int32_t code = 0; - SCtgMetaAction action= {.act = CTG_ACT_UPDATE_USER, .syncReq = syncReq}; + SCtgCacheOperation operation= {.opId = CTG_OP_UPDATE_VG_EPSET}; + SCtgUpdateEpsetMsg *msg = taosMemoryMalloc(sizeof(SCtgUpdateEpsetMsg)); + if (NULL == msg) { + ctgError("malloc %d failed", (int32_t)sizeof(SCtgUpdateEpsetMsg)); + CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR); + } + + msg->pCtg = pCtg; + strcpy(msg->dbFName, dbFName); + msg->vgId = vgId; + msg->epSet = *pEpSet; + + operation.data = msg; + + CTG_ERR_JRET(ctgEnqueue(pCtg, &operation)); + + return TSDB_CODE_SUCCESS; + +_return: + + taosMemoryFreeClear(msg); + + CTG_RET(code); +} + + + +int32_t ctgUpdateUserEnqueue(SCatalog* pCtg, SGetUserAuthRsp *pAuth, bool syncReq) { + int32_t code = 0; + SCtgCacheOperation action= {.opId = CTG_OP_UPDATE_USER, .syncReq = syncReq}; SCtgUpdateUserMsg *msg = taosMemoryMalloc(sizeof(SCtgUpdateUserMsg)); if (NULL == msg) { ctgError("malloc %d failed", (int32_t)sizeof(SCtgUpdateUserMsg)); @@ -717,7 +752,7 @@ int32_t ctgPutUpdateUserToQueue(SCatalog* pCtg, SGetUserAuthRsp *pAuth, bool syn action.data = msg; - CTG_ERR_JRET(ctgPushAction(pCtg, &action)); + CTG_ERR_JRET(ctgEnqueue(pCtg, &action)); return TSDB_CODE_SUCCESS; @@ -1219,7 +1254,7 @@ int32_t ctgUpdateTbMetaToCache(SCatalog* pCtg, STableMetaOutput* pOut, bool sync int32_t code = 0; CTG_ERR_RET(ctgCloneMetaOutput(pOut, &pOutput)); - CTG_ERR_JRET(ctgPutUpdateTbToQueue(pCtg, pOutput, syncReq)); + CTG_ERR_JRET(ctgUpdateTbMetaEnqueue(pCtg, pOutput, syncReq)); return TSDB_CODE_SUCCESS; @@ -1230,9 +1265,9 @@ _return: } -int32_t ctgActUpdateVg(SCtgMetaAction *action) { +int32_t ctgOpUpdateVgroup(SCtgCacheOperation *operation) { int32_t code = 0; - SCtgUpdateVgMsg *msg = action->data; + SCtgUpdateVgMsg *msg = operation->data; CTG_ERR_JRET(ctgWriteDBVgInfoToCache(msg->pCtg, msg->dbFName, msg->dbId, &msg->dbInfo)); @@ -1244,9 +1279,9 @@ _return: CTG_RET(code); } -int32_t ctgActRemoveDB(SCtgMetaAction *action) { +int32_t ctgOpDropDbCache(SCtgCacheOperation *operation) { int32_t code = 0; - SCtgRemoveDBMsg *msg = action->data; + SCtgRemoveDBMsg *msg = operation->data; SCatalog* pCtg = msg->pCtg; SCtgDBCache *dbCache = NULL; @@ -1270,9 +1305,9 @@ _return: } -int32_t ctgActUpdateTb(SCtgMetaAction *action) { +int32_t ctgOpUpdateTbMeta(SCtgCacheOperation *operation) { int32_t code = 0; - SCtgUpdateTblMsg *msg = action->data; + SCtgUpdateTblMsg *msg = operation->data; SCatalog* pCtg = msg->pCtg; STableMetaOutput* output = msg->output; SCtgDBCache *dbCache = NULL; @@ -1316,9 +1351,9 @@ _return: } -int32_t ctgActRemoveStb(SCtgMetaAction *action) { +int32_t ctgOpDropStbMeta(SCtgCacheOperation *operation) { int32_t code = 0; - SCtgRemoveStbMsg *msg = action->data; + SCtgRemoveStbMsg *msg = operation->data; SCatalog* pCtg = msg->pCtg; SCtgDBCache *dbCache = NULL; @@ -1362,9 +1397,9 @@ _return: CTG_RET(code); } -int32_t ctgActRemoveTb(SCtgMetaAction *action) { +int32_t 
ctgOpDropTbMeta(SCtgCacheOperation *operation) { int32_t code = 0; - SCtgRemoveTblMsg *msg = action->data; + SCtgRemoveTblMsg *msg = operation->data; SCatalog* pCtg = msg->pCtg; SCtgDBCache *dbCache = NULL; @@ -1397,9 +1432,9 @@ _return: CTG_RET(code); } -int32_t ctgActUpdateUser(SCtgMetaAction *action) { +int32_t ctgOpUpdateUser(SCtgCacheOperation *operation) { int32_t code = 0; - SCtgUpdateUserMsg *msg = action->data; + SCtgUpdateUserMsg *msg = operation->data; SCatalog* pCtg = msg->pCtg; if (NULL == pCtg->userCache) { @@ -1460,14 +1495,60 @@ _return: CTG_RET(code); } -void ctgUpdateThreadFuncUnexpectedStopped(void) { +int32_t ctgOpUpdateEpset(SCtgCacheOperation *operation) { + int32_t code = 0; + SCtgUpdateEpsetMsg *msg = operation->data; + SCatalog* pCtg = msg->pCtg; + + SCtgDBCache *dbCache = NULL; + CTG_ERR_RET(ctgAcquireDBCache(pCtg, msg->dbFName, &dbCache)); + if (NULL == dbCache) { + ctgDebug("db %s not exist, ignore epset update", msg->dbFName); + goto _return; + } + + SDBVgInfo *vgInfo = NULL; + CTG_ERR_RET(ctgWAcquireVgInfo(pCtg, dbCache)); + + if (NULL == dbCache->vgInfo) { + ctgWReleaseVgInfo(dbCache); + ctgDebug("vgroup in db %s not cached, ignore epset update", msg->dbFName); + goto _return; + } + + SVgroupInfo* pInfo = taosHashGet(dbCache->vgInfo->vgHash, &msg->vgId, sizeof(msg->vgId)); + if (NULL == pInfo) { + ctgWReleaseVgInfo(dbCache); + ctgDebug("no vgroup %d in db %s, ignore epset update", msg->vgId, msg->dbFName); + goto _return; + } + + pInfo->epSet = msg->epSet; + + ctgDebug("epset in vgroup %d updated, dbFName:%s", pInfo->vgId, msg->dbFName); + + ctgWReleaseVgInfo(dbCache); + +_return: + + if (dbCache) { + ctgReleaseDBCache(msg->pCtg, dbCache); + } + + taosMemoryFreeClear(msg); + + CTG_RET(code); +} + + +void ctgUpdateThreadUnexpectedStopped(void) { if (CTG_IS_LOCKED(&gCtgMgmt.lock) > 0) CTG_UNLOCK(CTG_READ, &gCtgMgmt.lock); } void* ctgUpdateThreadFunc(void* param) { setThreadName("catalog"); #ifdef WINDOWS - atexit(ctgUpdateThreadFuncUnexpectedStopped); + atexit(ctgUpdateThreadUnexpectedStopped); #endif qInfo("catalog update thread started"); @@ -1483,17 +1564,17 @@ void* ctgUpdateThreadFunc(void* param) { break; } - SCtgMetaAction *action = NULL; - ctgPopAction(&action); - SCatalog *pCtg = ((SCtgUpdateMsgHeader *)action->data)->pCtg; + SCtgCacheOperation *operation = NULL; + ctgDequeue(&operation); + SCatalog *pCtg = ((SCtgUpdateMsgHeader *)operation->data)->pCtg; - ctgDebug("process [%s] action", gCtgAction[action->act].name); + ctgDebug("process [%s] operation", gCtgCacheOperation[operation->opId].name); - (*gCtgAction[action->act].func)(action); + (*gCtgCacheOperation[operation->opId].func)(operation); - gCtgMgmt.queue.seqDone = action->seqId; + gCtgMgmt.queue.seqDone = operation->seqId; - if (action->syncReq) { + if (operation->syncReq) { tsem_post(&gCtgMgmt.queue.rspSem); } diff --git a/source/libs/catalog/src/ctgUtil.c b/source/libs/catalog/src/ctgUtil.c index 1f78a97733..4fbf1463d8 100644 --- a/source/libs/catalog/src/ctgUtil.c +++ b/source/libs/catalog/src/ctgUtil.c @@ -42,6 +42,9 @@ void ctgFreeSMetaData(SMetaData* pData) { } taosArrayDestroy(pData->pDbCfg); pData->pDbCfg = NULL; + + taosArrayDestroy(pData->pDbInfo); + pData->pDbInfo = NULL; taosArrayDestroy(pData->pIndex); pData->pIndex = NULL; @@ -293,9 +296,12 @@ void ctgFreeTask(SCtgTask* pTask) { } case CTG_TASK_GET_DB_CFG: { taosMemoryFreeClear(pTask->taskCtx); - if (pTask->res) { - taosMemoryFreeClear(pTask->res); - } + taosMemoryFreeClear(pTask->res); + break; + } + case CTG_TASK_GET_DB_INFO: { 
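+      // For DB-info tasks, both taskCtx (SCtgDbInfoCtx) and res (SDbInfo) are
+      // single calloc'd blocks with no owned pointers as used here, so freeing
+      // each block directly is enough, mirroring CTG_TASK_GET_DB_CFG above.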
+ taosMemoryFreeClear(pTask->taskCtx); + taosMemoryFreeClear(pTask->res); break; } case CTG_TASK_GET_TB_HASH: { diff --git a/source/libs/catalog/test/catalogTests.cpp b/source/libs/catalog/test/catalogTests.cpp index 6c7d1ac4ca..4409f2c321 100644 --- a/source/libs/catalog/test/catalogTests.cpp +++ b/source/libs/catalog/test/catalogTests.cpp @@ -41,7 +41,7 @@ namespace { extern "C" int32_t ctgdGetClusterCacheNum(struct SCatalog* pCatalog, int32_t type); -extern "C" int32_t ctgActUpdateTb(SCtgMetaAction *action); +extern "C" int32_t ctgOpUpdateTbMeta(SCtgCacheOperation *action); extern "C" int32_t ctgdEnableDebug(char *option); extern "C" int32_t ctgdGetStatNum(char *option, void *res); @@ -888,9 +888,9 @@ void *ctgTestSetCtableMetaThread(void *param) { int32_t n = 0; STableMetaOutput *output = NULL; - SCtgMetaAction action = {0}; + SCtgCacheOperation operation = {0}; - action.act = CTG_ACT_UPDATE_TBL; + operation.opId = CTG_OP_UPDATE_TB_META; while (!ctgTestStop) { output = (STableMetaOutput *)taosMemoryMalloc(sizeof(STableMetaOutput)); @@ -899,9 +899,9 @@ void *ctgTestSetCtableMetaThread(void *param) { SCtgUpdateTblMsg *msg = (SCtgUpdateTblMsg *)taosMemoryMalloc(sizeof(SCtgUpdateTblMsg)); msg->pCtg = pCtg; msg->output = output; - action.data = msg; + operation.data = msg; - code = ctgActUpdateTb(&action); + code = ctgOpUpdateTbMeta(&operation); if (code) { assert(0); } diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h index 300149a22d..9ea327636d 100644 --- a/source/libs/executor/inc/executorimpl.h +++ b/source/libs/executor/inc/executorimpl.h @@ -334,6 +334,8 @@ typedef struct STableScanInfo { int32_t dataBlockLoadFlag; double sampleRatio; // data block sample ratio, 1 by default SInterval interval; // if the upstream is an interval operator, the interval info is also kept here to get the time window to check if current data block needs to be loaded. + + int32_t curTWinIdx; } STableScanInfo; typedef struct STagScanInfo { @@ -373,30 +375,33 @@ typedef struct SessionWindowSupporter { } SessionWindowSupporter; typedef struct SStreamBlockScanInfo { - SArray* pBlockLists; // multiple SSDatablock. - SSDataBlock* pRes; // result SSDataBlock - SSDataBlock* pUpdateRes; // update SSDataBlock - int32_t updateResIndex; - int32_t blockType; // current block type - int32_t validBlockIndex; // Is current data has returned? - SColumnInfo* pCols; // the output column info - uint64_t numOfExec; // execution times - void* streamBlockReader;// stream block reader handle - SArray* pColMatchInfo; // - SNode* pCondition; - SArray* tsArray; - SUpdateInfo* pUpdateInfo; - int32_t primaryTsIndex; // primary time stamp slot id - void* pDataReader; - SReadHandle readHandle; - uint64_t tableUid; // queried super table uid + SArray* pBlockLists; // multiple SSDatablock. + SSDataBlock* pRes; // result SSDataBlock + SSDataBlock* pUpdateRes; // update SSDataBlock + int32_t updateResIndex; + int32_t blockType; // current block type + int32_t validBlockIndex; // Is current data has returned? 
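+  // (validBlockIndex is the position in pBlockLists of the next block to return)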
+ SColumnInfo* pCols; // the output column info + uint64_t numOfExec; // execution times + void* streamBlockReader;// stream block reader handle + SArray* pColMatchInfo; // + SNode* pCondition; + SArray* tsArray; + SUpdateInfo* pUpdateInfo; + + SExprInfo* pPseudoExpr; + int32_t numOfPseudoExpr; + + int32_t primaryTsIndex; // primary time stamp slot id + void* pDataReader; + SReadHandle readHandle; + uint64_t tableUid; // queried super table uid EStreamScanMode scanMode; SOperatorInfo* pOperatorDumy; SInterval interval; // if the upstream is an interval operator, the interval info is also kept here. - SCatchSupporter childAggSup; - SArray* childIds; + SArray* childIds; SessionWindowSupporter sessionSup; - bool assignBlockUid; // assign block uid to groupId, temporarily used for generating rollup SMA. + bool assignBlockUid; // assign block uid to groupId, temporarily used for generating rollup SMA. } SStreamBlockScanInfo; typedef struct SSysTableScanInfo { @@ -435,6 +440,7 @@ typedef struct SAggSupporter { typedef struct STimeWindowSupp { int8_t calTrigger; int64_t waterMark; + TSKEY maxTs; SColumnInfoData timeWindowData; // query time window info for scalar function execution. } STimeWindowAggSupp; @@ -566,6 +572,7 @@ typedef struct SResultWindowInfo { SResultRowPosition pos; STimeWindow win; bool isOutput; + bool isClosed; } SResultWindowInfo; typedef struct SStreamSessionAggOperatorInfo { @@ -735,10 +742,11 @@ SOperatorInfo* createGroupOperatorInfo(SOperatorInfo* downstream, SExprInfo* pEx SSDataBlock* pResultBlock, SArray* pGroupColList, SNode* pCondition, SExprInfo* pScalarExprInfo, int32_t numOfScalarExpr, SExecTaskInfo* pTaskInfo); SOperatorInfo* createDataBlockInfoScanOperator(void* dataReader, SExecTaskInfo* pTaskInfo); -SOperatorInfo* createStreamScanOperatorInfo(void* streamReadHandle, void* pDataReader, SReadHandle* pHandle, - uint64_t uid, SSDataBlock* pResBlock, SArray* pColList, - SArray* pTableIdList, SExecTaskInfo* pTaskInfo, SNode* pCondition, - SOperatorInfo* pOperatorDumy); + +SOperatorInfo* createStreamScanOperatorInfo(void* pDataReader, SReadHandle* pHandle, + SArray* pTableIdList, STableScanPhysiNode* pTableScanNode, SExecTaskInfo* pTaskInfo, + STimeWindowAggSupp* pTwSup, int16_t tsColId); + SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExpr, int32_t numOfCols, SInterval* pInterval, STimeWindow* pWindow, SSDataBlock* pResBlock, int32_t fillType, SNodeListNode* fillVal, @@ -794,8 +802,6 @@ int32_t getNumOfRowsInTimeWindow(SDataBlockInfo* pDataBlockInfo, TSKEY* pPrimary int32_t startPos, TSKEY ekey, __block_search_fn_t searchFn, STableQueryInfo* item, int32_t order); int32_t binarySearchForKey(char* pValue, int num, TSKEY key, int order); -int32_t initCacheSupporter(SCatchSupporter* pCatchSup, size_t rowSize, const char* pKey, - const char* pDir); int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, const char* pKey); SResultRow* getNewResultRow_rv(SDiskbasedBuf* pResultBuf, int64_t tableGroupId, int32_t interBufSize); SResultWindowInfo* getSessionTimeWindow(SArray* pWinInfos, TSKEY ts, int64_t gap, @@ -803,6 +809,8 @@ SResultWindowInfo* getSessionTimeWindow(SArray* pWinInfos, TSKEY ts, int64_t gap int32_t updateSessionWindowInfo(SResultWindowInfo* pWinInfo, TSKEY* pTs, int32_t rows, int32_t start, int64_t gap, SHashObj* pStDeleted); bool functionNeedToExecute(SqlFunctionCtx* pCtx); + +int32_t compareTimeWindow(const void* p1, const void* p2, const void* param); #ifdef __cplusplus } #endif diff --git 
a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 5a02547f58..aea9d70f31 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -233,7 +233,7 @@ void initGroupedResultInfo(SGroupResInfo* pGroupResInfo, SHashObj* pHashmap, int void initMultiResInfoFromArrayList(SGroupResInfo* pGroupResInfo, SArray* pArrayList) { if (pGroupResInfo->pRows != NULL) { - taosArrayDestroy(pGroupResInfo->pRows); + taosArrayDestroyP(pGroupResInfo->pRows, taosMemoryFree); } pGroupResInfo->pRows = pArrayList; diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index 684c657d17..2e76e8dd3f 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -28,13 +28,13 @@ #include "ttime.h" #include "executorimpl.h" +#include "index.h" #include "query.h" #include "tcompare.h" #include "tcompression.h" #include "thash.h" #include "ttypes.h" #include "vnode.h" -#include "index.h" #define IS_MAIN_SCAN(runtime) ((runtime)->scanFlag == MAIN_SCAN) #define IS_REVERSE_SCAN(runtime) ((runtime)->scanFlag == REVERSE_SCAN) @@ -87,7 +87,7 @@ static UNUSED_FUNC void* u_realloc(void* p, size_t __size) { #define realloc u_realloc #endif -#define CLEAR_QUERY_STATUS(q, st) ((q)->status &= (~(st))) +#define CLEAR_QUERY_STATUS(q, st) ((q)->status &= (~(st))) //#define GET_NUM_OF_TABLEGROUP(q) taosArrayGetSize((q)->tableqinfoGroupInfo.pGroupList) #define QUERY_IS_INTERVAL_QUERY(_q) ((_q)->interval.interval > 0) @@ -3960,11 +3960,11 @@ int32_t doInitAggInfoSup(SAggSupporter* pAggSup, SqlFunctionCtx* pCtx, int32_t n return TSDB_CODE_OUT_OF_MEMORY; } - uint32_t defaultPgsz = 0; + uint32_t defaultPgsz = 0; uint32_t defaultBufsz = 0; getBufferPgSize(pAggSup->resultRowSize, &defaultPgsz, &defaultBufsz); - int32_t code = createDiskbasedBuf(&pAggSup->pResultBuf, defaultPgsz, defaultBufsz, pKey, TD_TMP_DIR_PATH); + int32_t code = createDiskbasedBuf(&pAggSup->pResultBuf, defaultPgsz, defaultBufsz, pKey, TD_TMP_DIR_PATH); if (code != TSDB_CODE_SUCCESS) { return code; } @@ -4001,7 +4001,7 @@ void initResultSizeInfo(SOperatorInfo* pOperator, int32_t numOfRows) { } } -//static STableQueryInfo* initTableQueryInfo(const STableListInfo* pTableListInfo) { +// static STableQueryInfo* initTableQueryInfo(const STableListInfo* pTableListInfo) { // int32_t size = taosArrayGetSize(pTableListInfo->pTableList); // if (size == 0) { // return NULL; @@ -4434,9 +4434,11 @@ static SExecTaskInfo* createExecTaskInfo(uint64_t queryId, uint64_t taskId, EOPT } static tsdbReaderT doCreateDataReader(STableScanPhysiNode* pTableScanNode, SReadHandle* pHandle, - STableListInfo* pTableGroupInfo, uint64_t queryId, uint64_t taskId, SNode* pTagCond); + STableListInfo* pTableGroupInfo, uint64_t queryId, uint64_t taskId, + SNode* pTagCond); -static int32_t getTableList(void* metaHandle, int32_t tableType, uint64_t tableUid, STableListInfo* pListInfo, SNode* pTagCond); +static int32_t getTableList(void* metaHandle, int32_t tableType, uint64_t tableUid, STableListInfo* pListInfo, + SNode* pTagCond); static SArray* extractTableIdList(const STableListInfo* pTableGroupInfo); static SArray* extractColumnInfo(SNodeList* pNodeList); @@ -4473,7 +4475,8 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo if (QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN == type) { STableScanPhysiNode* pTableScanNode = (STableScanPhysiNode*)pPhyNode; - tsdbReaderT pDataReader = doCreateDataReader(pTableScanNode, pHandle, pTableListInfo, 
(uint64_t)queryId, taskId, pTagCond); + tsdbReaderT pDataReader = + doCreateDataReader(pTableScanNode, pHandle, pTableListInfo, (uint64_t)queryId, taskId, pTagCond); if (pDataReader == NULL && terrno != 0) { return NULL; } @@ -4492,9 +4495,8 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo } else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN == type) { SScanPhysiNode* pScanPhyNode = (SScanPhysiNode*)pPhyNode; // simple child table. STableScanPhysiNode* pTableScanNode = (STableScanPhysiNode*)pPhyNode; - - int32_t numOfCols = 0; - + STimeWindowAggSupp twSup = {.waterMark = pTableScanNode->watermark, + .calTrigger = pTableScanNode->triggerType, .maxTs = INT64_MIN}; tsdbReaderT pDataReader = NULL; if (pHandle->vnode) { pDataReader = doCreateDataReader(pTableScanNode, pHandle, pTableListInfo, (uint64_t)queryId, taskId, pTagCond); @@ -4503,24 +4505,16 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo } if (pDataReader == NULL && terrno != 0) { - qDebug("pDataReader is NULL"); + qDebug("%s pDataReader is NULL", GET_TASKID(pTaskInfo)); // return NULL; } else { - qDebug("pDataReader is not NULL"); + qDebug("%s pDataReader is not NULL", GET_TASKID(pTaskInfo)); } - SDataBlockDescNode* pDescNode = pScanPhyNode->node.pOutputDataBlockDesc; - SOperatorInfo* pOperatorDumy = createTableScanOperatorInfo(pTableScanNode, pDataReader, pHandle, pTaskInfo); - SArray* tableIdList = extractTableIdList(pTableListInfo); + SOperatorInfo* pOperator = createStreamScanOperatorInfo(pDataReader, pHandle, + tableIdList, pTableScanNode, pTaskInfo, &twSup, pTableScanNode->tsColId); - SSDataBlock* pResBlock = createResDataBlock(pDescNode); - SArray* pCols = - extractColMatchInfo(pScanPhyNode->pScanCols, pDescNode, &numOfCols, pTaskInfo, COL_MATCH_FROM_COL_ID); - - SOperatorInfo* pOperator = - createStreamScanOperatorInfo(pHandle->reader, pDataReader, pHandle, pScanPhyNode->uid, pResBlock, pCols, - tableIdList, pTaskInfo, pScanPhyNode->node.pConditions, pOperatorDumy); taosArrayDestroy(tableIdList); return pOperator; } else if (QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN == type) { @@ -4602,8 +4596,8 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo pOptr = createGroupOperatorInfo(ops[0], pExprInfo, num, pResBlock, pColList, pAggNode->node.pConditions, pScalarExprInfo, numOfScalarExpr, pTaskInfo); } else { - pOptr = createAggregateOperatorInfo(ops[0], pExprInfo, num, pResBlock, pScalarExprInfo, numOfScalarExpr, - pTaskInfo); + pOptr = + createAggregateOperatorInfo(ops[0], pExprInfo, num, pResBlock, pScalarExprInfo, numOfScalarExpr, pTaskInfo); } } else if (QUERY_NODE_PHYSICAL_PLAN_INTERVAL == type || QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL == type) { SIntervalPhysiNode* pIntervalPhyNode = (SIntervalPhysiNode*)pPhyNode; @@ -4619,7 +4613,8 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo .precision = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->node.resType.precision}; STimeWindowAggSupp as = {.waterMark = pIntervalPhyNode->window.watermark, - .calTrigger = pIntervalPhyNode->window.triggerType}; + .calTrigger = pIntervalPhyNode->window.triggerType, + .maxTs = INT64_MIN}; int32_t tsSlotId = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->slotId; pOptr = createIntervalOperatorInfo(ops[0], pExprInfo, num, pResBlock, &interval, tsSlotId, &as, pTaskInfo); @@ -4706,6 +4701,18 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo return pOptr; } +int32_t 
compareTimeWindow(const void* p1, const void* p2, const void* param) { + const SQueryTableDataCond *pCond = param; + const STimeWindow *pWin1 = p1; + const STimeWindow *pWin2 = p2; + if (pCond->order == TSDB_ORDER_ASC) { + return pWin1->skey - pWin2->skey; + } else if (pCond->order == TSDB_ORDER_DESC) { + return pWin2->skey - pWin1->skey; + } + return 0; +} + int32_t initQueryTableDataCond(SQueryTableDataCond* pCond, const STableScanPhysiNode* pTableScanNode) { pCond->loadExternalRows = false; @@ -4717,16 +4724,34 @@ int32_t initQueryTableDataCond(SQueryTableDataCond* pCond, const STableScanPhysi return terrno; } - pCond->twindow = pTableScanNode->scanRange; + //pCond->twindow = pTableScanNode->scanRange; + //TODO: get it from stable scan node + pCond->numOfTWindows = 1; + pCond->twindows = taosMemoryCalloc(pCond->numOfTWindows, sizeof(STimeWindow)); + pCond->twindows[0] = pTableScanNode->scanRange; #if 1 // todo work around a problem, remove it later - if ((pCond->order == TSDB_ORDER_ASC && pCond->twindow.skey > pCond->twindow.ekey) || - (pCond->order == TSDB_ORDER_DESC && pCond->twindow.skey < pCond->twindow.ekey)) { - TSWAP(pCond->twindow.skey, pCond->twindow.ekey); + for (int32_t i = 0; i < pCond->numOfTWindows; ++i) { + if ((pCond->order == TSDB_ORDER_ASC && pCond->twindows[i].skey > pCond->twindows[i].ekey) || + (pCond->order == TSDB_ORDER_DESC && pCond->twindows[i].skey < pCond->twindows[i].ekey)) { + TSWAP(pCond->twindows[i].skey, pCond->twindows[i].ekey); + } } #endif + for (int32_t i = 0; i < pCond->numOfTWindows; ++i) { + if ((pCond->order == TSDB_ORDER_ASC && pCond->twindows[i].skey > pCond->twindows[i].ekey) || + (pCond->order == TSDB_ORDER_DESC && pCond->twindows[i].skey < pCond->twindows[i].ekey)) { + TSWAP(pCond->twindows[i].skey, pCond->twindows[i].ekey); + } + } + taosqsort(pCond->twindows, + pCond->numOfTWindows, + sizeof(STimeWindow), + pCond, + compareTimeWindow); + pCond->type = BLOCK_LOAD_OFFSET_SEQ_ORDER; // pCond->type = pTableScanNode->scanFlag; @@ -4885,27 +4910,30 @@ SArray* extractColMatchInfo(SNodeList* pNodeList, SDataBlockDescNode* pOutputNod return pList; } -int32_t getTableList(void* metaHandle, int32_t tableType, uint64_t tableUid, - STableListInfo* pListInfo, SNode* pTagCond) { +int32_t getTableList(void* metaHandle, int32_t tableType, uint64_t tableUid, STableListInfo* pListInfo, SNode* pTagCond) { int32_t code = TSDB_CODE_SUCCESS; pListInfo->pTableList = taosArrayInit(8, sizeof(STableKeyInfo)); if (tableType == TSDB_SUPER_TABLE) { - if(pTagCond){ + if (pTagCond) { + SIndexMetaArg metaArg = {.metaHandle = tsdbGetIdx(metaHandle), .suid = tableUid}; + SArray* res = taosArrayInit(8, sizeof(uint64_t)); - code = doFilterTag(pTagCond, res); + code = doFilterTag(pTagCond, &metaArg, res); if (code != TSDB_CODE_SUCCESS) { qError("doFilterTag error:%d", code); taosArrayDestroy(res); terrno = code; return code; + } else { + qDebug("doFilterTag error:%d, suid: %" PRIu64 "", code, tableUid); } - for(int i = 0; i < taosArrayGetSize(res); i++){ + for (int i = 0; i < taosArrayGetSize(res); i++) { STableKeyInfo info = {.lastKey = TSKEY_INITIAL_VAL, .uid = *(uint64_t*)taosArrayGet(res, i)}; taosArrayPush(pListInfo->pTableList, &info); } taosArrayDestroy(res); - }else{ + } else { code = tsdbGetAllTableList(metaHandle, tableUid, pListInfo->pTableList); } } else { // Create one table group. 
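Taken together, initQueryTableDataCond now normalizes every window (so skey leads in the scan direction) and then orders the whole twindows array with compareTimeWindow, whose extra param carries the scan order. The sketch below illustrates that param-aware comparator pattern in isolation; TimeWin, sortWins, and the order constants are stand-ins rather than TDengine's actual types, and a tiny insertion sort stands in for taosqsort:

```c
#include <stdint.h>
#include <stdio.h>

typedef struct { int64_t skey, ekey; } TimeWin;
enum { ORDER_ASC = 1, ORDER_DESC = 2 };

/* Param-aware comparator, shaped like compareTimeWindow: sort windows by
 * skey, ascending for ASC scans and descending for DESC scans. */
static int cmpWin(const void *p1, const void *p2, const void *param) {
  const TimeWin *w1 = p1, *w2 = p2;
  int order = *(const int *)param;
  int64_t d = (order == ORDER_DESC) ? (w2->skey - w1->skey) : (w1->skey - w2->skey);
  return (d > 0) - (d < 0);  /* sign only: safe for 64-bit keys */
}

/* Minimal stand-in for taosqsort: an insertion sort that threads the extra
 * parameter through to the comparator. */
static void sortWins(TimeWin *w, int n, const void *param,
                     int (*cmp)(const void *, const void *, const void *)) {
  for (int i = 1; i < n; ++i) {
    TimeWin key = w[i];
    int j = i - 1;
    while (j >= 0 && cmp(&w[j], &key, param) > 0) { w[j + 1] = w[j]; --j; }
    w[j + 1] = key;
  }
}

int main(void) {
  TimeWin w[] = {{30, 39}, {10, 19}, {20, 29}};
  int order = ORDER_DESC;
  sortWins(w, 3, &order, cmpWin);
  for (int i = 0; i < 3; ++i)
    printf("[%lld, %lld]\n", (long long)w[i].skey, (long long)w[i].ekey);
  return 0;  /* prints [30,39] [20,29] [10,19] */
}
```

One detail worth noting: returning only the sign of the difference, as the sketch does, sidesteps the int32 truncation that can occur when a raw 64-bit skey difference between timestamps is returned directly from the comparator.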
@@ -4930,7 +4958,8 @@ SArray* extractTableIdList(const STableListInfo* pTableGroupInfo) { tsdbReaderT doCreateDataReader(STableScanPhysiNode* pTableScanNode, SReadHandle* pHandle, STableListInfo* pTableListInfo, uint64_t queryId, uint64_t taskId, SNode* pTagCond) { - int32_t code = getTableList(pHandle->meta, pTableScanNode->scan.tableType, pTableScanNode->scan.uid, pTableListInfo, pTagCond); + int32_t code = + getTableList(pHandle->meta, pTableScanNode->scan.tableType, pTableScanNode->scan.uid, pTableListInfo, pTagCond); if (code != TSDB_CODE_SUCCESS) { goto _error; } @@ -4965,8 +4994,8 @@ int32_t createExecTaskInfoImpl(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SRead goto _complete; } - (*pTaskInfo)->pRoot = - createOperatorTree(pPlan->pNode, *pTaskInfo, pHandle, queryId, taskId, &(*pTaskInfo)->tableqinfoList, pPlan->pTagCond); + (*pTaskInfo)->pRoot = createOperatorTree(pPlan->pNode, *pTaskInfo, pHandle, queryId, taskId, + &(*pTaskInfo)->tableqinfoList, pPlan->pTagCond); if (NULL == (*pTaskInfo)->pRoot) { code = terrno; goto _complete; @@ -5151,20 +5180,6 @@ int32_t getOperatorExplainExecInfo(SOperatorInfo* operatorInfo, SExplainExecInfo return TSDB_CODE_SUCCESS; } -int32_t initCacheSupporter(SCatchSupporter* pCatchSup, size_t rowSize, const char* pKey, const char* pDir) { - pCatchSup->keySize = sizeof(int64_t) + sizeof(int64_t) + sizeof(TSKEY); - pCatchSup->pKeyBuf = taosMemoryCalloc(1, pCatchSup->keySize); - _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); - pCatchSup->pWindowHashTable = taosHashInit(10000, hashFn, true, HASH_NO_LOCK); - if (pCatchSup->pKeyBuf == NULL || pCatchSup->pWindowHashTable == NULL) { - return TSDB_CODE_OUT_OF_MEMORY; - } - - int32_t pageSize = rowSize * 32; - int32_t bufSize = pageSize * 4096; - return createDiskbasedBuf(&pCatchSup->pDataBuf, pageSize, bufSize, pKey, pDir); -} - int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, const char* pKey) { pSup->keySize = sizeof(int64_t) + sizeof(TSKEY); pSup->pKeyBuf = taosMemoryCalloc(1, pSup->keySize); diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 613fa26c2d..b954eb3a22 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -158,7 +158,7 @@ static bool overlapWithTimeWindow(SInterval* pInterval, SDataBlockInfo* pBlockIn return false; } -static void addTagPseudoColumnData(STableScanInfo* pTableScanInfo, SSDataBlock* pBlock); +static void addTagPseudoColumnData(SReadHandle *pHandle, SExprInfo* pPseudoExpr, int32_t numOfPseudoExpr, SSDataBlock* pBlock); static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableScanInfo, SSDataBlock* pBlock, uint32_t* status) { @@ -250,7 +250,7 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableSca // currently only the tbname pseudo column if (pTableScanInfo->numOfPseudoExpr > 0) { - addTagPseudoColumnData(pTableScanInfo, pBlock); + addTagPseudoColumnData(&pTableScanInfo->readHandle, pTableScanInfo->pPseudoExpr, pTableScanInfo->numOfPseudoExpr, pBlock); } int64_t st = taosGetTimestampMs(); @@ -274,23 +274,31 @@ static void prepareForDescendingScan(STableScanInfo* pTableScanInfo, SqlFunction switchCtxOrder(pCtx, numOfOutput); // setupQueryRangeForReverseScan(pTableScanInfo); - STimeWindow* pTWindow = &pTableScanInfo->cond.twindow; - TSWAP(pTWindow->skey, pTWindow->ekey); pTableScanInfo->cond.order = TSDB_ORDER_DESC; + for (int32_t i = 0; i < pTableScanInfo->cond.numOfTWindows; ++i) { + STimeWindow* pTWindow = 
&pTableScanInfo->cond.twindows[i]; + TSWAP(pTWindow->skey, pTWindow->ekey); + } + SQueryTableDataCond *pCond = &pTableScanInfo->cond; + taosqsort(pCond->twindows, + pCond->numOfTWindows, + sizeof(STimeWindow), + pCond, + compareTimeWindow); } -void addTagPseudoColumnData(STableScanInfo* pTableScanInfo, SSDataBlock* pBlock) { +void addTagPseudoColumnData(SReadHandle *pHandle, SExprInfo* pPseudoExpr, int32_t numOfPseudoExpr, SSDataBlock* pBlock) { // currently only the tbname pseudo column - if (pTableScanInfo->numOfPseudoExpr == 0) { + if (numOfPseudoExpr == 0) { return; } SMetaReader mr = {0}; - metaReaderInit(&mr, pTableScanInfo->readHandle.meta, 0); + metaReaderInit(&mr, pHandle->meta, 0); metaGetTableEntryByUid(&mr, pBlock->info.uid); - for (int32_t j = 0; j < pTableScanInfo->numOfPseudoExpr; ++j) { - SExprInfo* pExpr = &pTableScanInfo->pPseudoExpr[j]; + for (int32_t j = 0; j < numOfPseudoExpr; ++j) { + SExprInfo* pExpr = &pPseudoExpr[j]; int32_t dstSlotId = pExpr->base.resSchema.slotId; @@ -301,7 +309,7 @@ void addTagPseudoColumnData(STableScanInfo* pTableScanInfo, SSDataBlock* pBlock) // this is to handle the tbname if (fmIsScanPseudoColumnFunc(functionId)) { - setTbNameColData(pTableScanInfo->readHandle.meta, pBlock, pColInfoData, functionId); + setTbNameColData(pHandle->meta, pBlock, pColInfoData, functionId); } else { // these are tags const char* p = NULL; if (pColInfoData->info.type == TSDB_DATA_TYPE_JSON) { @@ -380,7 +388,6 @@ static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) { pOperator->cost.totalCost = pTableScanInfo->readRecorder.elapsedTime; return pBlock; } - return NULL; } @@ -395,9 +402,15 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) { // do the ascending order traverse in the first place. while (pTableScanInfo->scanTimes < pTableScanInfo->scanInfo.numOfAsc) { - SSDataBlock* p = doTableScanImpl(pOperator); - if (p != NULL) { - return p; + while (pTableScanInfo->curTWinIdx < pTableScanInfo->cond.numOfTWindows) { + SSDataBlock* p = doTableScanImpl(pOperator); + if (p != NULL) { + return p; + } + pTableScanInfo->curTWinIdx += 1; + if (pTableScanInfo->curTWinIdx < pTableScanInfo->cond.numOfTWindows) { + tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond, pTableScanInfo->curTWinIdx); + } } pTableScanInfo->scanTimes += 1; @@ -405,14 +418,14 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) { if (pTableScanInfo->scanTimes < pTableScanInfo->scanInfo.numOfAsc) { setTaskStatus(pTaskInfo, TASK_NOT_COMPLETED); pTableScanInfo->scanFlag = REPEAT_SCAN; - - STimeWindow* pWin = &pTableScanInfo->cond.twindow; - qDebug("%s start to repeat ascending order scan data blocks due to query func required, qrange:%" PRId64 - "-%" PRId64, - GET_TASKID(pTaskInfo), pWin->skey, pWin->ekey); - + qDebug("%s start to repeat ascending order scan data blocks due to query func required", GET_TASKID(pTaskInfo)); + for (int32_t i = 0; i < pTableScanInfo->cond.numOfTWindows; ++i) { + STimeWindow* pWin = &pTableScanInfo->cond.twindows[i]; + qDebug("%s\t qrange:%" PRId64 "-%" PRId64, GET_TASKID(pTaskInfo), pWin->skey, pWin->ekey); + } // do prepare for the next round table scan operation - tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond); + tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond, 0); + pTableScanInfo->curTWinIdx = 0; } } @@ -420,31 +433,40 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) { if (pTableScanInfo->scanTimes < total) { if (pTableScanInfo->cond.order == TSDB_ORDER_ASC) { 
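/* note (reviewer sketch): the descending pass now covers every window in cond.twindows -- prepareForDescendingScan() below swaps each window's skey/ekey and re-sorts the array, then the reader is rewound to the first window via tsdbResetReadHandle(..., 0) and curTWinIdx is cleared */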
prepareForDescendingScan(pTableScanInfo, pTableScanInfo->pCtx, pTableScanInfo->numOfOutput); - tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond); + tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond, 0); + pTableScanInfo->curTWinIdx = 0; } - STimeWindow* pWin = &pTableScanInfo->cond.twindow; - qDebug("%s start to descending order scan data blocks due to query func required, qrange:%" PRId64 "-%" PRId64, - GET_TASKID(pTaskInfo), pWin->skey, pWin->ekey); - + qDebug("%s start to descending order scan data blocks due to query func required", GET_TASKID(pTaskInfo)); + for (int32_t i = 0; i < pTableScanInfo->cond.numOfTWindows; ++i) { + STimeWindow* pWin = &pTableScanInfo->cond.twindows[i]; + qDebug("%s\t qrange:%" PRId64 "-%" PRId64, GET_TASKID(pTaskInfo), pWin->skey, pWin->ekey); + } while (pTableScanInfo->scanTimes < total) { - SSDataBlock* p = doTableScanImpl(pOperator); - if (p != NULL) { - return p; + while (pTableScanInfo->curTWinIdx < pTableScanInfo->cond.numOfTWindows) { + SSDataBlock* p = doTableScanImpl(pOperator); + if (p != NULL) { + return p; + } + pTableScanInfo->curTWinIdx += 1; + if (pTableScanInfo->curTWinIdx < pTableScanInfo->cond.numOfTWindows) { + tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond, pTableScanInfo->curTWinIdx); + } } pTableScanInfo->scanTimes += 1; - if (pTableScanInfo->scanTimes < pTableScanInfo->scanInfo.numOfAsc) { + if (pTableScanInfo->scanTimes < total) { setTaskStatus(pTaskInfo, TASK_NOT_COMPLETED); pTableScanInfo->scanFlag = REPEAT_SCAN; - qDebug("%s start to repeat descending order scan data blocks due to query func required, qrange:%" PRId64 - "-%" PRId64, - GET_TASKID(pTaskInfo), pTaskInfo->window.skey, pTaskInfo->window.ekey); - - // do prepare for the next round table scan operation - tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond); + qDebug("%s start to repeat descending order scan data blocks due to query func required", GET_TASKID(pTaskInfo)); + for (int32_t i = 0; i < pTableScanInfo->cond.numOfTWindows; ++i) { + STimeWindow* pWin = &pTableScanInfo->cond.twindows[i]; + qDebug("%s\t qrange:%" PRId64 "-%" PRId64, GET_TASKID(pTaskInfo), pWin->skey, pWin->ekey); + } + tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond, 0); + pTableScanInfo->curTWinIdx = 0; } } } @@ -524,6 +546,7 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode, pInfo->dataReader = pDataReader; pInfo->scanFlag = MAIN_SCAN; pInfo->pColMatchInfo = pColList; + pInfo->curTWinIdx = 0; pOperator->name = "TableScanOperator"; // for debug purpose pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN; @@ -678,8 +701,9 @@ static bool prepareDataScan(SStreamBlockScanInfo* pInfo) { binarySearchForKey, NULL, TSDB_ORDER_ASC); } STableScanInfo* pTableScanInfo = pInfo->pOperatorDumy->info; - pTableScanInfo->cond.twindow = win; - tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond); + pTableScanInfo->cond.twindows[0] = win; + pTableScanInfo->curTWinIdx = 0; + tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond, 0); pTableScanInfo->scanTimes = 0; return true; } else { @@ -731,91 +755,6 @@ static SSDataBlock* getUpdateDataBlock(SStreamBlockScanInfo* pInfo, bool inverti return NULL; } -void static setSupKeyBuf(SCatchSupporter* pSup, int64_t groupId, int64_t childId, TSKEY ts) { - int64_t* pKey = (int64_t*)pSup->pKeyBuf; - pKey[0] = groupId; - pKey[1] = childId; - pKey[2] = ts; -} - -static int32_t catchWidonwInfo(SSDataBlock* 
pDataBlock, SCatchSupporter* pSup, int32_t pageId, int32_t tsIndex, - int64_t childId) { - SColumnInfoData* pColDataInfo = taosArrayGet(pDataBlock->pDataBlock, tsIndex); - TSKEY* tsCols = (int64_t*)pColDataInfo->pData; - for (int32_t i = 0; i < pDataBlock->info.rows; i++) { - setSupKeyBuf(pSup, pDataBlock->info.groupId, childId, tsCols[i]); - SWindowPosition* p1 = (SWindowPosition*)taosHashGet(pSup->pWindowHashTable, pSup->pKeyBuf, pSup->keySize); - if (p1 == NULL) { - SWindowPosition pos = {.pageId = pageId, .rowId = i}; - int32_t code = taosHashPut(pSup->pWindowHashTable, pSup->pKeyBuf, pSup->keySize, &pos, sizeof(SWindowPosition)); - if (code != TSDB_CODE_SUCCESS) { - return code; - } - } else { - p1->pageId = pageId; - p1->rowId = i; - } - } - return TSDB_CODE_SUCCESS; -} - -static int32_t catchDatablock(SSDataBlock* pDataBlock, SCatchSupporter* pSup, int32_t tsIndex, int64_t childId) { - int32_t start = 0; - int32_t stop = 0; - int32_t pageSize = getBufPageSize(pSup->pDataBuf); - while (start < pDataBlock->info.rows) { - blockDataSplitRows(pDataBlock, pDataBlock->info.hasVarCol, start, &stop, pageSize); - SSDataBlock* pDB = blockDataExtractBlock(pDataBlock, start, stop - start + 1); - if (pDB == NULL) { - return terrno; - } - int32_t pageId = -1; - void* pPage = getNewBufPage(pSup->pDataBuf, pDataBlock->info.groupId, &pageId); - if (pPage == NULL) { - blockDataDestroy(pDB); - return terrno; - } - int32_t size = blockDataGetSize(pDB) + sizeof(int32_t) + pDB->info.numOfCols * sizeof(int32_t); - assert(size <= pageSize); - blockDataToBuf(pPage, pDB); - setBufPageDirty(pPage, true); - releaseBufPage(pSup->pDataBuf, pPage); - blockDataDestroy(pDB); - start = stop + 1; - int32_t code = catchWidonwInfo(pDB, pSup, pageId, tsIndex, childId); - if (code != TSDB_CODE_SUCCESS) { - return code; - } - } - return TSDB_CODE_SUCCESS; -} - -static SSDataBlock* getDataFromCatch(SStreamBlockScanInfo* pInfo) { - SSDataBlock* pBlock = pInfo->pUpdateRes; - if (pInfo->updateResIndex < pBlock->info.rows) { - blockDataCleanup(pInfo->pRes); - SCatchSupporter* pCSup = &pInfo->childAggSup; - SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, 0); - TSKEY* tsCols = (TSKEY*)pColDataInfo->pData; - int32_t size = taosArrayGetSize(pInfo->childIds); - for (int32_t i = 0; i < size; i++) { - int64_t id = *(int64_t*)taosArrayGet(pInfo->childIds, i); - setSupKeyBuf(pCSup, pBlock->info.groupId, id, tsCols[pInfo->updateResIndex]); - SWindowPosition* pos = (SWindowPosition*)taosHashGet(pCSup->pWindowHashTable, pCSup->pKeyBuf, pCSup->keySize); - void* buf = getBufPage(pCSup->pDataBuf, pos->pageId); - SSDataBlock* pDB = createOneDataBlock(pInfo->pRes, false); - blockDataFromBuf(pDB, buf); - SSDataBlock* pSub = blockDataExtractBlock(pDB, pos->rowId, 1); - blockDataMerge(pInfo->pRes, pSub); - blockDataDestroy(pDB); - blockDataDestroy(pSub); - } - pInfo->updateResIndex++; - return pInfo->pRes; - } - return NULL; -} - static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) { // NOTE: this operator does never check if current status is done or not SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; @@ -886,8 +825,7 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) { pInfo->pRes->info.groupId = groupId; } - int32_t numOfCols = pInfo->pRes->info.numOfCols; - for (int32_t i = 0; i < numOfCols; ++i) { + for (int32_t i = 0; i < taosArrayGetSize(pInfo->pColMatchInfo); ++i) { SColMatchInfo* pColMatchInfo = taosArrayGet(pInfo->pColMatchInfo, i); if (!pColMatchInfo->output) { continue; @@ -917,10 +855,16 @@ 
static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) { pTaskInfo->code = terrno; return NULL; } + rows = pBlockInfo->rows; + + // currently only the tbname pseudo column + if (pInfo->numOfPseudoExpr > 0) { + addTagPseudoColumnData(&pInfo->readHandle, pInfo->pPseudoExpr, pInfo->numOfPseudoExpr, pInfo->pRes); + } + doFilter(pInfo->pCondition, pInfo->pRes, NULL); blockDataUpdateTsWindow(pInfo->pRes, 0); - break; } @@ -948,10 +892,9 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) { } } -SOperatorInfo* createStreamScanOperatorInfo(void* streamReadHandle, void* pDataReader, SReadHandle* pHandle, - uint64_t uid, SSDataBlock* pResBlock, SArray* pColList, - SArray* pTableIdList, SExecTaskInfo* pTaskInfo, SNode* pCondition, - SOperatorInfo* pOperatorDumy) { +SOperatorInfo* createStreamScanOperatorInfo(void* pDataReader, SReadHandle* pHandle, + SArray* pTableIdList, STableScanPhysiNode* pTableScanNode, SExecTaskInfo* pTaskInfo, + STimeWindowAggSupp* pTwSup, int16_t tsColId) { SStreamBlockScanInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamBlockScanInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pInfo == NULL || pOperator == NULL) { @@ -959,22 +902,28 @@ SOperatorInfo* createStreamScanOperatorInfo(void* streamReadHandle, void* pDataR goto _error; } - STableScanInfo* pSTInfo = (STableScanInfo*)pOperatorDumy->info; + SScanPhysiNode* pScanPhyNode = &pTableScanNode->scan; - int32_t numOfOutput = taosArrayGetSize(pColList); + SDataBlockDescNode* pDescNode = pScanPhyNode->node.pOutputDataBlockDesc; + SOperatorInfo* pTableScanDummy = createTableScanOperatorInfo(pTableScanNode, pDataReader, pHandle, pTaskInfo); - SArray* pColIds = taosArrayInit(4, sizeof(int16_t)); + STableScanInfo* pSTInfo = (STableScanInfo*)pTableScanDummy->info; + + int32_t numOfCols = 0; + pInfo->pColMatchInfo = extractColMatchInfo(pScanPhyNode->pScanCols, pDescNode, &numOfCols, pTaskInfo, COL_MATCH_FROM_COL_ID); + + int32_t numOfOutput = taosArrayGetSize(pInfo->pColMatchInfo); + SArray* pColIds = taosArrayInit(numOfOutput, sizeof(int16_t)); for (int32_t i = 0; i < numOfOutput; ++i) { - SColMatchInfo* id = taosArrayGet(pColList, i); - int16_t colId = id->colId; + SColMatchInfo* id = taosArrayGet(pInfo->pColMatchInfo, i); + + int16_t colId = id->colId; taosArrayPush(pColIds, &colId); } - pInfo->pColMatchInfo = pColList; - // set the extract column id to streamHandle - tqReadHandleSetColIdList((STqReadHandle*)streamReadHandle, pColIds); - int32_t code = tqReadHandleSetTbUidList(streamReadHandle, pTableIdList); + tqReadHandleSetColIdList((STqReadHandle*)pHandle->reader, pColIds); + int32_t code = tqReadHandleSetTbUidList(pHandle->reader, pTableIdList); if (code != 0) { goto _error; } @@ -990,37 +939,39 @@ SOperatorInfo* createStreamScanOperatorInfo(void* streamReadHandle, void* pDataR goto _error; } - pInfo->primaryTsIndex = 0; // TODO(liuyao) get it from physical plan + pInfo->primaryTsIndex = tsColId; if (pSTInfo->interval.interval > 0) { - pInfo->pUpdateInfo = updateInfoInitP(&pSTInfo->interval, 10000); // TODO(liuyao) get watermark from physical plan + pInfo->pUpdateInfo = updateInfoInitP(&pSTInfo->interval, pTwSup->waterMark); } else { pInfo->pUpdateInfo = NULL; } - pInfo->readHandle = *pHandle; - pInfo->tableUid = uid; - pInfo->streamBlockReader = streamReadHandle; - pInfo->pRes = pResBlock; - pInfo->pCondition = pCondition; - pInfo->pDataReader = pDataReader; - pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE; - pInfo->pOperatorDumy = pOperatorDumy; - pInfo->interval = 
pSTInfo->interval; - pInfo->sessionSup = (SessionWindowSupporter){.pStreamAggSup = NULL, .gap = -1}; + // create the pseudo columns info + if (pTableScanNode->scan.pScanPseudoCols != NULL) { + pInfo->pPseudoExpr = createExprInfo(pTableScanNode->scan.pScanPseudoCols, NULL, &pInfo->numOfPseudoExpr); + } - initCacheSupporter(&pInfo->childAggSup, 1024, "StreamFinalInterval", - "/tmp/"); // TODO(liuyao) get row size from phy plan + pInfo->readHandle = *pHandle; + pInfo->tableUid = pScanPhyNode->uid; + pInfo->streamBlockReader = pHandle->reader; + pInfo->pRes = createResDataBlock(pDescNode); + pInfo->pCondition = pScanPhyNode->node.pConditions; + pInfo->pDataReader = pDataReader; + pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE; + pInfo->pOperatorDumy = pTableScanDummy; + pInfo->interval = pSTInfo->interval; + pInfo->sessionSup = (SessionWindowSupporter){.pStreamAggSup = NULL, .gap = -1}; - pOperator->name = "StreamBlockScanOperator"; + pOperator->name = "StreamBlockScanOperator"; pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN; - pOperator->blocking = false; - pOperator->status = OP_NOT_OPENED; - pOperator->info = pInfo; - pOperator->numOfExprs = pResBlock->info.numOfCols; - pOperator->pTaskInfo = pTaskInfo; + pOperator->blocking = false; + pOperator->status = OP_NOT_OPENED; + pOperator->info = pInfo; + pOperator->numOfExprs = pInfo->pRes->info.numOfCols; + pOperator->pTaskInfo = pTaskInfo; - pOperator->fpSet = - createOperatorFpSet(operatorDummyOpenFn, doStreamBlockScan, NULL, NULL, operatorDummyCloseFn, NULL, NULL, NULL); + pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doStreamBlockScan, NULL, + NULL, operatorDummyCloseFn, NULL, NULL, NULL); return pOperator; diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 5d0a0a0270..829968d37f 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -622,18 +622,103 @@ static void saveDataBlockLastRow(char** pRow, SArray* pDataBlock, int32_t rowInd } } -static SArray* hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResultRowInfo, SSDataBlock* pBlock, - uint64_t tableGroupId) { +typedef int64_t (*__get_value_fn_t)(void* data, int32_t index); + +int32_t binarySearch(void* keyList, int num, TSKEY key, int order, + __get_value_fn_t getValuefn) { + int firstPos = 0, lastPos = num - 1, midPos = -1; + int numOfRows = 0; + + if (num <= 0) return -1; + if (order == TSDB_ORDER_DESC) { + // find the first position that is smaller than or equal to the key + while (1) { + if (key >= getValuefn(keyList, lastPos)) return lastPos; + if (key == getValuefn(keyList, firstPos)) return firstPos; + if (key < getValuefn(keyList, firstPos)) return firstPos - 1; + + numOfRows = lastPos - firstPos + 1; + midPos = (numOfRows >> 1) + firstPos; + + if (key < getValuefn(keyList, midPos)) { + lastPos = midPos - 1; + } else if (key > getValuefn(keyList, midPos)) { + firstPos = midPos + 1; + } else { + break; + } + } + + } else { + // find the first position that is greater than or equal to the key + while (1) { + if (key <= getValuefn(keyList, firstPos)) return firstPos; + if (key == getValuefn(keyList, lastPos)) return lastPos; + + if (key > getValuefn(keyList, lastPos)) { + lastPos = lastPos + 1; + if (lastPos >= num) + return -1; + else + return lastPos; + } + + numOfRows = lastPos - firstPos + 1; + midPos = (numOfRows >> 1) + firstPos; + + if (key < getValuefn(keyList, midPos)) { + lastPos = midPos - 1; + } else if (key > 
getValuefn(keyList, midPos)) { + firstPos = midPos + 1; + } else { + break; + } + } + } + + return midPos; +} + +int64_t getReskey(void* data, int32_t index) { + SArray* res = (SArray*) data; + SResKeyPos* pos = taosArrayGetP(res, index); + return *(int64_t*)pos->key; +} + +static int32_t saveResult(SResultRow* result, uint64_t groupId, SArray* pUpdated) { + int32_t size = taosArrayGetSize(pUpdated); + int32_t index = binarySearch(pUpdated, size, result->win.skey, TSDB_ORDER_DESC, getReskey); + if (index == -1) { + index = 0; + } else { + TSKEY resTs = getReskey(pUpdated, index); + if (resTs < result->win.skey) { + index++; + } else { + return TSDB_CODE_SUCCESS; + } + } + + SResKeyPos* newPos = taosMemoryMalloc(sizeof(SResKeyPos) + sizeof(uint64_t)); + if (newPos == NULL) { + return TSDB_CODE_OUT_OF_MEMORY; + } + newPos->groupId = groupId; + newPos->pos = (SResultRowPosition){.pageId = result->pageId, .offset = result->offset}; + *(int64_t*)newPos->key = result->win.skey; + if (taosArrayInsert(pUpdated, index, &newPos) == NULL ){ + return TSDB_CODE_OUT_OF_MEMORY; + } + return TSDB_CODE_SUCCESS; +} + +static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResultRowInfo, SSDataBlock* pBlock, + uint64_t tableGroupId, SArray* pUpdated) { SIntervalAggOperatorInfo* pInfo = (SIntervalAggOperatorInfo*)pOperatorInfo->info; SExecTaskInfo* pTaskInfo = pOperatorInfo->pTaskInfo; int32_t numOfOutput = pOperatorInfo->numOfExprs; - SArray* pUpdated = NULL; - if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM) { - pUpdated = taosArrayInit(4, POINTER_BYTES); - } - int32_t step = 1; bool ascScan = (pInfo->order == TSDB_ORDER_ASC); @@ -663,13 +748,10 @@ static SArray* hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } - if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM) { - SResKeyPos* pos = taosMemoryMalloc(sizeof(SResKeyPos) + sizeof(uint64_t)); - pos->groupId = tableGroupId; - pos->pos = (SResultRowPosition){.pageId = pResult->pageId, .offset = pResult->offset}; - *(int64_t*)pos->key = pResult->win.skey; - - taosArrayPush(pUpdated, &pos); + if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM && + (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE || + pInfo->twAggSup.calTrigger == 0) ) { + saveResult(pResult, tableGroupId, pUpdated); } int32_t forwardStep = 0; @@ -742,13 +824,10 @@ static SArray* hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } - if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM) { - SResKeyPos* pos = taosMemoryMalloc(sizeof(SResKeyPos) + sizeof(uint64_t)); - pos->groupId = tableGroupId; - pos->pos = (SResultRowPosition){.pageId = pResult->pageId, .offset = pResult->offset}; - *(int64_t*)pos->key = pResult->win.skey; - - taosArrayPush(pUpdated, &pos); + if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM && + (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE || + pInfo->twAggSup.calTrigger == 0) ) { + saveResult(pResult, tableGroupId, pUpdated); } ekey = ascScan? 
nextWin.ekey:nextWin.skey; @@ -769,7 +848,6 @@ static SArray* hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe saveDataBlockLastRow(pInfo->pRow, pBlock->pDataBlock, rowIndex, pBlock->info.numOfCols); } - return pUpdated; // updateResultRowInfoActiveIndex(pResultRowInfo, &pInfo->win, pRuntimeEnv->current->lastKey, true, false); } @@ -799,7 +877,7 @@ static int32_t doOpenIntervalAgg(SOperatorInfo* pOperator) { STableQueryInfo* pTableQueryInfo = pInfo->pCurrent; setIntervalQueryRange(pTableQueryInfo, pBlock->info.window.skey, &pTaskInfo->window); - hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, pBlock->info.groupId); + hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, pBlock->info.groupId, NULL); #if 0 // test for encode/decode result info if(pOperator->encodeResultRow){ @@ -1067,7 +1145,7 @@ void doClearWindow(SAggSupporter* pSup, SOptrBasicInfo* pBinfo, char* pData, } static void doClearWindows(SAggSupporter* pSup, SOptrBasicInfo* pBinfo, - SInterval* pIntrerval, int32_t tsIndex, int32_t numOfOutput, SSDataBlock* pBlock, + SInterval* pInterval, int32_t tsIndex, int32_t numOfOutput, SSDataBlock* pBlock, SArray* pUpWins) { SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, tsIndex); TSKEY *tsCols = (TSKEY*)pColDataInfo->pData; @@ -1075,8 +1153,8 @@ static void doClearWindows(SAggSupporter* pSup, SOptrBasicInfo* pBinfo, for (int32_t i = 0; i < pBlock->info.rows; i += step) { SResultRowInfo dumyInfo; dumyInfo.cur.pageId = -1; - STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, tsCols[i], pIntrerval, - pIntrerval->precision, NULL); + STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, tsCols[i], pInterval, + pInterval->precision, NULL); step = getNumOfRowsInTimeWindow(&pBlock->info, tsCols, i, win.ekey, binarySearchForKey, NULL, TSDB_ORDER_ASC); doClearWindow(pSup, pBinfo, (char*)&win.skey, sizeof(TKEY), pBlock->info.groupId, numOfOutput); @@ -1086,6 +1164,39 @@ static void doClearWindows(SAggSupporter* pSup, SOptrBasicInfo* pBinfo, } } +static int32_t closeIntervalWindow(SHashObj *pHashMap, STimeWindowAggSupp *pSup, + SInterval* pInterval, SArray* closeWins) { + void *pIte = NULL; + size_t keyLen = 0; + while((pIte = taosHashIterate(pHashMap, pIte)) != NULL) { + void* key = taosHashGetKey(pIte, &keyLen); + uint64_t groupId = *(uint64_t*) key; + ASSERT(keyLen == GET_RES_WINDOW_KEY_LEN(sizeof(TSKEY))); + TSKEY ts = *(uint64_t*) ((char*)key + sizeof(uint64_t)); + SResultRowInfo dumyInfo; + dumyInfo.cur.pageId = -1; + STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, ts, pInterval, + pInterval->precision, NULL); + if (win.ekey < pSup->maxTs - pSup->waterMark) { + char keyBuf[GET_RES_WINDOW_KEY_LEN(sizeof(TSKEY))]; + SET_RES_WINDOW_KEY(keyBuf, &ts, sizeof(TSKEY), groupId); + taosHashRemove(pHashMap, keyBuf, keyLen); + SResKeyPos* pos = taosMemoryMalloc(sizeof(SResKeyPos) + sizeof(uint64_t)); + if (pos == NULL) { + return TSDB_CODE_OUT_OF_MEMORY; + } + pos->groupId = groupId; + pos->pos = *(SResultRowPosition*) pIte; + *(int64_t*)pos->key = ts; + if (!taosArrayPush(closeWins, &pos)) { + taosMemoryFree(pos); + return TSDB_CODE_OUT_OF_MEMORY; + } + } + } + return TSDB_CODE_SUCCESS; +} + static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) { SIntervalAggOperatorInfo* pInfo = pOperator->info; SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; @@ -1106,7 +1217,9 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) { SOperatorInfo* downstream = pOperator->pDownstream[0]; - SArray* pUpdated = NULL; + 
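/* note (reviewer sketch): pUpdated collects windows touched in this round, emitted immediately under the AT_ONCE trigger; pClosed collects windows whose ekey has fallen below maxTs - watermark, and is merged into pUpdated when the trigger is WINDOW_CLOSE */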
SArray* pUpdated = taosArrayInit(4, POINTER_BYTES); + SArray* pClosed = taosArrayInit(4, POINTER_BYTES); + while (1) { SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream); if (pBlock == NULL) { @@ -1128,10 +1241,19 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) { continue; } - pUpdated = hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, pBlock->info.groupId); + hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, pBlock->info.groupId, pUpdated); + pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, pBlock->info.window.ekey); } - - finalizeUpdatedResult(pOperator->numOfExprs, pInfo->aggSup.pResultBuf, pUpdated, pInfo->binfo.rowCellInfoOffset); + closeIntervalWindow(pInfo->aggSup.pResultRowHashTable, &pInfo->twAggSup, + &pInfo->interval, pClosed); + finalizeUpdatedResult(pOperator->numOfExprs, pInfo->aggSup.pResultBuf, pClosed, + pInfo->binfo.rowCellInfoOffset); + if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER__WINDOW_CLOSE) { + taosArrayAddAll(pUpdated, pClosed); + } + taosArrayDestroy(pClosed); + finalizeUpdatedResult(pOperator->numOfExprs, pInfo->aggSup.pResultBuf, pUpdated, + pInfo->binfo.rowCellInfoOffset); initMultiResInfoFromArrayList(&pInfo->groupResInfo, pUpdated); blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity); @@ -1935,63 +2057,6 @@ _error: return NULL; } -typedef int64_t (*__get_value_fn_t)(void* data, int32_t index); - -int32_t binarySearch(void* keyList, int num, TSKEY key, int order, - __get_value_fn_t getValuefn) { - int firstPos = 0, lastPos = num - 1, midPos = -1; - int numOfRows = 0; - - if (num <= 0) return -1; - if (order == TSDB_ORDER_DESC) { - // find the first position which is smaller than the key - while (1) { - if (key >= getValuefn(keyList, lastPos)) return lastPos; - if (key == getValuefn(keyList, firstPos)) return firstPos; - if (key < getValuefn(keyList, firstPos)) return firstPos - 1; - - numOfRows = lastPos - firstPos + 1; - midPos = (numOfRows >> 1) + firstPos; - - if (key < getValuefn(keyList, midPos)) { - lastPos = midPos - 1; - } else if (key > getValuefn(keyList, midPos)) { - firstPos = midPos + 1; - } else { - break; - } - } - - } else { - // find the first position which is bigger than the key - while (1) { - if (key <= getValuefn(keyList, firstPos)) return firstPos; - if (key == getValuefn(keyList, lastPos)) return lastPos; - - if (key > getValuefn(keyList, lastPos)) { - lastPos = lastPos + 1; - if (lastPos >= num) - return -1; - else - return lastPos; - } - - numOfRows = lastPos - firstPos + 1; - midPos = (numOfRows >> 1) + firstPos; - - if (key < getValuefn(keyList, midPos)) { - lastPos = midPos - 1; - } else if (key > getValuefn(keyList, midPos)) { - firstPos = midPos + 1; - } else { - break; - } - } - } - - return midPos; -} - int64_t getSessionWindowEndkey(void* data, int32_t index) { SArray* pWinInfos = (SArray*) data; SResultWindowInfo* pWin = taosArrayGet(pWinInfos, index); @@ -2223,12 +2288,14 @@ static void doStreamSessionWindowAggImpl(SOperatorInfo* pOperator, if (winNum > 0) { compactTimeWindow(pInfo, winIndex, winNum, groupId, numOfOutput, pTaskInfo, pStUpdated, pStDeleted); } - - code = taosHashPut(pStUpdated, &pCurWin->pos, sizeof(SResultRowPosition), &(pCurWin->win.skey), sizeof(TSKEY)); - if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + pCurWin->isClosed = false; + if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE) { + code = taosHashPut(pStUpdated, &pCurWin->pos, sizeof(SResultRowPosition), 
&(pCurWin->win.skey), sizeof(TSKEY)); + if (code != TSDB_CODE_SUCCESS) { + longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + } + pCurWin->isOutput = true; } - pCurWin->isOutput = true; i += winRows; } } @@ -2325,6 +2392,37 @@ bool isFinalSession(SStreamSessionAggOperatorInfo* pInfo) { return pInfo->pChildren != NULL; } +int32_t closeSessionWindow(SArray *pWins, STimeWindowAggSupp *pTwSup, SArray *pClosed, + int8_t calTrigger) { + // Todo(liuyao) save window to tdb + int32_t size = taosArrayGetSize(pWins); + for (int32_t i = 0; i < size; i++) { + SResultWindowInfo *pSeWin = taosArrayGet(pWins, i); + if (pSeWin->win.ekey < pTwSup->maxTs - pTwSup->waterMark) { + if (!pSeWin->isClosed) { + SResKeyPos* pos = taosMemoryMalloc(sizeof(SResKeyPos) + sizeof(uint64_t)); + if (pos == NULL) { + return TSDB_CODE_OUT_OF_MEMORY; + } + pos->groupId = 0; + pos->pos = pSeWin->pos; + *(int64_t*)pos->key = pSeWin->win.ekey; + if (!taosArrayPush(pClosed, &pos)) { + taosMemoryFree(pos); + return TSDB_CODE_OUT_OF_MEMORY; + } + pSeWin->isClosed = true; + if (calTrigger == STREAM_TRIGGER__WINDOW_CLOSE) { + pSeWin->isOutput = true; + } + } + continue; + } + break; + } + return TSDB_CODE_SUCCESS; +} + static SSDataBlock* doStreamSessionWindowAgg(SOperatorInfo* pOperator) { if (pOperator->status == OP_EXEC_DONE) { return NULL; } @@ -2377,13 +2475,21 @@ static SSDataBlock* doStreamSessionWindowAgg(SOperatorInfo* pOperator) { doStreamSessionWindowAggImpl(pOperator, pBlock, NULL, NULL); } doStreamSessionWindowAggImpl(pOperator, pBlock, pStUpdated, pInfo->pStDeleted); + pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, pBlock->info.window.ekey); } - // restore the value pOperator->status = OP_RES_TO_RETURN; + + SArray* pClosed = taosArrayInit(16, POINTER_BYTES); + closeSessionWindow(pInfo->streamAggSup.pResultRows, &pInfo->twAggSup, pClosed, + pInfo->twAggSup.calTrigger); SArray* pUpdated = taosArrayInit(16, POINTER_BYTES); copyUpdateResult(pStUpdated, pUpdated, pBInfo->pRes->info.groupId); taosHashCleanup(pStUpdated); + if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER__WINDOW_CLOSE) { + taosArrayAddAll(pUpdated, pClosed); + } + finalizeUpdatedResult(pOperator->numOfExprs, pInfo->streamAggSup.pResultBuf, pUpdated, pInfo->binfo.rowCellInfoOffset); initMultiResInfoFromArrayList(&pInfo->groupResInfo, pUpdated); diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index c2b1fcd549..10526e1a1c 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -156,6 +156,14 @@ static int32_t translatePercentile(SFunctionNode* pFunc, char* pErrBuf, int32_t return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } + //param0 + SNode* pParamNode0 = nodesListGetNode(pFunc->pParameterList, 0); + if (nodeType(pParamNode0) != QUERY_NODE_COLUMN) { + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, + "The first parameter of PERCENTILE function can only be a column"); + } + + //param1 SValueNode* pValue = (SValueNode*)nodesListGetNode(pFunc->pParameterList, 1); if (pValue->datum.i < 0 || pValue->datum.i > 100) { @@ -170,6 +178,7 @@ static int32_t translatePercentile(SFunctionNode* pFunc, char* pErrBuf, int32_t return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } + //set result type pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE}; return TSDB_CODE_SUCCESS; } @@ -188,30 +197,47 @@ static int32_t translateApercentile(SFunctionNode* pFunc, char* pErrBuf, int32_t 
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } - uint8_t para1Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; - uint8_t para2Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type; - if (!IS_NUMERIC_TYPE(para1Type) || !IS_INTEGER_TYPE(para2Type)) { + //param0 + SNode* pParamNode0 = nodesListGetNode(pFunc->pParameterList, 0); + if (nodeType(pParamNode0) != QUERY_NODE_COLUMN) { + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, + "The first parameter of APERCENTILE function can only be a column"); + } + + //param1 + SNode* pParamNode1 = nodesListGetNode(pFunc->pParameterList, 1); + if (nodeType(pParamNode1) != QUERY_NODE_VALUE) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } - SNode* pParamNode = nodesListGetNode(pFunc->pParameterList, 1); - if (nodeType(pParamNode) != QUERY_NODE_VALUE) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - SValueNode* pValue = (SValueNode*)pParamNode; + SValueNode* pValue = (SValueNode*)pParamNode1; if (pValue->datum.i < 0 || pValue->datum.i > 100) { return invaildFuncParaValueErrMsg(pErrBuf, len, pFunc->functionName); } pValue->notReserved = true; + uint8_t para1Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; + uint8_t para2Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type; + if (!IS_NUMERIC_TYPE(para1Type) || !IS_INTEGER_TYPE(para2Type)) { + return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); + } + + //param2 if (3 == numOfParams) { - SNode* pPara3 = nodesListGetNode(pFunc->pParameterList, 2); - if (QUERY_NODE_VALUE != nodeType(pPara3) || !validAperventileAlgo((SValueNode*)pPara3)) { + uint8_t para3Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type; + if (!IS_VAR_DATA_TYPE(para3Type)) { + return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); + } + + SNode* pParamNode2 = nodesListGetNode(pFunc->pParameterList, 2); + if (QUERY_NODE_VALUE != nodeType(pParamNode2) || !validAperventileAlgo((SValueNode*)pParamNode2)) { return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, "The third parameter (algorithm) of APERCENTILE must be 'default' or 't-digest'"); } + + pValue = (SValueNode*)pParamNode2; + pValue->notReserved = true; } pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE}; return TSDB_CODE_SUCCESS; } @@ -700,6 +726,11 @@ static int32_t translateDiff(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { //param1 if (numOfParams == 2) { + uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type; + if (!IS_INTEGER_TYPE(paraType)) { + return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); + } + SNode* pParamNode1 = nodesListGetNode(pFunc->pParameterList, 1); if (QUERY_NODE_VALUE != nodeType(pParamNode1)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index e2ee6cc6fe..068d06fc31 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -1965,7 +1965,7 @@ bool apercentileFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResult if (pCtx->numOfParams == 2) { pInfo->algo = APERCT_ALGO_DEFAULT; } else if (pCtx->numOfParams == 3) { - pInfo->algo = 
getApercentileAlgo(varDataVal(pCtx->param[2].param.pz)); if (pInfo->algo == APERCT_ALGO_UNKNOWN) { return false; } diff --git a/source/libs/index/inc/indexCache.h b/source/libs/index/inc/indexCache.h index 6cbe2532cc..1046a04db3 100644 --- a/source/libs/index/inc/indexCache.h +++ b/source/libs/index/inc/indexCache.h @@ -74,7 +74,7 @@ void indexCacheIteratorDestroy(Iterate* iiter); int indexCachePut(void* cache, SIndexTerm* term, uint64_t uid); // int indexCacheGet(void *cache, uint64_t *rst); -int indexCacheSearch(void* cache, SIndexTermQuery* query, SIdxTempResult* tr, STermValueType* s); +int indexCacheSearch(void* cache, SIndexTermQuery* query, SIdxTRslt* tr, STermValueType* s); void indexCacheRef(IndexCache* cache); void indexCacheUnRef(IndexCache* cache); diff --git a/source/libs/index/inc/indexTfile.h b/source/libs/index/inc/indexTfile.h index af32caa821..ca55aa93da 100644 --- a/source/libs/index/inc/indexTfile.h +++ b/source/libs/index/inc/indexTfile.h @@ -105,7 +105,7 @@ TFileReader* tfileGetReaderByCol(IndexTFile* tf, uint64_t suid, char* colName); TFileReader* tfileReaderOpen(char* path, uint64_t suid, int64_t version, const char* colName); TFileReader* tfileReaderCreate(WriterCtx* ctx); void tfileReaderDestroy(TFileReader* reader); -int tfileReaderSearch(TFileReader* reader, SIndexTermQuery* query, SIdxTempResult* tr); +int tfileReaderSearch(TFileReader* reader, SIndexTermQuery* query, SIdxTRslt* tr); void tfileReaderRef(TFileReader* reader); void tfileReaderUnRef(TFileReader* reader); @@ -120,7 +120,7 @@ int tfileWriterFinish(TFileWriter* tw); IndexTFile* indexTFileCreate(const char* path); void indexTFileDestroy(IndexTFile* tfile); int indexTFilePut(void* tfile, SIndexTerm* term, uint64_t uid); -int indexTFileSearch(void* tfile, SIndexTermQuery* query, SIdxTempResult* tr); +int indexTFileSearch(void* tfile, SIndexTermQuery* query, SIdxTRslt* tr); Iterate* tfileIteratorCreate(TFileReader* reader); void tfileIteratorDestroy(Iterate* iterator); diff --git a/source/libs/index/inc/indexUtil.h b/source/libs/index/inc/indexUtil.h index f1676ed411..dbaecaa963 100644 --- a/source/libs/index/inc/indexUtil.h +++ b/source/libs/index/inc/indexUtil.h @@ -66,7 +66,7 @@ extern "C" { * [1, 4, 5] * output:[4, 5] */ -void iIntersection(SArray *interResults, SArray *finalResult); +void iIntersection(SArray *in, SArray *out); /* multi sorted result union * input: [1, 2, 4, 5] @@ -74,7 +74,7 @@ void iIntersection(SArray *interResults, SArray *finalResult); * [1, 4, 5] * output:[1, 2, 3, 4, 5] */ -void iUnion(SArray *interResults, SArray *finalResult); +void iUnion(SArray *in, SArray *out); /* see example * total: [1, 2, 4, 5, 7, 8] @@ -92,19 +92,24 @@ typedef struct { uint64_t data; } SIdxVerdata; +/* + * index temp result + * + */ typedef struct { SArray *total; - SArray *added; - SArray *deled; -} SIdxTempResult; + SArray *add; + SArray *del; +} SIdxTRslt; -SIdxTempResult *sIdxTempResultCreate(); +SIdxTRslt *idxTRsltCreate(); -void sIdxTempResultClear(SIdxTempResult *tr); +void idxTRsltClear(SIdxTRslt *tr); -void sIdxTempResultDestroy(SIdxTempResult *tr); +void idxTRsltDestroy(SIdxTRslt *tr); + +void idxTRsltMergeTo(SIdxTRslt *tr, SArray *out); -void sIdxTempResultMergeTo(SArray *result, SIdxTempResult *tr); #ifdef __cplusplus } #endif diff --git a/source/libs/index/src/index.c b/source/libs/index/src/index.c index 500f570649..8584d95bf2 100644 --- a/source/libs/index/src/index.c +++ b/source/libs/index/src/index.c @@ -29,7 +29,7 @@ #include "lucene++/Lucene_c.h" #endif -#define 
INDEX_NUM_OF_THREADS 1 +#define INDEX_NUM_OF_THREADS 5 #define INDEX_QUEUE_SIZE 200 #define INDEX_DATA_BOOL_NULL 0x02 @@ -85,7 +85,7 @@ static int indexMergeFinalResults(SArray* interResults, EIndexOperatorType oTyp static int indexGenTFile(SIndex* index, IndexCache* cache, SArray* batch); // merge cache and tfile by opera type -static void indexMergeCacheAndTFile(SArray* result, IterateValue* icache, IterateValue* iTfv, SIdxTempResult* helper); +static void indexMergeCacheAndTFile(SArray* result, IterateValue* icache, IterateValue* iTfv, SIdxTRslt* helper); // static int32_t indexSerialTermKey(SIndexTerm* itm, char* buf); // int32_t indexSerialKey(ICacheKey* key, char* buf); @@ -201,6 +201,7 @@ int indexPut(SIndex* index, SIndexMultiTerm* fVals, uint64_t uid) { char buf[128] = {0}; ICacheKey key = {.suid = p->suid, .colName = p->colName, .nColName = strlen(p->colName), .colType = p->colType}; int32_t sz = indexSerialCacheKey(&key, buf); + indexDebug("suid: %" PRIu64 ", colName: %s, colType: %d", key.suid, key.colName, key.colType); IndexCache** cache = taosHashGet(index->colObj, buf, sz); assert(*cache != NULL); @@ -328,6 +329,7 @@ static int indexTermSearch(SIndex* sIdx, SIndexTermQuery* query, SArray** result char buf[128] = {0}; ICacheKey key = { .suid = term->suid, .colName = term->colName, .nColName = strlen(term->colName), .colType = term->colType}; + indexDebug("suid: %" PRIu64 ", colName: %s, colType: %d", key.suid, key.colName, key.colType); int32_t sz = indexSerialCacheKey(&key, buf); taosThreadMutexLock(&sIdx->mtx); @@ -341,7 +343,7 @@ static int indexTermSearch(SIndex* sIdx, SIndexTermQuery* query, SArray** result int64_t st = taosGetTimestampUs(); - SIdxTempResult* tr = sIdxTempResultCreate(); + SIdxTRslt* tr = idxTRsltCreate(); if (0 == indexCacheSearch(cache, query, tr, &s)) { if (s == kTypeDeletion) { indexInfo("col: %s already drop by", term->colName); @@ -363,12 +365,12 @@ static int indexTermSearch(SIndex* sIdx, SIndexTermQuery* query, SArray** result int64_t cost = taosGetTimestampUs() - st; indexInfo("search cost: %" PRIu64 "us", cost); - sIdxTempResultMergeTo(*result, tr); + idxTRsltMergeTo(tr, *result); - sIdxTempResultDestroy(tr); + idxTRsltDestroy(tr); return 0; END: - sIdxTempResultDestroy(tr); + idxTRsltDestroy(tr); return -1; } static void indexInterResultsDestroy(SArray* results) { @@ -404,18 +406,18 @@ static int indexMergeFinalResults(SArray* interResults, EIndexOperatorType oType return 0; } -static void indexMayMergeTempToFinalResult(SArray* result, TFileValue* tfv, SIdxTempResult* tr) { +static void indexMayMergeTempToFinalResult(SArray* result, TFileValue* tfv, SIdxTRslt* tr) { int32_t sz = taosArrayGetSize(result); if (sz > 0) { TFileValue* lv = taosArrayGetP(result, sz - 1); if (tfv != NULL && strcmp(lv->colVal, tfv->colVal) != 0) { - sIdxTempResultMergeTo(lv->tableId, tr); - sIdxTempResultClear(tr); + idxTRsltMergeTo(tr, lv->tableId); + idxTRsltClear(tr); taosArrayPush(result, &tfv); } else if (tfv == NULL) { // handle last iterator - sIdxTempResultMergeTo(lv->tableId, tr); + idxTRsltMergeTo(tr, lv->tableId); } else { // temp result saved in help tfileValueDestroy(tfv); @@ -424,7 +426,7 @@ static void indexMayMergeTempToFinalResult(SArray* result, TFileValue* tfv, SIdx taosArrayPush(result, &tfv); } } -static void indexMergeCacheAndTFile(SArray* result, IterateValue* cv, IterateValue* tv, SIdxTempResult* tr) { +static void indexMergeCacheAndTFile(SArray* result, IterateValue* cv, IterateValue* tv, SIdxTRslt* tr) { char* colVal = (cv != NULL) ? 
cv->colVal : tv->colVal; TFileValue* tfv = tfileValueCreate(colVal); @@ -434,9 +436,9 @@ static void indexMergeCacheAndTFile(SArray* result, IterateValue* cv, IterateVal uint64_t id = *(uint64_t*)taosArrayGet(cv->val, 0); uint32_t ver = cv->ver; if (cv->type == ADD_VALUE) { - INDEX_MERGE_ADD_DEL(tr->deled, tr->added, id) + INDEX_MERGE_ADD_DEL(tr->del, tr->add, id) } else if (cv->type == DEL_VALUE) { - INDEX_MERGE_ADD_DEL(tr->added, tr->deled, id) + INDEX_MERGE_ADD_DEL(tr->add, tr->del, id) } } if (tv != NULL) { @@ -489,7 +491,7 @@ int indexFlushCacheToTFile(SIndex* sIdx, void* cache) { bool cn = cacheIter ? cacheIter->next(cacheIter) : false; bool tn = tfileIter ? tfileIter->next(tfileIter) : false; - SIdxTempResult* tr = sIdxTempResultCreate(); + SIdxTRslt* tr = idxTRsltCreate(); while (cn == true || tn == true) { IterateValue* cv = (cn == true) ? cacheIter->getValue(cacheIter) : NULL; IterateValue* tv = (tn == true) ? tfileIter->getValue(tfileIter) : NULL; @@ -515,7 +517,7 @@ int indexFlushCacheToTFile(SIndex* sIdx, void* cache) { } } indexMayMergeTempToFinalResult(result, NULL, tr); - sIdxTempResultDestroy(tr); + idxTRsltDestroy(tr); int ret = indexGenTFile(sIdx, pCache, result); indexDestroyFinalResult(result); diff --git a/source/libs/index/src/indexCache.c b/source/libs/index/src/indexCache.c index 6e52c4b1ba..3b33006452 100644 --- a/source/libs/index/src/indexCache.c +++ b/source/libs/index/src/indexCache.c @@ -36,32 +36,31 @@ static char* indexCacheTermGet(const void* pData); static MemTable* indexInternalCacheCreate(int8_t type); -static int32_t cacheSearchTerm(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s); -static int32_t cacheSearchPrefix(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s); -static int32_t cacheSearchSuffix(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s); -static int32_t cacheSearchRegex(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s); -static int32_t cacheSearchLessThan(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s); -static int32_t cacheSearchLessEqual(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s); -static int32_t cacheSearchGreaterThan(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s); -static int32_t cacheSearchGreaterEqual(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s); -static int32_t cacheSearchRange(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s); +static int32_t cacheSearchTerm(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); +static int32_t cacheSearchPrefix(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); +static int32_t cacheSearchSuffix(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); +static int32_t cacheSearchRegex(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); +static int32_t cacheSearchLessThan(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); +static int32_t cacheSearchLessEqual(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); +static int32_t cacheSearchGreaterThan(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); +static int32_t cacheSearchGreaterEqual(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); +static int32_t cacheSearchRange(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); /*comm func of compare, used in (LE/LT/GE/GT compare)*/ -static int32_t cacheSearchCompareFunc(void* cache, SIndexTerm* ct, SIdxTempResult* tr, 
STermValueType* s, - RangeType type); -static int32_t cacheSearchTerm_JSON(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s); -static int32_t cacheSearchPrefix_JSON(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s); -static int32_t cacheSearchSuffix_JSON(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s); -static int32_t cacheSearchRegex_JSON(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s); -static int32_t cacheSearchLessThan_JSON(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s); -static int32_t cacheSearchLessEqual_JSON(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s); -static int32_t cacheSearchGreaterThan_JSON(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s); -static int32_t cacheSearchGreaterEqual_JSON(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s); -static int32_t cacheSearchRange_JSON(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s); +static int32_t cacheSearchCompareFunc(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s, RangeType type); +static int32_t cacheSearchTerm_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); +static int32_t cacheSearchPrefix_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); +static int32_t cacheSearchSuffix_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); +static int32_t cacheSearchRegex_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); +static int32_t cacheSearchLessThan_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); +static int32_t cacheSearchLessEqual_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); +static int32_t cacheSearchGreaterThan_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); +static int32_t cacheSearchGreaterEqual_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); +static int32_t cacheSearchRange_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); -static int32_t cacheSearchCompareFunc_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s, +static int32_t cacheSearchCompareFunc_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s, RangeType type); -static int32_t (*cacheSearch[][QUERY_MAX])(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s) = { +static int32_t (*cacheSearch[][QUERY_MAX])(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s) = { {cacheSearchTerm, cacheSearchPrefix, cacheSearchSuffix, cacheSearchRegex, cacheSearchLessThan, cacheSearchLessEqual, cacheSearchGreaterThan, cacheSearchGreaterEqual, cacheSearchRange}, {cacheSearchTerm_JSON, cacheSearchPrefix_JSON, cacheSearchSuffix_JSON, cacheSearchRegex_JSON, @@ -71,7 +70,7 @@ static int32_t (*cacheSearch[][QUERY_MAX])(void* cache, SIndexTerm* ct, SIdxTemp static void doMergeWork(SSchedMsg* msg); static bool indexCacheIteratorNext(Iterate* itera); -static int32_t cacheSearchTerm(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) { +static int32_t cacheSearchTerm(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) { if (cache == NULL) { return 0; } @@ -93,11 +92,11 @@ static int32_t cacheSearchTerm(void* cache, SIndexTerm* term, SIdxTempResult* tr CacheTerm* c = (CacheTerm*)SL_GET_NODE_DATA(node); if (0 == strcmp(c->colVal, pCt->colVal)) { if (c->operaType == ADD_VALUE) { - INDEX_MERGE_ADD_DEL(tr->deled, tr->added, c->uid) + 
INDEX_MERGE_ADD_DEL(tr->del, tr->add, c->uid) // taosArrayPush(result, &c->uid); *s = kTypeValue; } else if (c->operaType == DEL_VALUE) { - INDEX_MERGE_ADD_DEL(tr->added, tr->deled, c->uid) + INDEX_MERGE_ADD_DEL(tr->add, tr->del, c->uid) } } else { break; @@ -108,20 +107,19 @@ static int32_t cacheSearchTerm(void* cache, SIndexTerm* term, SIdxTempResult* tr tSkipListDestroyIter(iter); return 0; } -static int32_t cacheSearchPrefix(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) { +static int32_t cacheSearchPrefix(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) { // impl later return 0; } -static int32_t cacheSearchSuffix(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) { +static int32_t cacheSearchSuffix(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) { // impl later return 0; } -static int32_t cacheSearchRegex(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) { +static int32_t cacheSearchRegex(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) { // impl later return 0; } -static int32_t cacheSearchCompareFunc(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s, - RangeType type) { +static int32_t cacheSearchCompareFunc(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s, RangeType type) { if (cache == NULL) { return 0; } @@ -133,6 +131,7 @@ static int32_t cacheSearchCompareFunc(void* cache, SIndexTerm* term, SIdxTempRes CacheTerm* pCt = taosMemoryCalloc(1, sizeof(CacheTerm)); pCt->colVal = term->colVal; + pCt->colType = term->colType; pCt->version = atomic_load_64(&pCache->version); char* key = indexCacheTermGet(pCt); @@ -147,11 +146,11 @@ static int32_t cacheSearchCompareFunc(void* cache, SIndexTerm* term, SIdxTempRes TExeCond cond = cmpFn(c->colVal, pCt->colVal, pCt->colType); if (cond == MATCH) { if (c->operaType == ADD_VALUE) { - INDEX_MERGE_ADD_DEL(tr->deled, tr->added, c->uid) + INDEX_MERGE_ADD_DEL(tr->del, tr->add, c->uid) // taosArrayPush(result, &c->uid); *s = kTypeValue; } else if (c->operaType == DEL_VALUE) { - INDEX_MERGE_ADD_DEL(tr->added, tr->deled, c->uid) + INDEX_MERGE_ADD_DEL(tr->add, tr->del, c->uid) } } else if (cond == CONTINUE) { continue; @@ -163,20 +162,20 @@ static int32_t cacheSearchCompareFunc(void* cache, SIndexTerm* term, SIdxTempRes tSkipListDestroyIter(iter); return TSDB_CODE_SUCCESS; } -static int32_t cacheSearchLessThan(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) { +static int32_t cacheSearchLessThan(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) { return cacheSearchCompareFunc(cache, term, tr, s, LT); } -static int32_t cacheSearchLessEqual(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) { +static int32_t cacheSearchLessEqual(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) { return cacheSearchCompareFunc(cache, term, tr, s, LE); } -static int32_t cacheSearchGreaterThan(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) { +static int32_t cacheSearchGreaterThan(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) { return cacheSearchCompareFunc(cache, term, tr, s, GT); } -static int32_t cacheSearchGreaterEqual(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) { +static int32_t cacheSearchGreaterEqual(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) { return cacheSearchCompareFunc(cache, term, tr, s, GE); } -static int32_t cacheSearchTerm_JSON(void* cache, SIndexTerm* term, 
SIdxTempResult* tr, STermValueType* s) { +static int32_t cacheSearchTerm_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) { if (cache == NULL) { return 0; } @@ -204,11 +203,11 @@ static int32_t cacheSearchTerm_JSON(void* cache, SIndexTerm* term, SIdxTempResul if (0 == strcmp(c->colVal, pCt->colVal)) { if (c->operaType == ADD_VALUE) { - INDEX_MERGE_ADD_DEL(tr->deled, tr->added, c->uid) + INDEX_MERGE_ADD_DEL(tr->del, tr->add, c->uid) // taosArrayPush(result, &c->uid); *s = kTypeValue; } else if (c->operaType == DEL_VALUE) { - INDEX_MERGE_ADD_DEL(tr->added, tr->deled, c->uid) + INDEX_MERGE_ADD_DEL(tr->add, tr->del, c->uid) } } else { break; @@ -222,32 +221,32 @@ static int32_t cacheSearchTerm_JSON(void* cache, SIndexTerm* term, SIdxTempResul return TSDB_CODE_SUCCESS; } -static int32_t cacheSearchPrefix_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) { +static int32_t cacheSearchPrefix_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) { return TSDB_CODE_SUCCESS; } -static int32_t cacheSearchSuffix_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) { +static int32_t cacheSearchSuffix_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) { return TSDB_CODE_SUCCESS; } -static int32_t cacheSearchRegex_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) { +static int32_t cacheSearchRegex_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) { return TSDB_CODE_SUCCESS; } -static int32_t cacheSearchLessThan_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) { +static int32_t cacheSearchLessThan_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) { return cacheSearchCompareFunc_JSON(cache, term, tr, s, LT); } -static int32_t cacheSearchLessEqual_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) { +static int32_t cacheSearchLessEqual_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) { return cacheSearchCompareFunc_JSON(cache, term, tr, s, LE); } -static int32_t cacheSearchGreaterThan_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) { +static int32_t cacheSearchGreaterThan_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) { return cacheSearchCompareFunc_JSON(cache, term, tr, s, GT); } -static int32_t cacheSearchGreaterEqual_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) { +static int32_t cacheSearchGreaterEqual_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) { return cacheSearchCompareFunc_JSON(cache, term, tr, s, GE); } -static int32_t cacheSearchRange_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) { +static int32_t cacheSearchRange_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) { return TSDB_CODE_SUCCESS; } -static int32_t cacheSearchCompareFunc_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s, +static int32_t cacheSearchCompareFunc_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s, RangeType type) { if (cache == NULL) { return 0; @@ -289,11 +288,11 @@ static int32_t cacheSearchCompareFunc_JSON(void* cache, SIndexTerm* term, SIdxTe TExeCond cond = cmpFn(p + skip, term->colVal, dType); if (cond == MATCH) { if (c->operaType == ADD_VALUE) { - INDEX_MERGE_ADD_DEL(tr->deled, tr->added, c->uid) + INDEX_MERGE_ADD_DEL(tr->del, tr->add, c->uid) // taosArrayPush(result, &c->uid); *s = 
kTypeValue; } else if (c->operaType == DEL_VALUE) { - INDEX_MERGE_ADD_DEL(tr->added, tr->deled, c->uid) + INDEX_MERGE_ADD_DEL(tr->add, tr->del, c->uid) } } else if (cond == CONTINUE) { continue; @@ -309,7 +308,7 @@ static int32_t cacheSearchCompareFunc_JSON(void* cache, SIndexTerm* term, SIdxTe return TSDB_CODE_SUCCESS; } -static int32_t cacheSearchRange(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) { +static int32_t cacheSearchRange(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) { // impl later return 0; } @@ -568,7 +567,7 @@ int indexCacheDel(void* cache, const char* fieldValue, int32_t fvlen, uint64_t u return 0; } -static int32_t indexQueryMem(MemTable* mem, SIndexTermQuery* query, SIdxTempResult* tr, STermValueType* s) { +static int32_t indexQueryMem(MemTable* mem, SIndexTermQuery* query, SIdxTRslt* tr, STermValueType* s) { if (mem == NULL) { return 0; } @@ -582,7 +581,7 @@ static int32_t indexQueryMem(MemTable* mem, SIndexTermQuery* query, SIdxTempResu return cacheSearch[0][qtype](mem, term, tr, s); } } -int indexCacheSearch(void* cache, SIndexTermQuery* query, SIdxTempResult* result, STermValueType* s) { +int indexCacheSearch(void* cache, SIndexTermQuery* query, SIdxTRslt* result, STermValueType* s) { int64_t st = taosGetTimestampUs(); if (cache == NULL) { return 0; @@ -597,10 +596,10 @@ int indexCacheSearch(void* cache, SIndexTermQuery* query, SIdxTempResult* result indexMemRef(imm); taosThreadMutexUnlock(&pCache->mtx); - int ret = indexQueryMem(mem, query, result, s); + int ret = (mem && mem->mem) ? indexQueryMem(mem, query, result, s) : 0; if (ret == 0 && *s != kTypeDeletion) { // continue search in imm - ret = indexQueryMem(imm, query, result, s); + ret = (imm && imm->mem) ? indexQueryMem(imm, query, result, s) : 0; } indexMemUnRef(mem); @@ -709,7 +708,7 @@ static int32_t indexCacheJsonTermCompare(const void* l, const void* r) { return cmp; } static MemTable* indexInternalCacheCreate(int8_t type) { - int ttype = INDEX_TYPE_CONTAIN_EXTERN_TYPE(type, TSDB_DATA_TYPE_JSON) ? TSDB_DATA_TYPE_BINARY : type; + int ttype = INDEX_TYPE_CONTAIN_EXTERN_TYPE(type, TSDB_DATA_TYPE_JSON) ? TSDB_DATA_TYPE_BINARY : TSDB_DATA_TYPE_BINARY; int32_t (*cmpFn)(const void* l, const void* r) = INDEX_TYPE_CONTAIN_EXTERN_TYPE(type, TSDB_DATA_TYPE_JSON) ? 
indexCacheJsonTermCompare : indexCacheTermCompare; diff --git a/source/libs/index/src/indexFilter.c b/source/libs/index/src/indexFilter.c index 0273867ccf..b882caa168 100644 --- a/source/libs/index/src/indexFilter.c +++ b/source/libs/index/src/indexFilter.c @@ -37,12 +37,15 @@ typedef struct SIFParam { int64_t suid; // add later char dbName[TSDB_DB_NAME_LEN]; char colName[TSDB_COL_NAME_LEN]; + + SIndexMetaArg arg; } SIFParam; typedef struct SIFCtx { - int32_t code; - SHashObj *pRes; /* element is SIFParam */ - bool noExec; // true: just iterate condition tree, and add hint to executor plan + int32_t code; + SHashObj * pRes; /* element is SIFParam */ + bool noExec; // true: just iterate condition tree, and add hint to executor plan + SIndexMetaArg arg; // SIdxFltStatus st; } SIFCtx; @@ -257,7 +260,9 @@ static int32_t sifExecFunction(SFunctionNode *node, SIFCtx *ctx, SIFParam *outpu return TSDB_CODE_QRY_INVALID_INPUT; } static int32_t sifDoIndex(SIFParam *left, SIFParam *right, int8_t operType, SIFParam *output) { - SIndexTerm *tm = indexTermCreate(left->suid, DEFAULT, left->colValType, left->colName, strlen(left->colName), +#ifdef USE_INVERTED_INDEX + SIndexMetaArg *arg = &output->arg; + SIndexTerm * tm = indexTermCreate(arg->suid, DEFAULT, left->colValType, left->colName, strlen(left->colName), right->condValue, strlen(right->condValue)); if (tm == NULL) { return TSDB_CODE_QRY_OUT_OF_MEMORY; @@ -268,9 +273,13 @@ static int32_t sifDoIndex(SIFParam *left, SIFParam *right, int8_t operType, SIFP SIndexMultiTermQuery *mtm = indexMultiTermQueryCreate(MUST); indexMultiTermQueryAdd(mtm, tm, qtype); - int ret = indexSearch(NULL, mtm, output->result); + int ret = indexSearch(arg->metaHandle, mtm, output->result); + indexDebug("index filter data size: %d", (int)taosArrayGetSize(output->result)); indexMultiTermQueryDestroy(mtm); return ret; +#else + return 0; +#endif } static int32_t sifLessThanFunc(SIFParam *left, SIFParam *right, SIFParam *output) { @@ -372,6 +381,8 @@ static int32_t sifExecOper(SOperatorNode *node, SIFCtx *ctx, SIFParam *output) { SIFParam *params = NULL; SIF_ERR_RET(sifInitOperParams(¶ms, node, ctx)); + // ugly code, refactor later + output->arg = ctx->arg; sif_func_t operFn = sifGetOperFn(node->opType); if (ctx->noExec && operFn == NULL) { output->status = SFLT_NOT_INDEX; @@ -423,7 +434,7 @@ _return: static EDealRes sifWalkFunction(SNode *pNode, void *context) { SFunctionNode *node = (SFunctionNode *)pNode; - SIFParam output = {0}; + SIFParam output = {.result = taosArrayInit(8, sizeof(uint64_t))}; SIFCtx *ctx = context; ctx->code = sifExecFunction(node, ctx, &output); @@ -439,7 +450,8 @@ static EDealRes sifWalkFunction(SNode *pNode, void *context) { } static EDealRes sifWalkLogic(SNode *pNode, void *context) { SLogicConditionNode *node = (SLogicConditionNode *)pNode; - SIFParam output = {0}; + + SIFParam output = {.result = taosArrayInit(8, sizeof(uint64_t))}; SIFCtx *ctx = context; ctx->code = sifExecLogic(node, ctx, &output); @@ -455,7 +467,7 @@ static EDealRes sifWalkLogic(SNode *pNode, void *context) { } static EDealRes sifWalkOper(SNode *pNode, void *context) { SOperatorNode *node = (SOperatorNode *)pNode; - SIFParam output = {0}; + SIFParam output = {.result = taosArrayInit(8, sizeof(uint64_t))}; SIFCtx *ctx = context; ctx->code = sifExecOper(node, ctx, &output); @@ -507,8 +519,9 @@ static int32_t sifCalculate(SNode *pNode, SIFParam *pDst) { return TSDB_CODE_QRY_INVALID_INPUT; } int32_t code = 0; - SIFCtx ctx = {.code = 0, .noExec = false}; + SIFCtx ctx = {.code = 0, 
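The indexFilter.c hunk threads a SIndexMetaArg from the caller through SIFCtx into each leaf, so sifDoIndex can hand a real meta/index handle to indexSearch instead of NULL, and degrades to a stub when the library is built without USE_INVERTED_INDEX. The context-threading shape, with hypothetical names (`idxSearch`, `FltCtx`, `MetaArg`) standing in for the real plumbing:

```c
#include <stddef.h>

typedef struct { void *metaHandle; unsigned long suid; } MetaArg; /* mirrors SIndexMetaArg */
typedef struct { int code; MetaArg arg; } FltCtx;                 /* mirrors SIFCtx */

/* hypothetical search entry point standing in for indexSearch() */
static int idxSearch(void *handle) { return handle != NULL ? 0 : -1; }

/* leaf evaluation reads the handle from the ctx instead of passing NULL */
static int evalLeaf(const FltCtx *ctx) {
#ifdef USE_INVERTED_INDEX
  return idxSearch(ctx->arg.metaHandle);
#else
  (void)ctx;
  return 0;  /* index disabled at build time: report no hits */
#endif
}

int main(void) {
  int handleObj = 1;  /* hypothetical handle object */
  FltCtx ctx = {.code = 0, .arg = {.metaHandle = &handleObj, .suid = 1}};
  return evalLeaf(&ctx);  /* ctx is seeded once, read at every leaf */
}
```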
.noExec = false, .arg = pDst->arg}; ctx.pRes = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); + if (NULL == ctx.pRes) { indexError("index-filter failed to taosHashInit"); return TSDB_CODE_QRY_OUT_OF_MEMORY; @@ -523,7 +536,9 @@ static int32_t sifCalculate(SNode *pNode, SIFParam *pDst) { indexError("no valid res in hash, node:(%p), type(%d)", (void *)&pNode, nodeType(pNode)); SIF_ERR_RET(TSDB_CODE_QRY_APP_ERROR); } - taosArrayAddAll(pDst->result, res->result); + if (res->result != NULL) { + taosArrayAddAll(pDst->result, res->result); + } sifFreeParam(res); taosHashRemove(ctx.pRes, (void *)&pNode, POINTER_BYTES); @@ -561,7 +576,7 @@ static int32_t sifGetFltHint(SNode *pNode, SIdxFltStatus *status) { SIF_RET(code); } -int32_t doFilterTag(const SNode *pFilterNode, SArray *result) { +int32_t doFilterTag(const SNode *pFilterNode, SIndexMetaArg *metaArg, SArray *result) { if (pFilterNode == NULL) { return TSDB_CODE_SUCCESS; } @@ -570,10 +585,12 @@ int32_t doFilterTag(const SNode *pFilterNode, SArray *result) { // todo move to the initialization function // SIF_ERR_RET(filterInitFromNode((SNode *)pFilterNode, &filter, 0)); - SIFParam param = {0}; + SArray * output = taosArrayInit(8, sizeof(uint64_t)); + SIFParam param = {.arg = *metaArg, .result = output}; SIF_ERR_RET(sifCalculate((SNode *)pFilterNode, ¶m)); taosArrayAddAll(result, param.result); + // taosArrayAddAll(result, param.result); sifFreeParam(¶m); SIF_RET(TSDB_CODE_SUCCESS); } diff --git a/source/libs/index/src/indexTfile.c b/source/libs/index/src/indexTfile.c index 3de556e8b5..53dd2923ac 100644 --- a/source/libs/index/src/indexTfile.c +++ b/source/libs/index/src/indexTfile.c @@ -60,31 +60,31 @@ static void tfileGenFileFullName(char* fullname, const char* path, uint64_t s /* * search from tfile */ -static int32_t tfSearchTerm(void* reader, SIndexTerm* tem, SIdxTempResult* tr); -static int32_t tfSearchPrefix(void* reader, SIndexTerm* tem, SIdxTempResult* tr); -static int32_t tfSearchSuffix(void* reader, SIndexTerm* tem, SIdxTempResult* tr); -static int32_t tfSearchRegex(void* reader, SIndexTerm* tem, SIdxTempResult* tr); -static int32_t tfSearchLessThan(void* reader, SIndexTerm* tem, SIdxTempResult* tr); -static int32_t tfSearchLessEqual(void* reader, SIndexTerm* tem, SIdxTempResult* tr); -static int32_t tfSearchGreaterThan(void* reader, SIndexTerm* tem, SIdxTempResult* tr); -static int32_t tfSearchGreaterEqual(void* reader, SIndexTerm* tem, SIdxTempResult* tr); -static int32_t tfSearchRange(void* reader, SIndexTerm* tem, SIdxTempResult* tr); +static int32_t tfSearchTerm(void* reader, SIndexTerm* tem, SIdxTRslt* tr); +static int32_t tfSearchPrefix(void* reader, SIndexTerm* tem, SIdxTRslt* tr); +static int32_t tfSearchSuffix(void* reader, SIndexTerm* tem, SIdxTRslt* tr); +static int32_t tfSearchRegex(void* reader, SIndexTerm* tem, SIdxTRslt* tr); +static int32_t tfSearchLessThan(void* reader, SIndexTerm* tem, SIdxTRslt* tr); +static int32_t tfSearchLessEqual(void* reader, SIndexTerm* tem, SIdxTRslt* tr); +static int32_t tfSearchGreaterThan(void* reader, SIndexTerm* tem, SIdxTRslt* tr); +static int32_t tfSearchGreaterEqual(void* reader, SIndexTerm* tem, SIdxTRslt* tr); +static int32_t tfSearchRange(void* reader, SIndexTerm* tem, SIdxTRslt* tr); -static int32_t tfSearchCompareFunc(void* reader, SIndexTerm* tem, SIdxTempResult* tr, RangeType ctype); +static int32_t tfSearchCompareFunc(void* reader, SIndexTerm* tem, SIdxTRslt* tr, RangeType ctype); -static int32_t tfSearchTerm_JSON(void* reader, 
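Initializing every walker's output with a live result array, plus the new NULL guard before taosArrayAddAll, removes a crash path when a branch of the expression tree produces no hits. The defensive shape, sketched with a toy dynamic array:

```c
#include <stdlib.h>

typedef struct { long *p; size_t n, cap; } Arr;

static Arr *arrInit(size_t cap) {
  Arr *a = calloc(1, sizeof(*a));
  if (a) { a->p = malloc(cap * sizeof(long)); a->cap = a->p ? cap : 0; }
  return a;
}

static void arrAddAll(Arr *dst, const Arr *src) {
  if (src == NULL) return;  /* the guard added around taosArrayAddAll */
  for (size_t i = 0; i < src->n && dst->n < dst->cap; i++)
    dst->p[dst->n++] = src->p[i];
}

int main(void) {
  Arr *out = arrInit(8);   /* eager init, as in the updated walkers */
  arrAddAll(out, NULL);    /* empty branch: no crash, no rows */
  free(out->p); free(out);
  return 0;
}
```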
SIndexTerm* tem, SIdxTempResult* tr); -static int32_t tfSearchPrefix_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr); -static int32_t tfSearchSuffix_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr); -static int32_t tfSearchRegex_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr); -static int32_t tfSearchLessThan_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr); -static int32_t tfSearchLessEqual_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr); -static int32_t tfSearchGreaterThan_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr); -static int32_t tfSearchGreaterEqual_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr); -static int32_t tfSearchRange_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr); +static int32_t tfSearchTerm_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr); +static int32_t tfSearchPrefix_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr); +static int32_t tfSearchSuffix_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr); +static int32_t tfSearchRegex_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr); +static int32_t tfSearchLessThan_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr); +static int32_t tfSearchLessEqual_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr); +static int32_t tfSearchGreaterThan_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr); +static int32_t tfSearchGreaterEqual_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr); +static int32_t tfSearchRange_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr); -static int32_t tfSearchCompareFunc_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr, RangeType ctype); +static int32_t tfSearchCompareFunc_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr, RangeType ctype); -static int32_t (*tfSearch[][QUERY_MAX])(void* reader, SIndexTerm* tem, SIdxTempResult* tr) = { +static int32_t (*tfSearch[][QUERY_MAX])(void* reader, SIndexTerm* tem, SIdxTRslt* tr) = { {tfSearchTerm, tfSearchPrefix, tfSearchSuffix, tfSearchRegex, tfSearchLessThan, tfSearchLessEqual, tfSearchGreaterThan, tfSearchGreaterEqual, tfSearchRange}, {tfSearchTerm_JSON, tfSearchPrefix_JSON, tfSearchSuffix_JSON, tfSearchRegex_JSON, tfSearchLessThan_JSON, @@ -211,16 +211,16 @@ void tfileReaderDestroy(TFileReader* reader) { } // T_REF_INC(reader); fstDestroy(reader->fst); - writerCtxDestroy(reader->ctx, reader->remove); if (reader->remove) { indexInfo("%s is removed", reader->ctx->file.buf); } else { indexInfo("%s is not removed", reader->ctx->file.buf); } + writerCtxDestroy(reader->ctx, reader->remove); taosMemoryFree(reader); } -static int32_t tfSearchTerm(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { +static int32_t tfSearchTerm(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { int ret = 0; char* p = tem->colVal; uint64_t sz = tem->nColVal; @@ -243,7 +243,7 @@ static int32_t tfSearchTerm(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { return 0; } -static int32_t tfSearchPrefix(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { +static int32_t tfSearchPrefix(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { bool hasJson = INDEX_TYPE_CONTAIN_EXTERN_TYPE(tem->colType, TSDB_DATA_TYPE_JSON); char* p = tem->colVal; uint64_t sz = tem->nColVal; @@ -279,7 +279,7 @@ static int32_t tfSearchPrefix(void* reader, SIndexTerm* tem, SIdxTempResult* tr) } return 0; } -static int32_t tfSearchSuffix(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { +static int32_t tfSearchSuffix(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { bool hasJson = INDEX_TYPE_CONTAIN_EXTERN_TYPE(tem->colType, TSDB_DATA_TYPE_JSON); 
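The tfSearch declarations above feed a two-dimensional function-pointer table: one axis picks plain vs JSON column handling, the other the query type, so the reader's search entry point is a single table lookup rather than a nested switch. The idea in miniature, with hypothetical handlers:

```c
#include <stdio.h>

typedef int (*searchFn)(int key);

enum { Q_TERM = 0, Q_PREFIX, Q_MAX };  /* query kinds, trimmed down */

static int termPlain(int k) { printf("term plain %d\n", k); return 0; }
static int prefPlain(int k) { printf("pref plain %d\n", k); return 0; }
static int termJson(int k)  { printf("term json  %d\n", k); return 0; }
static int prefJson(int k)  { printf("pref json  %d\n", k); return 0; }

static searchFn dispatch[][Q_MAX] = {
    {termPlain, prefPlain},  /* plain-column row */
    {termJson, prefJson},    /* JSON-column row  */
};

int main(void) {
  int isJson = 1, qtype = Q_PREFIX;
  return dispatch[isJson][qtype](42);  /* mirrors tfSearch[isJson][qtype](...) */
}
```

A new query kind means adding one handler per row; a missing slot shows up as a NULL call at runtime rather than a compile error, which is the main hazard of this pattern.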
int ret = 0; @@ -298,7 +298,7 @@ static int32_t tfSearchSuffix(void* reader, SIndexTerm* tem, SIdxTempResult* tr) fstSliceDestroy(&key); return 0; } -static int32_t tfSearchRegex(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { +static int32_t tfSearchRegex(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { bool hasJson = INDEX_TYPE_CONTAIN_EXTERN_TYPE(tem->colType, TSDB_DATA_TYPE_JSON); int ret = 0; @@ -319,7 +319,7 @@ static int32_t tfSearchRegex(void* reader, SIndexTerm* tem, SIdxTempResult* tr) return 0; } -static int32_t tfSearchCompareFunc(void* reader, SIndexTerm* tem, SIdxTempResult* tr, RangeType type) { +static int32_t tfSearchCompareFunc(void* reader, SIndexTerm* tem, SIdxTRslt* tr, RangeType type) { int ret = 0; char* p = tem->colVal; int skip = 0; @@ -358,19 +358,19 @@ static int32_t tfSearchCompareFunc(void* reader, SIndexTerm* tem, SIdxTempResult fstStreamBuilderDestroy(sb); return TSDB_CODE_SUCCESS; } -static int32_t tfSearchLessThan(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { +static int32_t tfSearchLessThan(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { return tfSearchCompareFunc(reader, tem, tr, LT); } -static int32_t tfSearchLessEqual(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { +static int32_t tfSearchLessEqual(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { return tfSearchCompareFunc(reader, tem, tr, LE); } -static int32_t tfSearchGreaterThan(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { +static int32_t tfSearchGreaterThan(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { return tfSearchCompareFunc(reader, tem, tr, GT); } -static int32_t tfSearchGreaterEqual(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { +static int32_t tfSearchGreaterEqual(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { return tfSearchCompareFunc(reader, tem, tr, GE); } -static int32_t tfSearchRange(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { +static int32_t tfSearchRange(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { bool hasJson = INDEX_TYPE_CONTAIN_EXTERN_TYPE(tem->colType, TSDB_DATA_TYPE_JSON); int ret = 0; char* p = tem->colVal; @@ -399,7 +399,7 @@ static int32_t tfSearchRange(void* reader, SIndexTerm* tem, SIdxTempResult* tr) fstSliceDestroy(&key); return 0; } -static int32_t tfSearchTerm_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { +static int32_t tfSearchTerm_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { int ret = 0; char* p = indexPackJsonData(tem); int sz = strlen(p); @@ -424,36 +424,36 @@ static int32_t tfSearchTerm_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* // deprecate api return TSDB_CODE_SUCCESS; } -static int32_t tfSearchPrefix_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { +static int32_t tfSearchPrefix_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { // impl later return TSDB_CODE_SUCCESS; } -static int32_t tfSearchSuffix_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { +static int32_t tfSearchSuffix_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { // impl later return TSDB_CODE_SUCCESS; } -static int32_t tfSearchRegex_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { +static int32_t tfSearchRegex_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { // impl later return TSDB_CODE_SUCCESS; } -static int32_t tfSearchLessThan_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { +static int32_t tfSearchLessThan_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { return tfSearchCompareFunc_JSON(reader, tem, tr, LT); } -static int32_t tfSearchLessEqual_JSON(void* reader, 
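The tfileReaderDestroy hunk nearby is a use-after-free fix: the old order destroyed reader->ctx first and then logged ctx->file.buf from freed memory, so the hunk moves writerCtxDestroy below the logging. The rule in isolation, with a hypothetical resource type:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct { char *path; } Ctx;

static void ctxDestroy(Ctx *c) { free(c->path); free(c); }

static void readerDestroy(Ctx *ctx, int removed) {
  /* log first, while ctx->path is still alive ... */
  printf("%s is %sremoved\n", ctx->path, removed ? "" : "not ");
  /* ... and free last; the old order read freed memory here */
  ctxDestroy(ctx);
}

int main(void) {
  Ctx *c = malloc(sizeof(*c));
  c->path = strdup("/tmp/idx.tindex");  /* hypothetical file name */
  readerDestroy(c, 0);
  return 0;
}
```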
SIndexTerm* tem, SIdxTempResult* tr) { +static int32_t tfSearchLessEqual_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { return tfSearchCompareFunc_JSON(reader, tem, tr, LE); } -static int32_t tfSearchGreaterThan_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { +static int32_t tfSearchGreaterThan_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { return tfSearchCompareFunc_JSON(reader, tem, tr, GT); } -static int32_t tfSearchGreaterEqual_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { +static int32_t tfSearchGreaterEqual_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { return tfSearchCompareFunc_JSON(reader, tem, tr, GE); } -static int32_t tfSearchRange_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { +static int32_t tfSearchRange_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { // impl later return TSDB_CODE_SUCCESS; } -static int32_t tfSearchCompareFunc_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr, RangeType ctype) { +static int32_t tfSearchCompareFunc_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr, RangeType ctype) { int ret = 0; int skip = 0; @@ -501,7 +501,7 @@ static int32_t tfSearchCompareFunc_JSON(void* reader, SIndexTerm* tem, SIdxTempR fstStreamBuilderDestroy(sb); return TSDB_CODE_SUCCESS; } -int tfileReaderSearch(TFileReader* reader, SIndexTermQuery* query, SIdxTempResult* tr) { +int tfileReaderSearch(TFileReader* reader, SIndexTermQuery* query, SIdxTRslt* tr) { SIndexTerm* term = query->term; EIndexQueryType qtype = query->qType; int ret = 0; @@ -673,7 +673,7 @@ void indexTFileDestroy(IndexTFile* tfile) { taosMemoryFree(tfile); } -int indexTFileSearch(void* tfile, SIndexTermQuery* query, SIdxTempResult* result) { +int indexTFileSearch(void* tfile, SIndexTermQuery* query, SIdxTRslt* result) { int ret = -1; if (tfile == NULL) { return ret; diff --git a/source/libs/index/src/indexUtil.c b/source/libs/index/src/indexUtil.c index a618787fd4..1d20278895 100644 --- a/source/libs/index/src/indexUtil.c +++ b/source/libs/index/src/indexUtil.c @@ -36,24 +36,24 @@ static int iBinarySearch(SArray *arr, int s, int e, uint64_t k) { return s; } -void iIntersection(SArray *inters, SArray *final) { - int32_t sz = (int32_t)taosArrayGetSize(inters); +void iIntersection(SArray *in, SArray *out) { + int32_t sz = (int32_t)taosArrayGetSize(in); if (sz <= 0) { return; } MergeIndex *mi = taosMemoryCalloc(sz, sizeof(MergeIndex)); for (int i = 0; i < sz; i++) { - SArray *t = taosArrayGetP(inters, i); + SArray *t = taosArrayGetP(in, i); mi[i].len = (int32_t)taosArrayGetSize(t); mi[i].idx = 0; } - SArray *base = taosArrayGetP(inters, 0); + SArray *base = taosArrayGetP(in, 0); for (int i = 0; i < taosArrayGetSize(base); i++) { uint64_t tgt = *(uint64_t *)taosArrayGet(base, i); bool has = true; - for (int j = 1; j < taosArrayGetSize(inters); j++) { - SArray *oth = taosArrayGetP(inters, j); + for (int j = 1; j < taosArrayGetSize(in); j++) { + SArray *oth = taosArrayGetP(in, j); int mid = iBinarySearch(oth, mi[j].idx, mi[j].len - 1, tgt); if (mid >= 0 && mid < mi[j].len) { uint64_t val = *(uint64_t *)taosArrayGet(oth, mid); @@ -64,33 +64,33 @@ void iIntersection(SArray *inters, SArray *final) { } } if (has == true) { - taosArrayPush(final, &tgt); + taosArrayPush(out, &tgt); } } taosMemoryFreeClear(mi); } -void iUnion(SArray *inters, SArray *final) { - int32_t sz = (int32_t)taosArrayGetSize(inters); +void iUnion(SArray *in, SArray *out) { + int32_t sz = (int32_t)taosArrayGetSize(in); if (sz <= 0) { return; } if (sz == 1) { - taosArrayAddAll(final, 
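iIntersection keeps one cursor per input (MergeIndex) and probes each candidate from the first sorted array into the others with a binary search that resumes from the previous cursor, so the whole intersection stays near-linear over sorted uid lists. A compact two-array version of the same idea:

```c
#include <stdint.h>
#include <stdio.h>

/* lower_bound on a sorted slice, mirroring iBinarySearch */
static int lowerBound(const uint64_t *a, int s, int e, uint64_t k) {
  while (s <= e) {
    int mid = s + (e - s) / 2;
    if (a[mid] < k) s = mid + 1; else e = mid - 1;
  }
  return s;
}

/* keep tgt only if the other list contains it; the cursor avoids re-scanning */
static int intersect2(const uint64_t *base, int nb,
                      const uint64_t *oth, int no, uint64_t *out) {
  int m = 0, cur = 0;
  for (int i = 0; i < nb; i++) {
    int pos = lowerBound(oth, cur, no - 1, base[i]);
    if (pos < no && oth[pos] == base[i]) out[m++] = base[i];
    cur = pos;  /* sorted inputs: never look left of the last probe */
  }
  return m;
}

int main(void) {
  uint64_t a[] = {1, 3, 5, 9}, b[] = {3, 4, 5, 10}, out[4];
  int n = intersect2(a, 4, b, 4, out);
  for (int i = 0; i < n; i++) printf("%llu ", (unsigned long long)out[i]); /* 3 5 */
  return 0;
}
```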
taosArrayGetP(inters, 0)); + taosArrayAddAll(out, taosArrayGetP(in, 0)); return; } MergeIndex *mi = taosMemoryCalloc(sz, sizeof(MergeIndex)); for (int i = 0; i < sz; i++) { - SArray *t = taosArrayGetP(inters, i); + SArray *t = taosArrayGetP(in, i); mi[i].len = (int32_t)taosArrayGetSize(t); mi[i].idx = 0; } while (1) { - uint64_t mVal = UINT_MAX; + uint64_t mVal = UINT64_MAX; int mIdx = -1; for (int j = 0; j < sz; j++) { - SArray *t = taosArrayGetP(inters, j); + SArray *t = taosArrayGetP(in, j); if (mi[j].idx >= mi[j].len) { continue; } @@ -102,13 +102,13 @@ void iUnion(SArray *inters, SArray *final) { } if (mIdx != -1) { mi[mIdx].idx++; - if (taosArrayGetSize(final) > 0) { - uint64_t lVal = *(uint64_t *)taosArrayGetLast(final); + if (taosArrayGetSize(out) > 0) { + uint64_t lVal = *(uint64_t *)taosArrayGetLast(out); if (lVal == mVal) { continue; } } - taosArrayPush(final, &mVal); + taosArrayPush(out, &mVal); } else { break; } @@ -158,41 +158,44 @@ int verdataCompare(const void *a, const void *b) { return cmp; } -SIdxTempResult *sIdxTempResultCreate() { - SIdxTempResult *tr = taosMemoryCalloc(1, sizeof(SIdxTempResult)); +SIdxTRslt *idxTRsltCreate() { + SIdxTRslt *tr = taosMemoryCalloc(1, sizeof(SIdxTRslt)); tr->total = taosArrayInit(4, sizeof(uint64_t)); - tr->added = taosArrayInit(4, sizeof(uint64_t)); - tr->deled = taosArrayInit(4, sizeof(uint64_t)); + tr->add = taosArrayInit(4, sizeof(uint64_t)); + tr->del = taosArrayInit(4, sizeof(uint64_t)); return tr; } -void sIdxTempResultClear(SIdxTempResult *tr) { +void idxTRsltClear(SIdxTRslt *tr) { if (tr == NULL) { return; } taosArrayClear(tr->total); - taosArrayClear(tr->added); - taosArrayClear(tr->deled); + taosArrayClear(tr->add); + taosArrayClear(tr->del); } -void sIdxTempResultDestroy(SIdxTempResult *tr) { +void idxTRsltDestroy(SIdxTRslt *tr) { if (tr == NULL) { return; } taosArrayDestroy(tr->total); - taosArrayDestroy(tr->added); - taosArrayDestroy(tr->deled); + taosArrayDestroy(tr->add); + taosArrayDestroy(tr->del); } -void sIdxTempResultMergeTo(SArray *result, SIdxTempResult *tr) { +void idxTRsltMergeTo(SIdxTRslt *tr, SArray *result) { taosArraySort(tr->total, uidCompare); - taosArraySort(tr->added, uidCompare); - taosArraySort(tr->deled, uidCompare); + taosArraySort(tr->add, uidCompare); + taosArraySort(tr->del, uidCompare); - SArray *arrs = taosArrayInit(2, sizeof(void *)); - taosArrayPush(arrs, &tr->total); - taosArrayPush(arrs, &tr->added); - - iUnion(arrs, result); - taosArrayDestroy(arrs); - - iExcept(result, tr->deled); + if (taosArrayGetSize(tr->total) == 0 || taosArrayGetSize(tr->add) == 0) { + SArray *t = taosArrayGetSize(tr->total) == 0 ? 
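iUnion is a k-way merge: each round picks the smallest head among the cursors, advances that cursor, and suppresses a value equal to the last one emitted. The "no candidate yet" sentinel must be UINT64_MAX, as the fix above makes it; the old UINT_MAX sentinel truncated to 32 bits and could silently drop large uids. A sketch of the merge:

```c
#include <stdint.h>
#include <stdio.h>

typedef struct { const uint64_t *v; int len, idx; } Cursor;  /* mirrors MergeIndex */

static int kwayUnion(Cursor *c, int k, uint64_t *out) {
  int n = 0;
  for (;;) {
    uint64_t mVal = UINT64_MAX;  /* UINT_MAX here would lose 64-bit uids */
    int mIdx = -1;
    for (int j = 0; j < k; j++) {
      if (c[j].idx >= c[j].len) continue;
      if (c[j].v[c[j].idx] <= mVal) { mVal = c[j].v[c[j].idx]; mIdx = j; }
    }
    if (mIdx < 0) return n;  /* every cursor exhausted */
    c[mIdx].idx++;
    if (n > 0 && out[n - 1] == mVal) continue;  /* dedup against last emit */
    out[n++] = mVal;
  }
}

int main(void) {
  uint64_t a[] = {1, 7, UINT64_MAX}, b[] = {7, 9}, out[8];
  Cursor c[] = {{a, 3, 0}, {b, 2, 0}};
  int n = kwayUnion(c, 2, out);  /* 1 7 9 18446744073709551615 */
  for (int i = 0; i < n; i++) printf("%llu ", (unsigned long long)out[i]);
  return 0;
}
```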
tr->add : tr->total; + taosArrayAddAll(result, t); + } else { + SArray *arrs = taosArrayInit(2, sizeof(void *)); + taosArrayPush(arrs, &tr->total); + taosArrayPush(arrs, &tr->add); + iUnion(arrs, result); + taosArrayDestroy(arrs); + } + iExcept(result, tr->del); } diff --git a/source/libs/index/test/CMakeLists.txt b/source/libs/index/test/CMakeLists.txt index c0b47e74c6..040460ae5c 100644 --- a/source/libs/index/test/CMakeLists.txt +++ b/source/libs/index/test/CMakeLists.txt @@ -1,74 +1,74 @@ -add_executable(indexTest "") -add_executable(fstTest "") -add_executable(fstUT "") -add_executable(UtilUT "") -add_executable(jsonUT "") +add_executable(idxTest "") +add_executable(idxFstTest "") +add_executable(idxFstUT "") +add_executable(idxUtilUT "") +add_executable(idxJsonUT "") -target_sources(indexTest +target_sources(idxTest PRIVATE "indexTests.cc" ) -target_sources(fstTest +target_sources(idxFstTest PRIVATE "fstTest.cc" ) -target_sources(fstUT +target_sources(idxFstUT PRIVATE "fstUT.cc" ) -target_sources(UtilUT +target_sources(idxUtilUT PRIVATE "utilUT.cc" ) -target_sources(jsonUT +target_sources(idxJsonUT PRIVATE "jsonUT.cc" ) -target_include_directories ( indexTest +target_include_directories (idxTest PUBLIC "${TD_SOURCE_DIR}/include/libs/index" "${CMAKE_CURRENT_SOURCE_DIR}/../inc" ) -target_include_directories ( fstTest +target_include_directories (idxFstTest PUBLIC "${TD_SOURCE_DIR}/include/libs/index" "${CMAKE_CURRENT_SOURCE_DIR}/../inc" ) -target_include_directories ( fstUT +target_include_directories (idxFstUT PUBLIC "${TD_SOURCE_DIR}/include/libs/index" "${CMAKE_CURRENT_SOURCE_DIR}/../inc" ) -target_include_directories ( UtilUT +target_include_directories (idxUtilUT PUBLIC "${TD_SOURCE_DIR}/include/libs/index" "${CMAKE_CURRENT_SOURCE_DIR}/../inc" ) -target_include_directories (jsonUT +target_include_directories (idxJsonUT PUBLIC "${TD_SOURCE_DIR}/include/libs/index" "${CMAKE_CURRENT_SOURCE_DIR}/../inc" ) -target_link_libraries (indexTest +target_link_libraries (idxTest os util common gtest_main index ) -target_link_libraries (fstTest +target_link_libraries (idxFstTest os util common gtest_main index ) -target_link_libraries (fstUT +target_link_libraries (idxFstUT os util common @@ -76,7 +76,7 @@ target_link_libraries (fstUT index ) -target_link_libraries (UtilUT +target_link_libraries (idxUtilUT os util common @@ -84,7 +84,7 @@ target_link_libraries (UtilUT index ) -target_link_libraries (jsonUT +target_link_libraries (idxJsonUT os util common @@ -94,17 +94,17 @@ target_link_libraries (jsonUT add_test( NAME idxtest - COMMAND indexTest + COMMAND idxTest ) add_test( NAME idxJsonUT - COMMAND jsonUT + COMMAND idxJsonUT ) add_test( NAME idxUtilUT - COMMAND UtilUT + COMMAND idxUtilUT ) add_test( NAME idxFstUT - COMMAND fstUT + COMMAND idxFstUT ) diff --git a/source/libs/index/test/indexTests.cc b/source/libs/index/test/indexTests.cc index 2d06002af8..77635af6fe 100644 --- a/source/libs/index/test/indexTests.cc +++ b/source/libs/index/test/indexTests.cc @@ -411,12 +411,12 @@ class TFileObj { // // } - SIdxTempResult* tr = sIdxTempResultCreate(); + SIdxTRslt* tr = idxTRsltCreate(); int ret = tfileReaderSearch(reader_, query, tr); - sIdxTempResultMergeTo(result, tr); - sIdxTempResultDestroy(tr); + idxTRsltMergeTo(tr, result); + idxTRsltDestroy(tr); return ret; } ~TFileObj() { @@ -531,11 +531,11 @@ class CacheObj { indexCacheDebug(cache); } int Get(SIndexTermQuery* query, int16_t colId, int32_t version, SArray* result, STermValueType* s) { - SIdxTempResult* tr = sIdxTempResultCreate(); + 
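The fast path added to idxTRsltMergeTo (note the flipped, source-first argument order in the rename) copies whichever of total/add is non-empty straight into the output before the final iExcept, but the contract is unchanged: result = (total ∪ add) \ del. A toy version of that contract, with linear scans in place of the real sorted merges:

```c
#include <stdint.h>
#include <stdio.h>

enum { CAP = 16 };
typedef struct { uint64_t v[CAP]; int n; } Set;  /* like tr->total/add/del */

static int contains(const Set *s, uint64_t x) {
  for (int i = 0; i < s->n; i++) if (s->v[i] == x) return 1;
  return 0;
}

/* result = (total ∪ add) \ del — the contract of idxTRsltMergeTo */
static void mergeTo(const Set *total, const Set *add, const Set *del, Set *out) {
  const Set *in[2] = {total, add};
  for (int k = 0; k < 2; k++)
    for (int i = 0; i < in[k]->n; i++) {
      uint64_t x = in[k]->v[i];
      if (!contains(del, x) && !contains(out, x) && out->n < CAP)
        out->v[out->n++] = x;
    }
}

int main(void) {
  Set total = {{1, 2, 3}, 3}, add = {{3, 4}, 2}, del = {{2}, 1}, out = {{0}, 0};
  mergeTo(&total, &add, &del, &out);
  for (int i = 0; i < out.n; i++)
    printf("%llu ", (unsigned long long)out.v[i]);  /* 1 3 4 */
  return 0;
}
```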
SIdxTRslt* tr = idxTRsltCreate(); int ret = indexCacheSearch(cache, query, tr, s); - sIdxTempResultMergeTo(result, tr); - sIdxTempResultDestroy(tr); + idxTRsltMergeTo(tr, result); + idxTRsltDestroy(tr); if (ret != 0) { std::cout << "failed to get from cache:" << ret << std::endl; diff --git a/source/libs/index/test/utilUT.cc b/source/libs/index/test/utilUT.cc index 18a2b457c4..4a30160244 100644 --- a/source/libs/index/test/utilUT.cc +++ b/source/libs/index/test/utilUT.cc @@ -226,6 +226,22 @@ TEST_F(UtilEnv, 04union) { iUnion(src, rslt); assert(taosArrayGetSize(rslt) == 12); } +TEST_F(UtilEnv, 05unionExcept) { + clearSourceArray(src); + clearFinalArray(rslt); + + uint64_t arr2[] = {7}; + SArray * f = (SArray *)taosArrayGetP(src, 1); + for (int i = 0; i < sizeof(arr2) / sizeof(arr2[0]); i++) { + taosArrayPush(f, &arr2[i]); + } + + iUnion(src, rslt); + + SArray *ept = taosArrayInit(0, sizeof(uint64_t)); + iExcept(rslt, ept); + EXPECT_EQ(taosArrayGetSize(rslt), 1); +} TEST_F(UtilEnv, 01Except) { SArray *total = taosArrayInit(4, sizeof(uint64_t)); { @@ -308,16 +324,36 @@ TEST_F(UtilEnv, 01Except) { ASSERT_EQ(*(uint64_t *)taosArrayGet(total, 1), 100); } TEST_F(UtilEnv, testFill) { - for (int i = 0; i < 10000000; i++) { + for (int i = 0; i < 1000000; i++) { int64_t val = i; char buf[65] = {0}; indexInt2str(val, buf, 1); EXPECT_EQ(val, taosStr2int64(buf)); } - for (int i = 0; i < 10000000; i++) { + for (int i = 0; i < 1000000; i++) { int64_t val = 0 - i; char buf[65] = {0}; indexInt2str(val, buf, -1); EXPECT_EQ(val, taosStr2int64(buf)); } } +TEST_F(UtilEnv, TempResult) { + SIdxTRslt *relt = idxTRsltCreate(); + + SArray *f = taosArrayInit(0, sizeof(uint64_t)); + + uint64_t val = UINT64_MAX - 1; + taosArrayPush(relt->add, &val); + idxTRsltMergeTo(relt, f); + EXPECT_EQ(taosArrayGetSize(f), 1); +} +TEST_F(UtilEnv, TempResultExcept) { + SIdxTRslt *relt = idxTRsltCreate(); + + SArray *f = taosArrayInit(0, sizeof(uint64_t)); + + uint64_t val = UINT64_MAX; + taosArrayPush(relt->add, &val); + idxTRsltMergeTo(relt, f); + EXPECT_EQ(taosArrayGetSize(f), 1); +} diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index 8887b9841a..9c742cfb46 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -1130,6 +1130,9 @@ static const char* jkTableScanPhysiPlanOffset = "Offset"; static const char* jkTableScanPhysiPlanSliding = "Sliding"; static const char* jkTableScanPhysiPlanIntervalUnit = "intervalUnit"; static const char* jkTableScanPhysiPlanSlidingUnit = "slidingUnit"; +static const char* jkTableScanPhysiPlanTriggerType = "triggerType"; +static const char* jkTableScanPhysiPlanWatermark = "watermark"; +static const char* jkTableScanPhysiPlanTsColId = "tsColId"; static int32_t physiTableScanNodeToJson(const void* pObj, SJson* pJson) { const STableScanPhysiNode* pNode = (const STableScanPhysiNode*)pObj; @@ -1171,6 +1174,15 @@ static int32_t physiTableScanNodeToJson(const void* pObj, SJson* pJson) { if (TSDB_CODE_SUCCESS == code) { code = tjsonAddIntegerToObject(pJson, jkTableScanPhysiPlanSlidingUnit, pNode->slidingUnit); } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkTableScanPhysiPlanTriggerType, pNode->triggerType); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkTableScanPhysiPlanWatermark, pNode->watermark); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkTableScanPhysiPlanTsColId, pNode->tsColId); + } return code; } @@ -1221,6 
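testFill is a round-trip property test: every int64, with both signs, must survive indexInt2str followed by taosStr2int64 (the change simply trims the iteration count from 10M to 1M to keep CI fast). The same property sketched with standard-library stand-ins; note the real indexInt2str emits an order-preserving encoding, not the plain decimal used here:

```c
#include <assert.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>

int main(void) {
  /* round-trip: encode to text, parse back, compare — both signs */
  for (int64_t i = 0; i < 1000000; i++) {
    for (int s = 0; s < 2; s++) {
      int64_t val = s ? -i : i;
      char buf[32];
      snprintf(buf, sizeof(buf), "%" PRId64, val);  /* stands in for indexInt2str */
      assert(strtoll(buf, NULL, 10) == val);        /* stands in for taosStr2int64 */
    }
  }
  puts("round-trip ok");
  return 0;
}
```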
+1233,15 @@ static int32_t jsonToPhysiTableScanNode(const SJson* pJson, void* pObj) { tjsonGetNumberValue(pJson, jkTableScanPhysiPlanSlidingUnit, pNode->slidingUnit, code); ; } + if (TSDB_CODE_SUCCESS == code) { + tjsonGetNumberValue(pJson, jkTableScanPhysiPlanTriggerType, pNode->triggerType, code); + } + if (TSDB_CODE_SUCCESS == code) { + tjsonGetNumberValue(pJson, jkTableScanPhysiPlanWatermark, pNode->watermark, code); + } + if (TSDB_CODE_SUCCESS == code) { + tjsonGetNumberValue(pJson, jkTableScanPhysiPlanTsColId, pNode->tsColId, code); + } return code; } diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c index 4d489f68e7..adc07fcd0d 100644 --- a/source/libs/planner/src/planOptimizer.c +++ b/source/libs/planner/src/planOptimizer.c @@ -223,6 +223,9 @@ static void setScanWindowInfo(SScanLogicNode* pScan) { pScan->sliding = ((SWindowLogicNode*)pScan->node.pParent)->sliding; pScan->intervalUnit = ((SWindowLogicNode*)pScan->node.pParent)->intervalUnit; pScan->slidingUnit = ((SWindowLogicNode*)pScan->node.pParent)->slidingUnit; + pScan->triggerType = ((SWindowLogicNode*)pScan->node.pParent)->triggerType; + pScan->watermark = ((SWindowLogicNode*)pScan->node.pParent)->watermark; + pScan->tsColId = ((SColumnNode*)((SWindowLogicNode*)pScan->node.pParent)->pTspk)->colId; } } diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index 0f88a54e91..a45eabefb9 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -503,6 +503,9 @@ static int32_t createTableScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubp pTableScan->sliding = pScanLogicNode->sliding; pTableScan->intervalUnit = pScanLogicNode->intervalUnit; pTableScan->slidingUnit = pScanLogicNode->slidingUnit; + pTableScan->triggerType = pScanLogicNode->triggerType; + pTableScan->watermark = pScanLogicNode->watermark; + pTableScan->tsColId = pScanLogicNode->tsColId; return createScanPhysiNodeFinalize(pCxt, pSubplan, pScanLogicNode, (SScanPhysiNode*)pTableScan, pPhyNode); } diff --git a/source/libs/scalar/src/sclfunc.c b/source/libs/scalar/src/sclfunc.c index 0d47595b3e..18c1f08c6c 100644 --- a/source/libs/scalar/src/sclfunc.c +++ b/source/libs/scalar/src/sclfunc.c @@ -925,10 +925,9 @@ int32_t toUnixtimestampFunction(SScalarParam *pInput, int32_t inputNum, SScalarP int32_t ret = convertStringToTimestamp(type, input, timePrec, &timeVal); if (ret != TSDB_CODE_SUCCESS) { colDataAppendNULL(pOutput->columnData, i); - continue; + } else { + colDataAppend(pOutput->columnData, i, (char *)&timeVal, false); } - - colDataAppend(pOutput->columnData, i, (char *)&timeVal, false); } pOutput->numOfRows = pInput->numOfRows; diff --git a/source/libs/stream/src/tstream.c b/source/libs/stream/src/tstream.c index 7e4f83a693..cc9fc8bd80 100644 --- a/source/libs/stream/src/tstream.c +++ b/source/libs/stream/src/tstream.c @@ -35,6 +35,14 @@ void* streamDataBlockDecode(const void* buf, SStreamDataBlock* pInput) { return (void*)buf; } +static int32_t streamBuildDispatchMsg(SStreamTask* pTask, SArray* data, SRpcMsg* pMsg, SEpSet** ppEpSet) { + SStreamDispatchReq req = { + .streamId = pTask->streamId, + .data = data, + }; + return 0; +} + static int32_t streamBuildExecMsg(SStreamTask* pTask, SArray* data, SRpcMsg* pMsg, SEpSet** ppEpSet) { SStreamTaskExecReq req = { .streamId = pTask->streamId, @@ -407,6 +415,26 @@ int32_t streamProcessRecoverRsp(SStreamTask* pTask, SStreamTaskRecoverRsp* pRsp) return 0; } +int32_t 
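The three new table-scan fields must appear symmetrically in the serializer and deserializer, with identical keys: physiTableScanNodeToJson writes them and jsonToPhysiTableScanNode reads them back, or a plan that round-trips through JSON quietly loses its trigger, watermark, and timestamp-column settings. The symmetry requirement in miniature, with a toy printf/scanf codec standing in for tjson:

```c
#include <stdio.h>

typedef struct { int triggerType; long long watermark; int tsColId; } ScanPlan;

/* every key written here ... */
static void planToJson(const ScanPlan *p, char *buf, size_t n) {
  snprintf(buf, n, "{\"triggerType\":%d,\"watermark\":%lld,\"tsColId\":%d}",
           p->triggerType, p->watermark, p->tsColId);
}

/* ... must be read back here, with the same name and type */
static int planFromJson(const char *buf, ScanPlan *p) {
  return sscanf(buf, "{\"triggerType\":%d,\"watermark\":%lld,\"tsColId\":%d}",
                &p->triggerType, &p->watermark, &p->tsColId) == 3 ? 0 : -1;
}

int main(void) {
  ScanPlan in = {1, 20000, 9}, out = {0, 0, 0};
  char buf[128];
  planToJson(&in, buf, sizeof(buf));
  if (planFromJson(buf, &out) != 0) return 1;
  return (in.triggerType == out.triggerType && in.watermark == out.watermark &&
          in.tsColId == out.tsColId) ? 0 : 1;
}
```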
tEncodeStreamDispatchReq(SEncoder* pEncoder, const SStreamDispatchReq* pReq) { + if (tStartEncode(pEncoder) < 0) return -1; + if (tEncodeI64(pEncoder, pReq->streamId) < 0) return -1; + if (tEncodeI32(pEncoder, pReq->taskId) < 0) return -1; + if (tEncodeI32(pEncoder, pReq->sourceTaskId) < 0) return -1; + if (tEncodeI32(pEncoder, pReq->sourceVg) < 0) return -1; + tEndEncode(pEncoder); + return 0; +} + +int32_t tDecodeStreamDispatchReq(SDecoder* pDecoder, SStreamDispatchReq* pReq) { + if (tStartDecode(pDecoder) < 0) return -1; + if (tDecodeI64(pDecoder, &pReq->streamId) < 0) return -1; + if (tDecodeI32(pDecoder, &pReq->taskId) < 0) return -1; + if (tDecodeI32(pDecoder, &pReq->sourceTaskId) < 0) return -1; + if (tDecodeI32(pDecoder, &pReq->sourceVg) < 0) return -1; + tEndDecode(pDecoder); + return 0; +} + int32_t tEncodeSStreamTaskExecReq(void** buf, const SStreamTaskExecReq* pReq) { int32_t tlen = 0; tlen += taosEncodeFixedI64(buf, pReq->streamId); diff --git a/source/libs/stream/src/tstreamUpdate.c b/source/libs/stream/src/tstreamUpdate.c index 75319a2354..7587fcecc9 100644 --- a/source/libs/stream/src/tstreamUpdate.c +++ b/source/libs/stream/src/tstreamUpdate.c @@ -72,12 +72,14 @@ static int64_t adjustInterval(int64_t interval, int32_t precision) { return val; } -static int64_t adjustWatermark(int64_t interval, int64_t watermark) { - if (watermark <= 0 || watermark > MAX_NUM_SCALABLE_BF * interval) { - watermark = MAX_NUM_SCALABLE_BF * interval; - } else if (watermark < MIN_NUM_SCALABLE_BF * interval) { - watermark = MIN_NUM_SCALABLE_BF * interval; - } +static int64_t adjustWatermark(int64_t adjInterval, int64_t originInt, int64_t watermark) { + if (watermark <= 0) { + watermark = TMIN(originInt/adjInterval, 1) * adjInterval; + } else if (watermark > MAX_NUM_SCALABLE_BF * adjInterval) { + watermark = MAX_NUM_SCALABLE_BF * adjInterval; + }/* else if (watermark < MIN_NUM_SCALABLE_BF * adjInterval) { + watermark = MIN_NUM_SCALABLE_BF * adjInterval; + }*/ // Todo(liuyao) save window info to tdb return watermark; } @@ -94,7 +96,7 @@ SUpdateInfo *updateInfoInit(int64_t interval, int32_t precision, int64_t waterma pInfo->pTsSBFs = NULL; pInfo->minTS = -1; pInfo->interval = adjustInterval(interval, precision); - pInfo->watermark = adjustWatermark(pInfo->interval, watermark); + pInfo->watermark = adjustWatermark(pInfo->interval, interval, watermark); uint64_t bfSize = (uint64_t)(pInfo->watermark / pInfo->interval); @@ -149,13 +151,18 @@ static SScalableBf *getSBf(SUpdateInfo *pInfo, TSKEY ts) { bool updateInfoIsUpdated(SUpdateInfo *pInfo, tb_uid_t tableId, TSKEY ts) { int32_t res = TSDB_CODE_FAILED; uint64_t index = ((uint64_t)tableId) % pInfo->numBuckets; + TSKEY maxTs = *(TSKEY *)taosArrayGet(pInfo->pTsBuckets, index); + if (ts < maxTs - pInfo->watermark) { + // this window has been closed. 
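updateInfoIsUpdated now performs the watermark check before touching the scalable bloom filters: a timestamp older than the bucket's max minus the watermark belongs to a window that has already been closed, and is reported as an update immediately. (As an aside, `TMIN(originInt/adjInterval, 1)` in adjustWatermark evaluates to 0 whenever originInt is smaller than adjInterval, which zeroes the default watermark; TMAX may have been intended.) The early-out in isolation:

```c
#include <stdbool.h>
#include <stdint.h>

typedef int64_t TSKEY;

/* sketch of the late-data check: rows behind the closed boundary are
 * flagged as updates without consulting the bloom filters at all */
static bool isLateData(TSKEY ts, TSKEY bucketMaxTs, int64_t watermark) {
  return ts < bucketMaxTs - watermark;
}

int main(void) {
  TSKEY maxTs = 1648791240000;    /* newest ts seen in this bucket */
  int64_t watermark = 20 * 1000;  /* 20s, assuming ms precision */
  /* a row 30s behind the max falls outside the watermark: window closed */
  return isLateData(1648791210000, maxTs, watermark) ? 0 : 1;
}
```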
+ return true; + } + SScalableBf *pSBf = getSBf(pInfo, ts); // pSBf may be a null pointer if (pSBf) { res = tScalableBfPut(pSBf, &ts, sizeof(TSKEY)); } - TSKEY maxTs = *(TSKEY *)taosArrayGet(pInfo->pTsBuckets, index); if (maxTs < ts) { taosArraySet(pInfo->pTsBuckets, index, &ts); return false; diff --git a/tests/pytest/stream/test3.py b/tests/pytest/stream/test3.py new file mode 100644 index 0000000000..b45521a947 --- /dev/null +++ b/tests/pytest/stream/test3.py @@ -0,0 +1,34 @@ +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.common import tdCom +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + #for i in range(100): + tdSql.prepare() + dbname = tdCom.getLongName(10, "letters") + tdSql.execute('create database if not exists djnhawvlgq vgroups 1') + tdSql.execute('use djnhawvlgq') + tdSql.execute('create table if not exists downsampling_stb (ts timestamp, c1 int, c2 double, c3 varchar(100), c4 bool) tags (t1 int, t2 double, t3 varchar(100), t4 bool);') + tdSql.execute('create table downsampling_ct1 using downsampling_stb tags(10, 10.1, "Beijing", True);') + tdSql.execute('create table if not exists scalar_stb (ts timestamp, c1 int, c2 double, c3 binary(20), c4 nchar(20), c5 nchar(20)) tags (t1 int);') + tdSql.execute('create table scalar_ct1 using scalar_stb tags(10);') + tdSql.execute('create table if not exists data_filter_stb (ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 float, c6 double, c7 binary(100), c8 nchar(200), c9 bool, c10 tinyint unsigned, c11 smallint unsigned, c12 int unsigned, c13 bigint unsigned) tags (t1 tinyint, t2 smallint, t3 int, t4 bigint, t5 float, t6 double, t7 binary(100), t8 nchar(200), t9 bool, t10 tinyint unsigned, t11 smallint unsigned, t12 int unsigned, t13 bigint unsigned)') + tdSql.execute('create table if not exists data_filter_ct1 using data_filter_stb tags (1, 2, 3, 4, 5.5, 6.6, "binary7", "nchar8", true, 11, 12, 13, 14)') + tdSql.execute('create stream data_filter_stream into output_data_filter_stb as select * from data_filter_stb where ts >= 1653648072973+1s and c1 = 1 or c2 > 1 and c3 != 4 or c4 <= 3 and c5 <> 0 or c6 is not Null or c7 is Null or c8 between "na" and "nchar4" and c8 not between "bi" and "binary" and c8 match "nchar[19]" and c8 nmatch "nchar[25]" or c9 in (1, 2, 3) or c10 not in (6, 7) and c8 like "nch%" and c7 not like "bina_" and c11 <= 10 or c12 is Null or c13 >= 4;') + tdSql.execute('insert into data_filter_ct1 values (1653648072973, 1, 1, 1, 3, 1.1, 1.1, "binary1", "nchar1", true, 1, 2, 3, 4);') + tdSql.execute('insert into data_filter_ct1 values (1653648072973+1s, 2, 2, 1, 3, 1.1, 1.1, "binary2", "nchar2", true, 2, 3, 4, 5);') + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py index 2869cd6fdf..e8d01de3e5 100644 --- a/tests/pytest/util/dnodes.py +++ b/tests/pytest/util/dnodes.py @@ -17,6 +17,10 @@ import os.path import platform import subprocess from time import sleep +import base64 +import json +import copy +from fabric2 import Connection from util.log import * @@ -111,6 +115,7 @@ class TDDnode: self.deployed = 0 self.testCluster = False self.valgrind = 0 + self.remoteIP = "" self.cfgDict = { "walLevel": "2", "fsync": "1000", @@ -137,8 +142,9 
@@ class TDDnode: "telemetryReporting": "0" } - def init(self, path): + def init(self, path, remoteIP = ""): self.path = path + self.remoteIP = remoteIP def setTestCluster(self, value): self.testCluster = value @@ -162,6 +168,24 @@ class TDDnode: def addExtraCfg(self, option, value): self.cfgDict.update({option: value}) + def remoteExec(self, updateCfgDict, execCmd): + remote_conn = Connection(self.remoteIP, port=22, user='root', connect_kwargs={'password':'123456'}) + remote_top_dir = '~/test' + valgrindStr = '' + if (self.valgrind==1): + valgrindStr = '-g' + remoteCfgDict = copy.deepcopy(updateCfgDict) + if ("logDir" in remoteCfgDict): + del remoteCfgDict["logDir"] + if ("dataDir" in remoteCfgDict): + del remoteCfgDict["dataDir"] + if ("cfgDir" in remoteCfgDict): + del remoteCfgDict["cfgDir"] + remoteCfgDictStr = base64.b64encode(json.dumps(remoteCfgDict).encode()).decode() + execCmdStr = base64.b64encode(execCmd.encode()).decode() + with remote_conn.cd((remote_top_dir+sys.path[0].replace(self.path, '')).replace('\\','/')): + remote_conn.run("python3 ./test.py %s -d %s -e %s"%(valgrindStr,remoteCfgDictStr,execCmdStr)) + def deploy(self, *updatecfgDict): self.logDir = "%s/sim/dnode%d/log" % (self.path, self.index) self.dataDir = "%s/sim/dnode%d/data" % (self.path, self.index) @@ -229,8 +253,11 @@ class TDDnode: self.cfg(value, key) else: self.addExtraCfg(key, value) - for key, value in self.cfgDict.items(): - self.cfg(key, value) + if (self.remoteIP == ""): + for key, value in self.cfgDict.items(): + self.cfg(key, value) + else: + self.remoteExec(self.cfgDict, "tdDnodes.deploy(%d,updateCfgDict)"%self.index) self.deployed = 1 tdLog.debug( @@ -268,117 +295,68 @@ class TDDnode: tdLog.exit("dnode:%d is not deployed" % (self.index)) if self.valgrind == 0: - cmd = "nohup %s -c %s > /dev/null 2>&1 & " % ( - binPath, self.cfgDir) + if platform.system().lower() == 'windows': + cmd = "mintty -h never -w hide %s -c %s" % ( + binPath, self.cfgDir) + else: + cmd = "nohup %s -c %s > /dev/null 2>&1 & " % ( + binPath, self.cfgDir) else: valgrindCmdline = "valgrind --log-file=\"%s/../log/valgrind.log\" --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"%self.cfgDir - cmd = "nohup %s %s -c %s 2>&1 & " % ( - valgrindCmdline, binPath, self.cfgDir) + if platform.system().lower() == 'windows': + cmd = "mintty -h never -w hide %s %s -c %s" % ( + valgrindCmdline, binPath, self.cfgDir) + else: + cmd = "nohup %s %s -c %s 2>&1 & " % ( + valgrindCmdline, binPath, self.cfgDir) print(cmd) - if os.system(cmd) != 0: - tdLog.exit(cmd) - self.running = 1 - tdLog.debug("dnode:%d is running with %s " % (self.index, cmd)) - if self.valgrind == 0: - time.sleep(0.1) - key = 'from offline to online' - bkey = bytes(key, encoding="utf8") - logFile = self.logDir + "/taosdlog.0" - i = 0 - while not os.path.exists(logFile): - sleep(0.1) - i += 1 - if i > 50: - break - popen = subprocess.Popen( - 'tail -f ' + logFile, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - shell=True) - pid = popen.pid - # print('Popen.pid:' + str(pid)) - timeout = time.time() + 60 * 2 - while True: - line = popen.stdout.readline().strip() - if bkey in line: - popen.kill() - break - if time.time() > timeout: - tdLog.exit('wait too long for taosd start') - tdLog.debug("the dnode:%d has been started." 
% (self.index)) + if (not self.remoteIP == ""): + self.remoteExec(self.cfgDict, "tdDnodes.deploy(%d,updateCfgDict)\ntdDnodes.start(%d)"%(self.index, self.index)) + self.running = 1 else: - tdLog.debug( - "wait 10 seconds for the dnode:%d to start." % - (self.index)) - time.sleep(10) - - # time.sleep(5) - def startWin(self): - binPath = self.getPath("taosd.exe") - - if (binPath == ""): - tdLog.exit("taosd.exe not found!") - else: - tdLog.info("taosd.exe found: %s" % binPath) - - taosadapterBinPath = self.getPath("taosadapter.exe") - if (taosadapterBinPath == ""): - tdLog.info("taosAdapter.exe not found!") - else: - tdLog.info("taosAdapter.exe found in %s" % taosadapterBuildPath) - - if self.deployed == 0: - tdLog.exit("dnode:%d is not deployed" % (self.index)) - - cmd = "mintty -h never -w hide %s -c %s" % ( - binPath, self.cfgDir) - - if (taosadapterBinPath != ""): - taosadapterCmd = "mintty -h never -w hide %s --monitor.writeToTD=false " % ( - taosadapterBinPath) - if os.system(taosadapterCmd) != 0: - tdLog.exit(taosadapterCmd) - - if os.system(cmd) != 0: - tdLog.exit(cmd) - - self.running = 1 - tdLog.debug("dnode:%d is running with %s " % (self.index, cmd)) - if self.valgrind == 0: - time.sleep(0.1) - key = 'from offline to online' - bkey = bytes(key, encoding="utf8") - logFile = self.logDir + "/taosdlog.0" - i = 0 - while not os.path.exists(logFile): - sleep(0.1) - i += 1 - if i > 50: - break - popen = subprocess.Popen( - 'tail -n +0 -f ' + logFile, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - shell=True) - pid = popen.pid - # print('Popen.pid:' + str(pid)) - timeout = time.time() + 60 * 2 - while True: - line = popen.stdout.readline().strip() - if bkey in line: - popen.kill() - break - if time.time() > timeout: - tdLog.exit('wait too long for taosd start') - tdLog.debug("the dnode:%d has been started." % (self.index)) - else: - tdLog.debug( - "wait 10 seconds for the dnode:%d to start." % - (self.index)) - time.sleep(10) + if os.system(cmd) != 0: + tdLog.exit(cmd) + self.running = 1 + print("dnode:%d is running with %s " % (self.index, cmd)) + tdLog.debug("dnode:%d is running with %s " % (self.index, cmd)) + if self.valgrind == 0: + time.sleep(0.1) + key = 'from offline to online' + bkey = bytes(key, encoding="utf8") + logFile = self.logDir + "/taosdlog.0" + i = 0 + while not os.path.exists(logFile): + sleep(0.1) + i += 1 + if i > 50: + break + tailCmdStr = 'tail -f ' + if platform.system().lower() == 'windows': + tailCmdStr = 'tail -n +0 -f ' + popen = subprocess.Popen( + tailCmdStr + logFile, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + shell=True) + pid = popen.pid + # print('Popen.pid:' + str(pid)) + timeout = time.time() + 60 * 2 + while True: + line = popen.stdout.readline().strip() + if bkey in line: + popen.kill() + break + if time.time() > timeout: + tdLog.exit('wait too long for taosd start') + tdLog.debug("the dnode:%d has been started." % (self.index)) + else: + tdLog.debug( + "wait 10 seconds for the dnode:%d to start." 
% + (self.index)) + time.sleep(10) def startWithoutSleep(self): binPath = self.getPath() @@ -402,12 +380,19 @@ class TDDnode: print(cmd) - if os.system(cmd) != 0: - tdLog.exit(cmd) + if (self.remoteIP == ""): + if os.system(cmd) != 0: + tdLog.exit(cmd) + else: + self.remoteExec(self.cfgDict, "tdDnodes.deploy(%d,updateCfgDict)\ntdDnodes.startWithoutSleep(%d)"%(self.index, self.index)) + self.running = 1 tdLog.debug("dnode:%d is running with %s " % (self.index, cmd)) def stop(self): + if (not self.remoteIP == ""): + self.remoteExec(self.cfgDict, "tdDnodes.stop(%d)"%self.index) + return if self.valgrind == 0: toBeKilled = "taosd" else: @@ -435,6 +420,9 @@ class TDDnode: tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index)) def forcestop(self): + if (not self.remoteIP == ""): + self.remoteExec(self.cfgDict, "tdDnodes.forcestop(%d)"%self.index) + return if self.valgrind == 0: toBeKilled = "taosd" else: @@ -499,8 +487,10 @@ class TDDnodes: self.dnodes.append(TDDnode(9)) self.dnodes.append(TDDnode(10)) self.simDeployed = False + self.testCluster = False + self.valgrind = 0 - def init(self, path): + def init(self, path, remoteIP = ""): psCmd = "ps -ef|grep -w taosd| grep -v grep| grep -v defunct | awk '{print $2}'" processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") while(processID): @@ -520,9 +510,9 @@ class TDDnodes: psCmd, shell=True).decode("utf-8") binPath = self.dnodes[0].getPath() + "/../../../" - tdLog.debug("binPath %s" % (binPath)) + # tdLog.debug("binPath %s" % (binPath)) binPath = os.path.realpath(binPath) - tdLog.debug("binPath real path %s" % (binPath)) + # tdLog.debug("binPath real path %s" % (binPath)) # cmd = "sudo cp %s/build/lib/libtaos.so /usr/local/lib/taos/" % (binPath) # tdLog.debug(cmd) @@ -545,7 +535,7 @@ class TDDnodes: self.path = os.path.realpath(path) for i in range(len(self.dnodes)): - self.dnodes[i].init(self.path) + self.dnodes[i].init(self.path, remoteIP) self.sim = TDSimClient(self.path) def setTestCluster(self, value): @@ -572,10 +562,7 @@ class TDDnodes: def start(self, index): self.check(index) - if platform.system().lower() == 'windows': - self.dnodes[index - 1].startWin() - else: - self.dnodes[index - 1].start() + self.dnodes[index - 1].start() def startWithoutSleep(self, index): self.check(index) diff --git a/tests/script/tsim/stable/alter_count.sim b/tests/script/tsim/stable/alter_count.sim index 9c9ece7ee4..e5af9a5735 100644 --- a/tests/script/tsim/stable/alter_count.sim +++ b/tests/script/tsim/stable/alter_count.sim @@ -141,6 +141,8 @@ sql connect sql select count(a), count(b), count(c), count(d), count(e), count(f), count(g), count(h) from d1.tb; sql select count(a), count(b), count(c), count(d), count(e), count(f), count(g), count(h) from d1.tb; + +sql use d1 sql select count(a), count(b), count(c), count(d), count(e), count(f), count(g), count(h) from tb if $data00 != 24 then return -1 diff --git a/tests/script/tsim/stream/triggerInterval0.sim b/tests/script/tsim/stream/triggerInterval0.sim new file mode 100644 index 0000000000..6f1d8f4b7b --- /dev/null +++ b/tests/script/tsim/stream/triggerInterval0.sim @@ -0,0 +1,185 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print =============== create database +sql create database test vgroups 1 +sql show databases +if $rows != 3 then + return -1 +endi + +print $data00 $data01 $data02 + +sql use test +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream streams1 trigger 
window_close into streamt as select _wstartts, count(*) c1, count(d) c2 , sum(a) c3 , max(b) c4, min(c) c5 from t1 interval(10s); + +sql insert into t1 values(1648791213001,1,2,3,1.0); +sleep 300 +sql select * from streamt; +if $rows != 0 then + print ======$rows + return -1 +endi + +sql insert into t1 values(1648791223001,2,2,3,1.1); +sql insert into t1 values(1648791223002,2,2,3,1.1); +sql insert into t1 values(1648791223003,2,2,3,1.1); +sql insert into t1 values(1648791223001,2,2,3,1.1); +sleep 300 +sql select * from streamt; +if $rows != 1 then + print ======$rows + return -1 +endi + +if $data01 != 1 then + print ======$data01 + return -1 +endi + +sql insert into t1 values(1648791233001,2,2,3,1.1); +sleep 300 +sql select * from streamt; +if $rows != 2 then + print ======$rows + return -1 +endi +if $data01 != 1 then + print ======$data01 + return -1 +endi +if $data11 != 3 then + print ======$data11 + return -1 +endi + +sql insert into t1 values(1648791223004,2,2,3,1.1); +sql insert into t1 values(1648791223004,2,2,3,1.1); +sql insert into t1 values(1648791223005,2,2,3,1.1); +sleep 300 +sql select * from streamt; +if $rows != 2 then + print ======$rows + return -1 +endi +if $data01 != 1 then + print ======$data01 + return -1 +endi +if $data11 != 5 then + print ======$data11 + return -1 +endi + + +sql insert into t1 values(1648791233002,3,2,3,2.1); +sql insert into t1 values(1648791213002,4,2,3,3.1) +sql insert into t1 values(1648791213002,4,2,3,4.1); +sleep 300 +sql select * from streamt; +if $rows != 2 then + print ======$rows + return -1 +endi +if $data01 != 2 then + print ======$data01 + return -1 +endi +if $data11 != 5 then + print ======$data11 + return -1 +endi + +sql create table t2(ts timestamp, a int, b int , c int, d double); +sql create stream streams2 trigger window_close watermark 20s into streamt2 as select _wstartts, count(*) c1, count(d) c2 , sum(a) c3 , max(b) c4, min(c) c5 from t2 interval(10s); +sql insert into t2 values(1648791213000,1,2,3,1.0); +sql insert into t2 values(1648791239999,1,2,3,1.0); +sleep 300 +sql select * from streamt2; +if $rows != 0 then + print ======$rows + return -1 +endi + +sql insert into t2 values(1648791240000,1,2,3,1.0); +sleep 300 +sql select * from streamt2; +if $rows != 1 then + print ======$rows + return -1 +endi +if $data01 != 1 then + print ======$data01 + return -1 +endi + +sql insert into t2 values(1648791250001,1,2,3,1.0) (1648791250002,1,2,3,1.0) (1648791250003,1,2,3,1.0) (1648791240000,1,2,3,1.0); +sleep 300 +sql select * from streamt2; +if $rows != 1 then + print ======$rows + return -1 +endi +if $data01 != 1 then + print ======$data01 + return -1 +endi + +sql insert into t2 values(1648791280000,1,2,3,1.0); +sleep 300 +sql select * from streamt2; +if $rows != 4 then + print ======$rows + return -1 +endi +if $data01 != 1 then + print ======$data01 + return -1 +endi +if $data11 != 1 then + print ======$data11 + return -1 +endi +if $data21 != 1 then + print ======$data21 + return -1 +endi +if $data31 != 3 then + print ======$data31 + return -1 +endi + +sql insert into t2 values(1648791250001,1,2,3,1.0) (1648791250002,1,2,3,1.0) (1648791250003,1,2,3,1.0) (1648791280000,1,2,3,1.0) (1648791280001,1,2,3,1.0) (1648791280002,1,2,3,1.0) (1648791310000,1,2,3,1.0) (1648791280001,1,2,3,1.0); +sleep 300 +sql select * from streamt2; + +if $rows != 5 then + print ======$rows + return -1 +endi +if $data01 != 1 then + print ======$data01 + return -1 +endi +if $data11 != 1 then + print ======$data11 + return -1 +endi +if $data21 != 1 then + print 
======$data21 + return -1 +endi +if $data31 != 3 then + print ======$data31 + return -1 +endi +if $data41 != 3 then + print ======$data41 + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/tsim/stream/triggerSession0.sim b/tests/script/tsim/stream/triggerSession0.sim new file mode 100644 index 0000000000..fb0666fdcf --- /dev/null +++ b/tests/script/tsim/stream/triggerSession0.sim @@ -0,0 +1,105 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print =============== create database +sql create database test vgroups 1 +sql show databases +if $rows != 3 then + return -1 +endi + +print $data00 $data01 $data02 + +sql use test +sql create table t2(ts timestamp, a int, b int , c int, d double); +sql create stream streams2 trigger window_close into streamt2 as select _wstartts, count(*) c1, count(d) c2 , sum(a) c3 , max(b) c4, min(c) c5 from t2 session(ts, 10s); + +sql insert into t2 values(1648791213000,1,2,3,1.0); +sql insert into t2 values(1648791222999,1,2,3,1.0); +sql insert into t2 values(1648791223000,1,2,3,1.0); +sql insert into t2 values(1648791223001,1,2,3,1.0); +sql insert into t2 values(1648791233001,1,2,3,1.0); +sleep 300 +sql select * from streamt2; +if $rows != 0 then + print ======$rows + return -1 +endi + +sql insert into t2 values(1648791243002,1,2,3,1.0); +sleep 300 +sql select * from streamt2; +if $rows != 1 then + print ======$rows + return -1 +endi + +if $data01 != 5 then + print ======$data01 + return -1 +endi + +sql insert into t2 values(1648791223001,1,2,3,1.0) (1648791223002,1,2,3,1.0) (1648791222999,1,2,3,1.0); +sleep 300 +sql select * from streamt2; +if $rows != 1 then + print ======$rows + return -1 +endi + +if $data01 != 6 then + print ======$data01 + return -1 +endi + +sql insert into t2 values(1648791233002,1,2,3,1.0); +sleep 300 +sql select * from streamt2; +if $rows != 1 then + print ======$rows + return -1 +endi + +if $data01 != 6 then + print ======$data01 + return -1 +endi + +sql insert into t2 values(1648791253003,1,2,3,1.0); +sleep 300 +sql select * from streamt2; +if $rows != 1 then + print ======$rows + return -1 +endi + +if $data01 != 8 then + print ======$data01 + return -1 +endi + +sql insert into t2 values(1648791243003,1,2,3,1.0) (1648791243002,1,2,3,1.0) (1648791270004,1,2,3,1.0) (1648791280005,1,2,3,1.0) (1648791290006,1,2,3,1.0); +sleep 500 +sql select * from streamt2; +if $rows != 3 then + print ======$rows + return -1 +endi + +if $data01 != 10 then + print ======$data01 + return -1 +endi +if $data11 != 1 then + print ======$data11 + return -1 +endi +if $data21 != 1 then + print ======$data21 + return -1 +endi + +#system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/system-test/7-tmq/basic5.py b/tests/system-test/7-tmq/basic5.py index 4a29cacd97..500e867121 100644 --- a/tests/system-test/7-tmq/basic5.py +++ b/tests/system-test/7-tmq/basic5.py @@ -22,8 +22,8 
class TDTestCase: 'stbName': 'stb', \ 'ctbNum': 10, \ 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ + 'batchNum': 200, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath @@ -300,7 +300,7 @@ class TDTestCase: time.sleep(1) tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 100 showMsg = 1 showRow = 1 @@ -349,8 +349,8 @@ class TDTestCase: 'vgroups': 1, \ 'stbName': 'stb', \ 'ctbNum': 10, \ - 'rowsPerTbl': 30000, \ - 'batchNum': 100, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 200, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath @@ -381,8 +381,8 @@ class TDTestCase: 'vgroups': 1, \ 'stbName': 'stb2', \ 'ctbNum': 10, \ - 'rowsPerTbl': 30000, \ - 'batchNum': 100, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 200, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict2['cfg'] = cfgPath tdSql.execute("create stable if not exists %s.%s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%(parameterDict2['dbName'], parameterDict2['stbName'])) @@ -432,7 +432,7 @@ class TDTestCase: time.sleep(1) tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 100 showMsg = 1 showRow = 1 diff --git a/tests/system-test/7-tmq/subscribeDb.py b/tests/system-test/7-tmq/subscribeDb.py index 29a2342fb7..157bc7928b 100644 --- a/tests/system-test/7-tmq/subscribeDb.py +++ b/tests/system-test/7-tmq/subscribeDb.py @@ -22,8 +22,8 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") - #tdSql.init(conn.cursor()) - tdSql.init(conn.cursor(), logSql) # output sql.txt file + tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -167,7 +167,7 @@ class TDTestCase: 'vgroups': 4, \ 'stbName': 'stb', \ 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ + 'rowsPerTbl': 5000, \ 'batchNum': 100, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath @@ -197,7 +197,7 @@ class TDTestCase: event.wait() tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 100 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) @@ -236,7 +236,7 @@ class TDTestCase: self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 20 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) @@ -264,7 +264,7 @@ class TDTestCase: 'vgroups': 4, \ 'stbName': 'stb', \ 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ + 'rowsPerTbl': 5000, \ 'batchNum': 100, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath @@ -298,7 +298,7 @@ class TDTestCase: event.wait() tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 20 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) @@ -313,8 +313,8 @@ class TDTestCase: for i in range(expectRows): totalConsumeRows += resultList[i] - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + if not (totalConsumeRows >= expectrowcnt): tdLog.exit("tmq consume rows error!") tdSql.query("drop topic %s"%topicName1) @@ -330,7 +330,7 @@ class TDTestCase: 
'vgroups': 4, \ 'stbName': 'stb1', \ 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ + 'rowsPerTbl': 5000, \ 'batchNum': 100, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath @@ -364,7 +364,7 @@ class TDTestCase: self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) tdLog.info("start consume processor") - pollDelay = 10 + pollDelay = 100 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) @@ -399,7 +399,7 @@ class TDTestCase: 'vgroups': 4, \ 'stbName': 'stb', \ 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ + 'rowsPerTbl': 5000, \ 'batchNum': 100, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath @@ -416,7 +416,7 @@ class TDTestCase: 'vgroups': 4, \ 'stbName': 'stb2', \ 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ + 'rowsPerTbl': 5000, \ 'batchNum': 100, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath @@ -446,7 +446,7 @@ class TDTestCase: event.wait() tdLog.info("start consume processor") - pollDelay = 10 + pollDelay = 100 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) @@ -470,336 +470,6 @@ class TDTestCase: tdLog.printNoPrefix("======== test case 3 end ...... ") - def tmqCase4(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 4: Produce while two consumers to subscribe one db, include 2 stb") - tdLog.info("step 1: create database, stb, ctb and insert data") - # create and start thread - parameterDict = {'cfg': '', \ - 'dbName': 'db4', \ - 'vgroups': 4, \ - 'stbName': 'stb', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.initConsumerTable() - - tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) - - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - - parameterDict2 = {'cfg': '', \ - 'dbName': 'db4', \ - 'vgroups': 4, \ - 'stbName': 'stb2', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) - prepareEnvThread2.start() - - tdLog.info("create topics from db") - topicName1 = 'topic_db1' - - tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) - - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"] - topicList = topicName1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - consumerId = 1 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - event.wait() - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - # wait for data ready - prepareEnvThread.join() - prepareEnvThread2.join() - - tdLog.info("insert process end, and start to check consume result") - expectRows = 2 - resultList = 
self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicName1) - - tdLog.printNoPrefix("======== test case 4 end ...... ") - - def tmqCase5(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 5: Produce while two consumers to subscribe one db, firstly create one stb, after start consume create other stb") - tdLog.info("step 1: create database, stb, ctb and insert data") - # create and start thread - parameterDict = {'cfg': '', \ - 'dbName': 'db5', \ - 'vgroups': 4, \ - 'stbName': 'stb', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.initConsumerTable() - - tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) - - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - - parameterDict2 = {'cfg': '', \ - 'dbName': 'db5', \ - 'vgroups': 4, \ - 'stbName': 'stb2', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - tdLog.info("create topics from db") - topicName1 = 'topic_db1' - - tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) - - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"] - topicList = topicName1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - consumerId = 1 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - event.wait() - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) - prepareEnvThread2.start() - - # wait for data ready - prepareEnvThread.join() - prepareEnvThread2.join() - - tdLog.info("insert process end, and start to check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows < expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicName1) - - tdLog.printNoPrefix("======== test case 5 end ...... 
") - - def tmqCase6(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 6: Produce while one consumers to subscribe tow topic, Each contains one db") - tdLog.info("step 1: create database, stb, ctb and insert data") - # create and start thread - parameterDict = {'cfg': '', \ - 'dbName': 'db60', \ - 'vgroups': 4, \ - 'stbName': 'stb', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.initConsumerTable() - - tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) - - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - - parameterDict2 = {'cfg': '', \ - 'dbName': 'db61', \ - 'vgroups': 4, \ - 'stbName': 'stb2', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict2['dbName'], parameterDict2['vgroups'])) - - prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) - prepareEnvThread2.start() - - tdLog.info("create topics from db") - topicName1 = 'topic_db60' - topicName2 = 'topic_db61' - - tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) - tdSql.execute("create topic %s as %s" %(topicName2, parameterDict2['dbName'])) - - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"] - topicList = topicName1 + ',' + topicName2 - ifcheckdata = 0 - ifManualCommit = 0 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - #consumerId = 1 - #self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - event.wait() - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - # wait for data ready - prepareEnvThread.join() - prepareEnvThread2.join() - - tdLog.info("insert process end, and start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicName1) - tdSql.query("drop topic %s"%topicName2) - - tdLog.printNoPrefix("======== test case 6 end ...... 
") - - def tmqCase7(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 7: Produce while two consumers to subscribe tow topic, Each contains one db") - tdLog.info("step 1: create database, stb, ctb and insert data") - # create and start thread - parameterDict = {'cfg': '', \ - 'dbName': 'db70', \ - 'vgroups': 4, \ - 'stbName': 'stb', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.initConsumerTable() - - tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) - - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - - parameterDict2 = {'cfg': '', \ - 'dbName': 'db71', \ - 'vgroups': 4, \ - 'stbName': 'stb2', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict2['dbName'], parameterDict2['vgroups'])) - - prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) - prepareEnvThread2.start() - - tdLog.info("create topics from db") - topicName1 = 'topic_db60' - topicName2 = 'topic_db61' - - tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) - tdSql.execute("create topic %s as %s" %(topicName2, parameterDict2['dbName'])) - - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"] - topicList = topicName1 + ',' + topicName2 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - consumerId = 1 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - event.wait() - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - # wait for data ready - prepareEnvThread.join() - prepareEnvThread2.join() - - tdLog.info("insert process end, and start to check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicName1) - tdSql.query("drop topic %s"%topicName2) - - tdLog.printNoPrefix("======== test case 7 end ...... 
") - def run(self): tdSql.prepare() @@ -815,12 +485,7 @@ class TDTestCase: self.tmqCase2(cfgPath, buildPath) self.tmqCase2a(cfgPath, buildPath) self.tmqCase3(cfgPath, buildPath) - self.tmqCase4(cfgPath, buildPath) - self.tmqCase5(cfgPath, buildPath) - self.tmqCase6(cfgPath, buildPath) - self.tmqCase7(cfgPath, buildPath) - - + def stop(self): tdSql.close() tdLog.success(f"{__file__} successfully executed") diff --git a/tests/system-test/7-tmq/subscribeDb0.py b/tests/system-test/7-tmq/subscribeDb0.py new file mode 100644 index 0000000000..d6f93acfd6 --- /dev/null +++ b/tests/system-test/7-tmq/subscribeDb0.py @@ -0,0 +1,515 @@ + +import taos +import sys +import time +import socket +import os +import threading + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + +class TDTestCase: + hostname = socket.gethostname() + #rpcDebugFlagVal = '143' + #clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} + #clientCfgDict["rpcDebugFlag"] = rpcDebugFlagVal + #updatecfgDict = {'clientCfg': {}, 'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} + #updatecfgDict["rpcDebugFlag"] = rpcDebugFlagVal + #print ("===================: ", updatecfgDict) + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def newcur(self,cfg,host,port): + user = "root" + password = "taosdata" + con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port) + cur=con.cursor() + print(cur) + return cur + + def initConsumerTable(self,cdbName='cdb'): + tdLog.info("create consume database, and consume info table, and consume result table") + tdSql.query("create database if not exists %s vgroups 1"%(cdbName)) + tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) + tdSql.query("drop table if exists %s.consumeresult "%(cdbName)) + + tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) + tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName) + + def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): + sql = "insert into %s.consumeinfo values "%cdbName + sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit) + tdLog.info("consume info sql: %s"%sql) + tdSql.query(sql) + + def selectConsumeResult(self,expectRows,cdbName='cdb'): + resultList=[] + while 1: + tdSql.query("select * from %s.consumeresult"%cdbName) + #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3)) + if tdSql.getRows() == expectRows: + break + else: + time.sleep(5) + + for i in range(expectRows): + tdLog.info ("consume id: %d, consume msgs: %d, consume rows: 
%d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3))) + resultList.append(tdSql.getData(i , 3)) + + return resultList + + def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0): + shellCmd = 'nohup ' + if valgrind == 1: + logFile = cfgPath + '/../log/valgrind-tmq.log' + shellCmd = 'nohup valgrind --log-file=' + logFile + shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes ' + + shellCmd += buildPath + '/build/bin/tmq_sim -c ' + cfgPath + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += "> /dev/null 2>&1 &" + tdLog.info(shellCmd) + os.system(shellCmd) + + def create_tables(self,tsql, dbName,vgroups,stbName,ctbNum): + tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups)) + tsql.execute("use %s" %dbName) + tsql.execute("create table if not exists %s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%stbName) + pre_create = "create table" + sql = pre_create + #tdLog.debug("doing create one stable %s and %d child table in %s ..." %(stbname, count ,dbname)) + for i in range(ctbNum): + sql += " %s_%d using %s tags(%d)"%(stbName,i,stbName,i+1) + if (i > 0) and (i%100 == 0): + tsql.execute(sql) + sql = pre_create + if sql != pre_create: + tsql.execute(sql) + + event.set() + tdLog.debug("complete to create database[%s], stable[%s] and %d child tables" %(dbName, stbName, ctbNum)) + return + + def insert_data(self,tsql,dbName,stbName,ctbNum,rowsPerTbl,batchNum,startTs): + tdLog.debug("start to insert data ............") + tsql.execute("use %s" %dbName) + pre_insert = "insert into " + sql = pre_insert + + t = time.time() + startTs = int(round(t * 1000)) + #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) + for i in range(ctbNum): + sql += " %s_%d values "%(stbName,i) + for j in range(rowsPerTbl): + sql += "(%d, %d, 'tmqrow_%d') "%(startTs + j, j, j) + if (j > 0) and ((j%batchNum == 0) or (j == rowsPerTbl - 1)): + tsql.execute(sql) + if j < rowsPerTbl - 1: + sql = "insert into %s_%d values " %(stbName,i) + else: + sql = "insert into " + #end sql + if sql != pre_insert: + #print("insert sql:%s"%sql) + tsql.execute(sql) + tdLog.debug("insert data ............ 
[OK]") + return + + def prepareEnv(self, **parameterDict): + print ("input parameters:") + print (parameterDict) + # create new connector for my thread + tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030) + self.create_tables(tsql,\ + parameterDict["dbName"],\ + parameterDict["vgroups"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"]) + + self.insert_data(tsql,\ + parameterDict["dbName"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],\ + parameterDict["batchNum"],\ + parameterDict["startTs"]) + return + + def tmqCase4(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 4: Produce while two consumers to subscribe one db, include 2 stb") + tdLog.info("step 1: create database, stb, ctb and insert data") + # create and start thread + parameterDict = {'cfg': '', \ + 'dbName': 'db4', \ + 'vgroups': 4, \ + 'stbName': 'stb', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 5000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.initConsumerTable() + + tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) + + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + + parameterDict2 = {'cfg': '', \ + 'dbName': 'db4', \ + 'vgroups': 4, \ + 'stbName': 'stb2', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 5000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) + prepareEnvThread2.start() + + tdLog.info("create topics from db") + topicName1 = 'topic_db1' + + tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) + + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"] + topicList = topicName1 + ifcheckdata = 0 + ifManualCommit = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + consumerId = 1 + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + event.wait() + + tdLog.info("start consume processor") + pollDelay = 100 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + # wait for data ready + prepareEnvThread.join() + prepareEnvThread2.join() + + tdLog.info("insert process end, and start to check consume result") + expectRows = 2 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicName1) + + tdLog.printNoPrefix("======== test case 4 end ...... 
") + + def tmqCase5(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 5: Produce while two consumers to subscribe one db, firstly create one stb, after start consume create other stb") + tdLog.info("step 1: create database, stb, ctb and insert data") + # create and start thread + parameterDict = {'cfg': '', \ + 'dbName': 'db5', \ + 'vgroups': 4, \ + 'stbName': 'stb', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 5000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.initConsumerTable() + + tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) + + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + + parameterDict2 = {'cfg': '', \ + 'dbName': 'db5', \ + 'vgroups': 4, \ + 'stbName': 'stb2', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 5000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + tdLog.info("create topics from db") + topicName1 = 'topic_db1' + + tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) + + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"] + topicList = topicName1 + ifcheckdata = 0 + ifManualCommit = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + consumerId = 1 + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + event.wait() + + tdLog.info("start consume processor") + pollDelay = 100 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) + prepareEnvThread2.start() + + # wait for data ready + prepareEnvThread.join() + prepareEnvThread2.join() + + tdLog.info("insert process end, and start to check consume result") + expectRows = 2 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows < expectrowcnt: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicName1) + + tdLog.printNoPrefix("======== test case 5 end ...... 
") + + def tmqCase6(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 6: Produce while one consumers to subscribe tow topic, Each contains one db") + tdLog.info("step 1: create database, stb, ctb and insert data") + # create and start thread + parameterDict = {'cfg': '', \ + 'dbName': 'db60', \ + 'vgroups': 4, \ + 'stbName': 'stb', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 5000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.initConsumerTable() + + tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) + + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + + parameterDict2 = {'cfg': '', \ + 'dbName': 'db61', \ + 'vgroups': 4, \ + 'stbName': 'stb2', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 5000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict2['dbName'], parameterDict2['vgroups'])) + + prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) + prepareEnvThread2.start() + + tdLog.info("create topics from db") + topicName1 = 'topic_db60' + topicName2 = 'topic_db61' + + tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) + tdSql.execute("create topic %s as %s" %(topicName2, parameterDict2['dbName'])) + + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"] + topicList = topicName1 + ',' + topicName2 + ifcheckdata = 0 + ifManualCommit = 0 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + #consumerId = 1 + #self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + event.wait() + + tdLog.info("start consume processor") + pollDelay = 100 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + # wait for data ready + prepareEnvThread.join() + prepareEnvThread2.join() + + tdLog.info("insert process end, and start to check consume result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicName1) + tdSql.query("drop topic %s"%topicName2) + + tdLog.printNoPrefix("======== test case 6 end ...... 
") + + def tmqCase7(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 7: Produce while two consumers to subscribe tow topic, Each contains one db") + tdLog.info("step 1: create database, stb, ctb and insert data") + # create and start thread + parameterDict = {'cfg': '', \ + 'dbName': 'db70', \ + 'vgroups': 4, \ + 'stbName': 'stb', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 5000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.initConsumerTable() + + tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) + + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + + parameterDict2 = {'cfg': '', \ + 'dbName': 'db71', \ + 'vgroups': 4, \ + 'stbName': 'stb2', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 5000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict2['dbName'], parameterDict2['vgroups'])) + + prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) + prepareEnvThread2.start() + + tdLog.info("create topics from db") + topicName1 = 'topic_db60' + topicName2 = 'topic_db61' + + tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) + tdSql.execute("create topic %s as %s" %(topicName2, parameterDict2['dbName'])) + + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"] + topicList = topicName1 + ',' + topicName2 + ifcheckdata = 0 + ifManualCommit = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + consumerId = 1 + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + event.wait() + + tdLog.info("start consume processor") + pollDelay = 100 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + # wait for data ready + prepareEnvThread.join() + prepareEnvThread2.join() + + tdLog.info("insert process end, and start to check consume result") + expectRows = 2 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicName1) + tdSql.query("drop topic %s"%topicName2) + + tdLog.printNoPrefix("======== test case 7 end ...... 
") + + def run(self): + tdSql.prepare() + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + cfgPath = buildPath + "/../sim/psim/cfg" + tdLog.info("cfgPath: %s" % cfgPath) + + self.tmqCase4(cfgPath, buildPath) + self.tmqCase5(cfgPath, buildPath) + self.tmqCase6(cfgPath, buildPath) + self.tmqCase7(cfgPath, buildPath) + + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/7-tmq/subscribeDb1.py b/tests/system-test/7-tmq/subscribeDb1.py index 2b9d344b74..56db157ab8 100644 --- a/tests/system-test/7-tmq/subscribeDb1.py +++ b/tests/system-test/7-tmq/subscribeDb1.py @@ -22,8 +22,8 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") - #tdSql.init(conn.cursor()) - tdSql.init(conn.cursor(), logSql) # output sql.txt file + tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -198,7 +198,7 @@ class TDTestCase: event.wait() tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 100 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) @@ -276,7 +276,7 @@ class TDTestCase: event.wait() tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 100 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) @@ -354,7 +354,7 @@ class TDTestCase: event.wait() tdLog.info("start consume processor") - pollDelay = 15 + pollDelay = 100 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) @@ -425,7 +425,7 @@ class TDTestCase: event.wait() tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 100 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) diff --git a/tests/system-test/7-tmq/subscribeStb.py b/tests/system-test/7-tmq/subscribeStb.py index a0b3668d47..2b7f0d3d5f 100644 --- a/tests/system-test/7-tmq/subscribeStb.py +++ b/tests/system-test/7-tmq/subscribeStb.py @@ -29,8 +29,8 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") - #tdSql.init(conn.cursor()) - tdSql.init(conn.cursor(), logSql) # output sql.txt file + tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -346,1024 +346,6 @@ class TDTestCase: tdLog.printNoPrefix("======== test case 2 end ...... 
") - def tmqCase3(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 3: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db3', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 50000, \ - 'batchNum': 13, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,parameterDict["dbName"],parameterDict["stbName"],parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 0 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - time.sleep(5) - tdLog.info("drop som child table of stb1") - dropTblNum = 4 - tdSql.query("drop table if exists %s.%s_1"%(parameterDict["dbName"], parameterDict["stbName"])) - tdSql.query("drop table if exists %s.%s_2"%(parameterDict["dbName"], parameterDict["stbName"])) - tdSql.query("drop table if exists %s.%s_3"%(parameterDict["dbName"], parameterDict["stbName"])) - tdSql.query("drop table if exists %s.%s_4"%(parameterDict["dbName"], parameterDict["stbName"])) - - tdLog.info("drop some child tables, then start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - remaindrowcnt = parameterDict["rowsPerTbl"] * (parameterDict["ctbNum"] - dropTblNum) - - if not (totalConsumeRows < expectrowcnt and totalConsumeRows > remaindrowcnt): - tdLog.info("act consume rows: %d, expect consume rows: between %d and %d"%(totalConsumeRows, remaindrowcnt, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 3 end ...... 
") - - def tmqCase4(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 4: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db4', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 4 end ...... 
") - - def tmqCase5(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 5: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db5', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 0 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != (expectrowcnt * (1 + 1/4)): - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 5 end ...... 
") - - def tmqCase6(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 6: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db6', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 6 end ...... 
") - - def tmqCase7(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 7: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db7', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 7 end ...... 
") - - def tmqCase8(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 8: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db8', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume 0 processor") - pollDelay = 10 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume 0 result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 1 processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 2 processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 and 2 result") - expectRows = 3 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*2: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) - tdLog.exit("tmq consume rows 
error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 8 end ...... ") - - def tmqCase9(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 9: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db9', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume 0 processor") - pollDelay = 10 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume 0 result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 1 processor") - self.initConsumerInfoTable() - consumerId = 1 - ifManualCommit = 0 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 2 processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 and 2 result") - expectRows = 3 - resultList = 
self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*2: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 9 end ...... ") - - def tmqCase10(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 10: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db10', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume 0 processor") - pollDelay = 10 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume 0 result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 1 processor") - self.initConsumerInfoTable() - consumerId = 1 - ifManualCommit = 1 - self.insertConsumerInfo(consumerId, expectrowcnt-10000,topicList,keyList,ifcheckdata,ifManualCommit) - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt-10000: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt-10000)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 2 processor") - self.initConsumerInfoTable() - consumerId = 2 - ifManualCommit = 1 - self.insertConsumerInfo(consumerId, 
expectrowcnt+10000,topicList,keyList,ifcheckdata,ifManualCommit) - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 and 2 result") - expectRows = 3 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*2: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 10 end ...... ") - - def tmqCase11(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 11: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db11', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - 
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 11 end ...... ") - - def tmqCase12(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 12: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db12', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 0 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 12 end ...... 
") - - def tmqCase13(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 13: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db13', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt/2,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*(1/2+1/4): - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*(1/2+1/4))) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 2 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 3 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act 
consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 13 end ...... ") - def run(self): tdSql.prepare() @@ -1377,9 +359,6 @@ class TDTestCase: self.tmqCase1(cfgPath, buildPath) self.tmqCase2(cfgPath, buildPath) - # self.tmqCase3(cfgPath, buildPath) - # self.tmqCase4(cfgPath, buildPath) - # self.tmqCase5(cfgPath, buildPath) def stop(self): tdSql.close() diff --git a/tests/system-test/7-tmq/subscribeStb0.py b/tests/system-test/7-tmq/subscribeStb0.py index 7864a4bc73..a212cf7590 100644 --- a/tests/system-test/7-tmq/subscribeStb0.py +++ b/tests/system-test/7-tmq/subscribeStb0.py @@ -29,8 +29,8 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") - #tdSql.init(conn.cursor()) - tdSql.init(conn.cursor(), logSql) # output sql.txt file + tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -183,169 +183,6 @@ class TDTestCase: return - def tmqCase1(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 1: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db1', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 0 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 100 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - time.sleep(5) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("insert process end, and start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 1 end ...... 
") - - def tmqCase2(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 2: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db2', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - parameterDict2 = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db2', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb2', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict2['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_stable(tdSql, parameterDict2["dbName"], parameterDict2["stbName"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 0 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 100 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start create child tables of stb1 and stb2") - parameterDict['actionType'] = actionType.CREATE_CTABLE - parameterDict2['actionType'] = actionType.CREATE_CTABLE - - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) - prepareEnvThread2.start() - - prepareEnvThread.join() - prepareEnvThread2.join() - - tdLog.info("start insert data into child tables of stb1 and stb2") - parameterDict['actionType'] = actionType.INSERT_DATA - parameterDict2['actionType'] = actionType.INSERT_DATA - - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) - prepareEnvThread2.start() - - prepareEnvThread.join() - prepareEnvThread2.join() - - tdLog.info("insert process end, and start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 2 end ...... 
") - def tmqCase3(self, cfgPath, buildPath): tdLog.printNoPrefix("======== test case 3: ") @@ -386,7 +223,7 @@ class TDTestCase: self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 100 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) @@ -461,7 +298,7 @@ class TDTestCase: self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 100 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) @@ -544,7 +381,7 @@ class TDTestCase: self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 100 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) @@ -580,789 +417,7 @@ class TDTestCase: tdSql.query("drop topic %s"%topicFromStb1) - tdLog.printNoPrefix("======== test case 5 end ...... ") - - def tmqCase6(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 6: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db6', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - 
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 6 end ...... ") - - def tmqCase7(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 7: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db7', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 7 end ...... 
") - - def tmqCase8(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 8: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db8', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume 0 processor") - pollDelay = 10 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume 0 result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 1 processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 2 processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 and 2 result") - expectRows = 3 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*2: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) - tdLog.exit("tmq consume rows 
error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 8 end ...... ") - - def tmqCase9(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 9: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db9', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume 0 processor") - pollDelay = 10 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume 0 result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 1 processor") - self.initConsumerInfoTable() - consumerId = 1 - ifManualCommit = 0 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 2 processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 and 2 result") - expectRows = 3 - resultList = 
self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*2: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 9 end ...... ") - - def tmqCase10(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 10: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db10', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume 0 processor") - pollDelay = 10 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume 0 result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 1 processor") - self.initConsumerInfoTable() - consumerId = 1 - ifManualCommit = 1 - self.insertConsumerInfo(consumerId, expectrowcnt-10000,topicList,keyList,ifcheckdata,ifManualCommit) - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt-10000: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt-10000)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 2 processor") - self.initConsumerInfoTable() - consumerId = 2 - ifManualCommit = 1 - self.insertConsumerInfo(consumerId, 
expectrowcnt+10000,topicList,keyList,ifcheckdata,ifManualCommit) - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 and 2 result") - expectRows = 3 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*2: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 10 end ...... ") - - def tmqCase11(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 11: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db11', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - 
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 11 end ...... ") - - def tmqCase12(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 12: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db12', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 0 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 12 end ...... 
") - - def tmqCase13(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 13: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db13', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt/2,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*(1/2+1/4): - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*(1/2+1/4))) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 2 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 3 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act 
consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 13 end ...... ") + tdLog.printNoPrefix("======== test case 5 end ...... ") def run(self): tdSql.prepare() @@ -1375,8 +430,6 @@ class TDTestCase: cfgPath = buildPath + "/../sim/psim/cfg" tdLog.info("cfgPath: %s" % cfgPath) - # self.tmqCase1(cfgPath, buildPath) - # self.tmqCase2(cfgPath, buildPath) self.tmqCase3(cfgPath, buildPath) self.tmqCase4(cfgPath, buildPath) self.tmqCase5(cfgPath, buildPath) diff --git a/tests/system-test/7-tmq/subscribeStb1.py b/tests/system-test/7-tmq/subscribeStb1.py index 049b297d2d..92347690d9 100644 --- a/tests/system-test/7-tmq/subscribeStb1.py +++ b/tests/system-test/7-tmq/subscribeStb1.py @@ -29,8 +29,8 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") - #tdSql.init(conn.cursor()) - tdSql.init(conn.cursor(), logSql) # output sql.txt file + tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -183,405 +183,6 @@ class TDTestCase: return - def tmqCase1(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 1: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db1', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 0 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 100 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - time.sleep(5) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("insert process end, and start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 1 end ...... 
") - - def tmqCase2(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 2: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db2', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - parameterDict2 = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db2', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb2', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict2['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_stable(tdSql, parameterDict2["dbName"], parameterDict2["stbName"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 0 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 100 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start create child tables of stb1 and stb2") - parameterDict['actionType'] = actionType.CREATE_CTABLE - parameterDict2['actionType'] = actionType.CREATE_CTABLE - - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) - prepareEnvThread2.start() - - prepareEnvThread.join() - prepareEnvThread2.join() - - tdLog.info("start insert data into child tables of stb1 and stb2") - parameterDict['actionType'] = actionType.INSERT_DATA - parameterDict2['actionType'] = actionType.INSERT_DATA - - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) - prepareEnvThread2.start() - - prepareEnvThread.join() - prepareEnvThread2.join() - - tdLog.info("insert process end, and start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 2 end ...... 
") - - def tmqCase3(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 3: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db3', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 13, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,parameterDict["dbName"],parameterDict["stbName"],parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 0 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - time.sleep(2) - tdLog.info("drop som child table of stb1") - dropTblNum = 4 - tdSql.query("drop table if exists %s.%s_9"%(parameterDict["dbName"], parameterDict["stbName"])) - tdSql.query("drop table if exists %s.%s_8"%(parameterDict["dbName"], parameterDict["stbName"])) - tdSql.query("drop table if exists %s.%s_7"%(parameterDict["dbName"], parameterDict["stbName"])) - tdSql.query("drop table if exists %s.%s_3"%(parameterDict["dbName"], parameterDict["stbName"])) - - tdLog.info("drop some child tables, then start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - remaindrowcnt = parameterDict["rowsPerTbl"] * (parameterDict["ctbNum"] - dropTblNum) - - if not (totalConsumeRows < expectrowcnt and totalConsumeRows > remaindrowcnt): - tdLog.info("act consume rows: %d, expect consume rows: between %d and %d"%(totalConsumeRows, remaindrowcnt, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 3 end ...... 
") - - def tmqCase4(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 4: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db4', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 4 end ...... 
") - - def tmqCase5(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 5: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db5', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 0 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != (expectrowcnt * (1 + 1/4)): - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 5 end ...... 
") - def tmqCase6(self, cfgPath, buildPath): tdLog.printNoPrefix("======== test case 6: ") @@ -627,7 +228,7 @@ class TDTestCase: self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 100 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) @@ -714,7 +315,7 @@ class TDTestCase: self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 100 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) @@ -752,618 +353,6 @@ class TDTestCase: tdLog.printNoPrefix("======== test case 7 end ...... ") - def tmqCase8(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 8: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db8', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume 0 processor") - pollDelay = 10 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume 0 result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 1 processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 
expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 2 processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 and 2 result") - expectRows = 3 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*2: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 8 end ...... ") - - def tmqCase9(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 9: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db9', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume 0 processor") - pollDelay = 10 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume 0 result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 1 processor") - self.initConsumerInfoTable() - consumerId = 1 - ifManualCommit = 0 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 result") - 
expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 2 processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 and 2 result") - expectRows = 3 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*2: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 9 end ...... ") - - def tmqCase10(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 10: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db10', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume 0 processor") - pollDelay = 10 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume 0 result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 1 processor") - self.initConsumerInfoTable() - consumerId = 1 - ifManualCommit = 1 - self.insertConsumerInfo(consumerId, expectrowcnt-10000,topicList,keyList,ifcheckdata,ifManualCommit) - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - 
tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt-10000: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt-10000)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 2 processor") - self.initConsumerInfoTable() - consumerId = 2 - ifManualCommit = 1 - self.insertConsumerInfo(consumerId, expectrowcnt+10000,topicList,keyList,ifcheckdata,ifManualCommit) - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 and 2 result") - expectRows = 3 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*2: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 10 end ...... ") - - def tmqCase11(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 11: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db11', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - 
if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 11 end ...... ") - - def tmqCase12(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 12: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db12', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 0 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = 
self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 12 end ...... ") - - def tmqCase13(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 13: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db13', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt/2,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*(1/2+1/4): - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*(1/2+1/4))) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 2 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, 
expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 3 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 13 end ...... ") - def run(self): tdSql.prepare() @@ -1375,19 +364,8 @@ class TDTestCase: cfgPath = buildPath + "/../sim/psim/cfg" tdLog.info("cfgPath: %s" % cfgPath) - #self.tmqCase1(cfgPath, buildPath) - #self.tmqCase2(cfgPath, buildPath) - #self.tmqCase3(cfgPath, buildPath) - #self.tmqCase4(cfgPath, buildPath) - #self.tmqCase5(cfgPath, buildPath) self.tmqCase6(cfgPath, buildPath) self.tmqCase7(cfgPath, buildPath) - self.tmqCase8(cfgPath, buildPath) - self.tmqCase9(cfgPath, buildPath) - self.tmqCase10(cfgPath, buildPath) - self.tmqCase11(cfgPath, buildPath) - self.tmqCase12(cfgPath, buildPath) - self.tmqCase13(cfgPath, buildPath) def stop(self): tdSql.close() diff --git a/tests/system-test/7-tmq/subscribeStb2.py b/tests/system-test/7-tmq/subscribeStb2.py index e825ebd3b6..d08adcdc83 100644 --- a/tests/system-test/7-tmq/subscribeStb2.py +++ b/tests/system-test/7-tmq/subscribeStb2.py @@ -29,8 +29,8 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") - #tdSql.init(conn.cursor()) - tdSql.init(conn.cursor(), logSql) # output sql.txt file + tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -183,18 +183,15 @@ class TDTestCase: return - def tmqCase1(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 1: ") + def tmqCase8(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 8: ") self.initConsumerTable() - auotCtbNum = 5 - auotCtbPrefix = 'autoCtb' - # create and start thread parameterDict = {'cfg': '', \ 'actionType': 0, \ - 'dbName': 'db1', \ + 'dbName': 'db8', \ 'dropFlag': 1, \ 'vgroups': 4, \ 'replica': 1, \ @@ -204,67 +201,102 @@ class TDTestCase: 'batchNum': 100, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath - + self.create_database(tdSql, parameterDict["dbName"]) self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,parameterDict["dbName"],parameterDict["stbName"],parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) + self.insert_data(tdSql,\ + parameterDict["dbName"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],\ + parameterDict["batchNum"]) tdLog.info("create topics from stb1") topicFromStb1 = 'topic_stb1' tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * (auotCtbNum + parameterDict["ctbNum"]) + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] topicList = topicFromStb1 ifcheckdata = 0 - ifManualCommit = 0 + ifManualCommit 
= 1 keyList = 'group.id:cgrp1,\ enable.auto.commit:false,\ auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' + auto.offset.reset:latest' self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - tdLog.info("start consume processor") + tdLog.info("start consume 0 processor") pollDelay = 100 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - # add some new child tables using auto ctreating mode - time.sleep(1) - for index in range(auotCtbNum): - tdSql.query("create table %s.%s_%d using %s.%s tags(%d)"%(parameterDict["dbName"], auotCtbPrefix, index, parameterDict["dbName"], parameterDict["stbName"], index)) - - self.insert_data(tdSql,parameterDict["dbName"],auotCtbPrefix,auotCtbNum,parameterDict["rowsPerTbl"],parameterDict["batchNum"]) - - tdLog.info("insert process end, and start to check consume result") + tdLog.info("start to check consume 0 result") expectRows = 1 resultList = self.selectConsumeResult(expectRows) totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] + if totalConsumeRows != 0: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) + tdLog.exit("tmq consume rows error!") + + tdLog.info("start consume 1 processor") + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start one new thread to insert data") + parameterDict['actionType'] = actionType.INSERT_DATA + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + prepareEnvThread.join() + + tdLog.info("start to check consume 0 and 1 result") + expectRows = 2 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + if totalConsumeRows != expectrowcnt: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) tdLog.exit("tmq consume rows error!") + tdLog.info("start consume 2 processor") + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start one new thread to insert data") + parameterDict['actionType'] = actionType.INSERT_DATA + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + prepareEnvThread.join() + + tdLog.info("start to check consume 0 and 1 and 2 result") + expectRows = 3 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt*2: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) + tdLog.exit("tmq consume rows error!") + tdSql.query("drop topic %s"%topicFromStb1) - tdLog.printNoPrefix("======== test case 1 end ...... ") + tdLog.printNoPrefix("======== test case 8 end ...... 
") - def tmqCase2(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 2: ") + def tmqCase9(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 9: ") self.initConsumerTable() - auotCtbNum = 10 - auotCtbPrefix = 'autoCtb' - # create and start thread parameterDict = {'cfg': '', \ 'actionType': 0, \ - 'dbName': 'db2', \ + 'dbName': 'db9', \ 'dropFlag': 1, \ 'vgroups': 4, \ 'replica': 1, \ @@ -274,58 +306,96 @@ class TDTestCase: 'batchNum': 100, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath - + self.create_database(tdSql, parameterDict["dbName"]) self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,parameterDict["dbName"],parameterDict["stbName"],parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) + self.insert_data(tdSql,\ + parameterDict["dbName"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],\ + parameterDict["batchNum"]) - self.create_stable(tdSql, parameterDict["dbName"], 'stb2') - - tdLog.info("create topics from stb0/stb1") + tdLog.info("create topics from stb1") topicFromStb1 = 'topic_stb1' - topicFromStb2 = 'topic_stb2' - + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb2, parameterDict['dbName'], 'stb2')) consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * (auotCtbNum + parameterDict["ctbNum"]) - topicList = '%s, %s'%(topicFromStb1,topicFromStb2) + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + topicList = topicFromStb1 ifcheckdata = 0 - ifManualCommit = 0 + ifManualCommit = 1 keyList = 'group.id:cgrp1,\ enable.auto.commit:false,\ auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' + auto.offset.reset:latest' self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - tdLog.info("start consume processor") + tdLog.info("start consume 0 processor") pollDelay = 100 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - # add some new child tables using auto ctreating mode - time.sleep(1) - for index in range(auotCtbNum): - tdSql.query("create table %s.%s_%d using %s.%s tags(%d)"%(parameterDict["dbName"], auotCtbPrefix, index, parameterDict["dbName"], 'stb2', index)) - - self.insert_data(tdSql,parameterDict["dbName"],auotCtbPrefix,auotCtbNum,parameterDict["rowsPerTbl"],parameterDict["batchNum"]) - - tdLog.info("insert process end, and start to check consume result") + tdLog.info("start to check consume 0 result") expectRows = 1 resultList = self.selectConsumeResult(expectRows) totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] + if totalConsumeRows != 0: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) + tdLog.exit("tmq consume rows error!") + + tdLog.info("start consume 1 processor") + self.initConsumerInfoTable() + consumerId = 1 + ifManualCommit = 0 + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start one new thread to insert data") + parameterDict['actionType'] = 
actionType.INSERT_DATA + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + prepareEnvThread.join() + + tdLog.info("start to check consume 0 and 1 result") + expectRows = 2 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + if totalConsumeRows != expectrowcnt: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) tdLog.exit("tmq consume rows error!") + tdLog.info("start consume 2 processor") + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start one new thread to insert data") + parameterDict['actionType'] = actionType.INSERT_DATA + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + prepareEnvThread.join() + + tdLog.info("start to check consume 0 and 1 and 2 result") + expectRows = 3 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt*2: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) + tdLog.exit("tmq consume rows error!") + tdSql.query("drop topic %s"%topicFromStb1) - tdLog.printNoPrefix("======== test case 2 end ...... ") + tdLog.printNoPrefix("======== test case 9 end ...... ") def run(self): tdSql.prepare() @@ -338,8 +408,8 @@ class TDTestCase: cfgPath = buildPath + "/../sim/psim/cfg" tdLog.info("cfgPath: %s" % cfgPath) - self.tmqCase1(cfgPath, buildPath) - self.tmqCase2(cfgPath, buildPath) + self.tmqCase8(cfgPath, buildPath) + self.tmqCase9(cfgPath, buildPath) def stop(self): tdSql.close() diff --git a/tests/system-test/7-tmq/subscribeStb3.py b/tests/system-test/7-tmq/subscribeStb3.py new file mode 100644 index 0000000000..58e36911c1 --- /dev/null +++ b/tests/system-test/7-tmq/subscribeStb3.py @@ -0,0 +1,607 @@ + +import taos +import sys +import time +import socket +import os +import threading +from enum import Enum + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + +class actionType(Enum): + CREATE_DATABASE = 0 + CREATE_STABLE = 1 + CREATE_CTABLE = 2 + INSERT_DATA = 3 + +class TDTestCase: + hostname = socket.gethostname() + #rpcDebugFlagVal = '143' + #clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} + #clientCfgDict["rpcDebugFlag"] = rpcDebugFlagVal + #updatecfgDict = {'clientCfg': {}, 'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} + #updatecfgDict["rpcDebugFlag"] = rpcDebugFlagVal + #print ("===================: ", updatecfgDict) + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def newcur(self,cfg,host,port): + user = "root" + password = "taosdata" + con=taos.connect(host=host, 
user=user, password=password, config=cfg ,port=port) + cur=con.cursor() + print(cur) + return cur + + def initConsumerTable(self,cdbName='cdb'): + tdLog.info("create consume database, and consume info table, and consume result table") + tdSql.query("create database if not exists %s vgroups 1"%(cdbName)) + tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) + tdSql.query("drop table if exists %s.consumeresult "%(cdbName)) + + tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) + tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName) + + def initConsumerInfoTable(self,cdbName='cdb'): + tdLog.info("drop consumeinfo table") + tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) + tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) + + def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): + sql = "insert into %s.consumeinfo values "%cdbName + sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit) + tdLog.info("consume info sql: %s"%sql) + tdSql.query(sql) + + def selectConsumeResult(self,expectRows,cdbName='cdb'): + resultList=[] + while 1: + tdSql.query("select * from %s.consumeresult"%cdbName) + #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3)) + if tdSql.getRows() == expectRows: + break + else: + time.sleep(5) + + for i in range(expectRows): + tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3))) + resultList.append(tdSql.getData(i , 3)) + + return resultList + + def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0): + shellCmd = 'nohup ' + if valgrind == 1: + logFile = cfgPath + '/../log/valgrind-tmq.log' + shellCmd = 'nohup valgrind --log-file=' + logFile + shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes ' + + shellCmd += buildPath + '/build/bin/tmq_sim -c ' + cfgPath + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += "> /dev/null 2>&1 &" + tdLog.info(shellCmd) + os.system(shellCmd) + + def create_database(self,tsql, dbName,dropFlag=1,vgroups=4,replica=1): + if dropFlag == 1: + tsql.execute("drop database if exists %s"%(dbName)) + + tsql.execute("create database if not exists %s vgroups %d replica %d"%(dbName, vgroups, replica)) + tdLog.debug("complete to create database %s"%(dbName)) + return + + def create_stable(self,tsql, dbName,stbName): + tsql.execute("create table if not exists %s.%s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%(dbName, stbName)) + tdLog.debug("complete to create %s.%s" %(dbName, stbName)) + return + + def create_ctables(self,tsql, dbName,stbName,ctbNum): + tsql.execute("use %s" %dbName) + pre_create = "create table" + sql = pre_create + #tdLog.debug("doing create one stable %s and %d child table in %s ..." 
%(stbname, count ,dbname)) + for i in range(ctbNum): + sql += " %s_%d using %s tags(%d)"%(stbName,i,stbName,i+1) + if (i > 0) and (i%100 == 0): + tsql.execute(sql) + sql = pre_create + if sql != pre_create: + tsql.execute(sql) + + tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName)) + return + + def insert_data(self,tsql,dbName,stbName,ctbNum,rowsPerTbl,batchNum,startTs=0): + tdLog.debug("start to insert data ............") + tsql.execute("use %s" %dbName) + pre_insert = "insert into " + sql = pre_insert + + if startTs == 0: + t = time.time() + startTs = int(round(t * 1000)) + + #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) + rowsOfSql = 0 + for i in range(ctbNum): + sql += " %s_%d values "%(stbName,i) + for j in range(rowsPerTbl): + sql += "(%d, %d, 'tmqrow_%d') "%(startTs + j, j, j) + rowsOfSql += 1 + if (j > 0) and ((rowsOfSql == batchNum) or (j == rowsPerTbl - 1)): + tsql.execute(sql) + rowsOfSql = 0 + if j < rowsPerTbl - 1: + sql = "insert into %s_%d values " %(stbName,i) + else: + sql = "insert into " + #end sql + if sql != pre_insert: + #print("insert sql:%s"%sql) + tsql.execute(sql) + tdLog.debug("insert data ............ [OK]") + return + + def prepareEnv(self, **parameterDict): + # create new connector for my thread + tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030) + + if parameterDict["actionType"] == actionType.CREATE_DATABASE: + self.create_database(tsql, parameterDict["dbName"]) + elif parameterDict["actionType"] == actionType.CREATE_STABLE: + self.create_stable(tsql, parameterDict["dbName"], parameterDict["stbName"]) + elif parameterDict["actionType"] == actionType.CREATE_CTABLE: + self.create_ctables(tsql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + elif parameterDict["actionType"] == actionType.INSERT_DATA: + self.insert_data(tsql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],parameterDict["batchNum"]) + else: + tdLog.exit("not support's action: ", parameterDict["actionType"]) + + return + + def tmqCase10(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 10: ") + + self.initConsumerTable() + + # create and start thread + parameterDict = {'cfg': '', \ + 'actionType': 0, \ + 'dbName': 'db10', \ + 'dropFlag': 1, \ + 'vgroups': 4, \ + 'replica': 1, \ + 'stbName': 'stb1', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.create_database(tdSql, parameterDict["dbName"]) + self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) + self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + self.insert_data(tdSql,\ + parameterDict["dbName"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],\ + parameterDict["batchNum"]) + + tdLog.info("create topics from stb1") + topicFromStb1 = 'topic_stb1' + + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + topicList = topicFromStb1 + ifcheckdata = 0 + ifManualCommit = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:latest' + self.insertConsumerInfo(consumerId, 
expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume 0 processor") + pollDelay = 100 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start to check consume 0 result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != 0: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) + tdLog.exit("tmq consume rows error!") + + tdLog.info("start consume 1 processor") + self.initConsumerInfoTable() + consumerId = 1 + ifManualCommit = 1 + self.insertConsumerInfo(consumerId, expectrowcnt-10000,topicList,keyList,ifcheckdata,ifManualCommit) + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start one new thread to insert data") + parameterDict['actionType'] = actionType.INSERT_DATA + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + prepareEnvThread.join() + + tdLog.info("start to check consume 0 and 1 result") + expectRows = 2 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt-10000: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt-10000)) + tdLog.exit("tmq consume rows error!") + + tdLog.info("start consume 2 processor") + self.initConsumerInfoTable() + consumerId = 2 + ifManualCommit = 1 + self.insertConsumerInfo(consumerId, expectrowcnt+10000,topicList,keyList,ifcheckdata,ifManualCommit) + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start one new thread to insert data") + parameterDict['actionType'] = actionType.INSERT_DATA + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + prepareEnvThread.join() + + tdLog.info("start to check consume 0 and 1 and 2 result") + expectRows = 3 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt*2: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicFromStb1) + + tdLog.printNoPrefix("======== test case 10 end ...... 
") + + def tmqCase11(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 11: ") + + self.initConsumerTable() + + # create and start thread + parameterDict = {'cfg': '', \ + 'actionType': 0, \ + 'dbName': 'db11', \ + 'dropFlag': 1, \ + 'vgroups': 4, \ + 'replica': 1, \ + 'stbName': 'stb1', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.create_database(tdSql, parameterDict["dbName"]) + self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) + self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + self.insert_data(tdSql,\ + parameterDict["dbName"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],\ + parameterDict["batchNum"]) + + tdLog.info("create topics from stb1") + topicFromStb1 = 'topic_stb1' + + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + topicList = topicFromStb1 + ifcheckdata = 0 + ifManualCommit = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:none' + self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + pollDelay = 100 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start to check consume result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != 0: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) + tdLog.exit("tmq consume rows error!") + + self.initConsumerInfoTable() + consumerId = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:none' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("again start consume processor") + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("again check consume result") + expectRows = 2 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != 0: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicFromStb1) + + tdLog.printNoPrefix("======== test case 11 end ...... 
") + + def tmqCase12(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 12: ") + + self.initConsumerTable() + + # create and start thread + parameterDict = {'cfg': '', \ + 'actionType': 0, \ + 'dbName': 'db12', \ + 'dropFlag': 1, \ + 'vgroups': 4, \ + 'replica': 1, \ + 'stbName': 'stb1', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.create_database(tdSql, parameterDict["dbName"]) + self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) + self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + self.insert_data(tdSql,\ + parameterDict["dbName"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],\ + parameterDict["batchNum"]) + + tdLog.info("create topics from stb1") + topicFromStb1 = 'topic_stb1' + + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + topicList = topicFromStb1 + ifcheckdata = 0 + ifManualCommit = 0 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + pollDelay = 100 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start to check consume result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt/4: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) + tdLog.exit("tmq consume rows error!") + + self.initConsumerInfoTable() + consumerId = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:none' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("again start consume processor") + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("again check consume result") + expectRows = 2 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt/4: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicFromStb1) + + tdLog.printNoPrefix("======== test case 12 end ...... 
") + + def tmqCase13(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 13: ") + + self.initConsumerTable() + + # create and start thread + parameterDict = {'cfg': '', \ + 'actionType': 0, \ + 'dbName': 'db13', \ + 'dropFlag': 1, \ + 'vgroups': 4, \ + 'replica': 1, \ + 'stbName': 'stb1', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.create_database(tdSql, parameterDict["dbName"]) + self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) + self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + self.insert_data(tdSql,\ + parameterDict["dbName"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],\ + parameterDict["batchNum"]) + + tdLog.info("create topics from stb1") + topicFromStb1 = 'topic_stb1' + + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + topicList = topicFromStb1 + ifcheckdata = 0 + ifManualCommit = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + pollDelay = 100 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start to check consume result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt/4: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) + tdLog.exit("tmq consume rows error!") + + self.initConsumerInfoTable() + consumerId = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:none' + self.insertConsumerInfo(consumerId, expectrowcnt/2,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("again start consume processor") + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("again check consume result") + expectRows = 2 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt*(1/2+1/4): + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*(1/2+1/4))) + tdLog.exit("tmq consume rows error!") + + self.initConsumerInfoTable() + consumerId = 2 + ifManualCommit = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:none' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("again start consume processor") + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("again check consume result") + expectRows = 3 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt: + tdLog.info("act 
consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicFromStb1) + + tdLog.printNoPrefix("======== test case 13 end ...... ") + + def run(self): + tdSql.prepare() + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + cfgPath = buildPath + "/../sim/psim/cfg" + tdLog.info("cfgPath: %s" % cfgPath) + + self.tmqCase10(cfgPath, buildPath) + self.tmqCase11(cfgPath, buildPath) + self.tmqCase12(cfgPath, buildPath) + self.tmqCase13(cfgPath, buildPath) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/7-tmq/subscribeStb4.py b/tests/system-test/7-tmq/subscribeStb4.py new file mode 100644 index 0000000000..d06e144796 --- /dev/null +++ b/tests/system-test/7-tmq/subscribeStb4.py @@ -0,0 +1,351 @@ + +import taos +import sys +import time +import socket +import os +import threading +from enum import Enum + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + +class actionType(Enum): + CREATE_DATABASE = 0 + CREATE_STABLE = 1 + CREATE_CTABLE = 2 + INSERT_DATA = 3 + +class TDTestCase: + hostname = socket.gethostname() + #rpcDebugFlagVal = '143' + #clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} + #clientCfgDict["rpcDebugFlag"] = rpcDebugFlagVal + #updatecfgDict = {'clientCfg': {}, 'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} + #updatecfgDict["rpcDebugFlag"] = rpcDebugFlagVal + #print ("===================: ", updatecfgDict) + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def newcur(self,cfg,host,port): + user = "root" + password = "taosdata" + con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port) + cur=con.cursor() + print(cur) + return cur + + def initConsumerTable(self,cdbName='cdb'): + tdLog.info("create consume database, and consume info table, and consume result table") + tdSql.query("create database if not exists %s vgroups 1"%(cdbName)) + tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) + tdSql.query("drop table if exists %s.consumeresult "%(cdbName)) + + tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) + tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName) + + def initConsumerInfoTable(self,cdbName='cdb'): + tdLog.info("drop consumeinfo table") + tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) + tdSql.query("create table %s.consumeinfo 
+
+    def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
+        sql = "insert into %s.consumeinfo values "%cdbName
+        sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit)
+        tdLog.info("consume info sql: %s"%sql)
+        tdSql.query(sql)
+
+    def selectConsumeResult(self,expectRows,cdbName='cdb'):
+        resultList=[]
+        while 1:
+            tdSql.query("select * from %s.consumeresult"%cdbName)
+            #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3))
+            if tdSql.getRows() == expectRows:
+                break
+            else:
+                time.sleep(5)
+
+        for i in range(expectRows):
+            tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3)))
+            resultList.append(tdSql.getData(i , 3))
+
+        return resultList
+
+    def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0):
+        shellCmd = 'nohup '
+        if valgrind == 1:
+            logFile = cfgPath + '/../log/valgrind-tmq.log'
+            shellCmd = 'nohup valgrind --log-file=' + logFile
+            shellCmd += ' --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes '
+
+        shellCmd += buildPath + '/build/bin/tmq_sim -c ' + cfgPath
+        shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
+        shellCmd += "> /dev/null 2>&1 &"
+        tdLog.info(shellCmd)
+        os.system(shellCmd)
+
+    def create_database(self,tsql, dbName,dropFlag=1,vgroups=4,replica=1):
+        if dropFlag == 1:
+            tsql.execute("drop database if exists %s"%(dbName))
+
+        tsql.execute("create database if not exists %s vgroups %d replica %d"%(dbName, vgroups, replica))
+        tdLog.debug("complete to create database %s"%(dbName))
+        return
+
+    def create_stable(self,tsql, dbName,stbName):
+        tsql.execute("create table if not exists %s.%s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%(dbName, stbName))
+        tdLog.debug("complete to create %s.%s" %(dbName, stbName))
+        return
+
+    def create_ctables(self,tsql, dbName,stbName,ctbNum):
+        tsql.execute("use %s" %dbName)
+        pre_create = "create table"
+        sql = pre_create
+        #tdLog.debug("doing create one stable %s and %d child table in %s ..." %(stbname, count ,dbname))
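+        # batch mode: one multi-table "create table" statement is extended and
+        # flushed every 100 child tables; the trailing partial batch runs after the loop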
+        for i in range(ctbNum):
+            sql += " %s_%d using %s tags(%d)"%(stbName,i,stbName,i+1)
+            if (i > 0) and (i%100 == 0):
+                tsql.execute(sql)
+                sql = pre_create
+        if sql != pre_create:
+            tsql.execute(sql)
+
+        tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName))
+        return
+
+    def insert_data(self,tsql,dbName,stbName,ctbNum,rowsPerTbl,batchNum,startTs=0):
+        tdLog.debug("start to insert data ............")
+        tsql.execute("use %s" %dbName)
+        pre_insert = "insert into "
+        sql = pre_insert
+
+        if startTs == 0:
+            t = time.time()
+            startTs = int(round(t * 1000))
+
+        #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
+        rowsOfSql = 0
+        for i in range(ctbNum):
+            sql += " %s_%d values "%(stbName,i)
+            for j in range(rowsPerTbl):
+                sql += "(%d, %d, 'tmqrow_%d') "%(startTs + j, j, j)
+                rowsOfSql += 1
+                if (j > 0) and ((rowsOfSql == batchNum) or (j == rowsPerTbl - 1)):
+                    tsql.execute(sql)
+                    rowsOfSql = 0
+                    if j < rowsPerTbl - 1:
+                        sql = "insert into %s_%d values " %(stbName,i)
+                    else:
+                        sql = "insert into "
+        #end sql
+        if sql != pre_insert:
+            #print("insert sql:%s"%sql)
+            tsql.execute(sql)
+        tdLog.debug("insert data ............ [OK]")
+        return
+
+    def prepareEnv(self, **parameterDict):
+        # create new connector for my thread
+        tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030)
+
+        if parameterDict["actionType"] == actionType.CREATE_DATABASE:
+            self.create_database(tsql, parameterDict["dbName"])
+        elif parameterDict["actionType"] == actionType.CREATE_STABLE:
+            self.create_stable(tsql, parameterDict["dbName"], parameterDict["stbName"])
+        elif parameterDict["actionType"] == actionType.CREATE_CTABLE:
+            self.create_ctables(tsql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"])
+        elif parameterDict["actionType"] == actionType.INSERT_DATA:
+            self.insert_data(tsql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"],\
+                             parameterDict["rowsPerTbl"],parameterDict["batchNum"])
+        else:
+            tdLog.exit("unsupported action: %s"%parameterDict["actionType"])
+
+        return
+
+    def tmqCase1(self, cfgPath, buildPath):
+        tdLog.printNoPrefix("======== test case 1: ")
+
+        self.initConsumerTable()
+
+        autoCtbNum    = 5
+        autoCtbPrefix = 'autoCtb'
+
+        # create and start thread
+        parameterDict = {'cfg':        '',    \
+                         'actionType': 0,     \
+                         'dbName':     'db1', \
+                         'dropFlag':   1,     \
+                         'vgroups':    4,     \
+                         'replica':    1,     \
+                         'stbName':    'stb1',\
+                         'ctbNum':     10,    \
+                         'rowsPerTbl': 10000, \
+                         'batchNum':   100,   \
+                         'startTs':    1640966400000}  # 2022-01-01 00:00:00.000
+        parameterDict['cfg'] = cfgPath
+
+        self.create_database(tdSql, parameterDict["dbName"])
+        self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"])
+        self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"])
+        self.insert_data(tdSql,parameterDict["dbName"],parameterDict["stbName"],parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"])
+
+        tdLog.info("create topics from stb1")
+        topicFromStb1 = 'topic_stb1'
+
+        tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName']))
+        consumerId     = 0
+        expectrowcnt   = parameterDict["rowsPerTbl"] * (autoCtbNum + parameterDict["ctbNum"])
+        topicList      = topicFromStb1
+        ifcheckdata    = 0
+        ifManualCommit = 0
+        keyList        = 'group.id:cgrp1,\
+                        enable.auto.commit:false,\
+                        auto.commit.interval.ms:6000,\
+                        auto.offset.reset:earliest'
+        self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
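+
+        # the consumer subscribes before the extra child tables exist, so the
+        # expectrowcnt above already counts rows from the tables auto-created below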
+
+        tdLog.info("start consume processor")
+        pollDelay = 100
+        showMsg   = 1
+        showRow   = 1
+        self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
+
+        # add some new child tables using auto creating mode
+        time.sleep(1)
+        for index in range(autoCtbNum):
+            tdSql.query("create table %s.%s_%d using %s.%s tags(%d)"%(parameterDict["dbName"], autoCtbPrefix, index, parameterDict["dbName"], parameterDict["stbName"], index))
+
+        self.insert_data(tdSql,parameterDict["dbName"],autoCtbPrefix,autoCtbNum,parameterDict["rowsPerTbl"],parameterDict["batchNum"])
+
+        tdLog.info("insert process end, and start to check consume result")
+        expectRows = 1
+        resultList = self.selectConsumeResult(expectRows)
+        totalConsumeRows = 0
+        for i in range(expectRows):
+            totalConsumeRows += resultList[i]
+
+        if totalConsumeRows != expectrowcnt:
+            tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
+            tdLog.exit("tmq consume rows error!")
+
+        tdSql.query("drop topic %s"%topicFromStb1)
+
+        tdLog.printNoPrefix("======== test case 1 end ...... ")
+
+    def tmqCase2(self, cfgPath, buildPath):
+        tdLog.printNoPrefix("======== test case 2: ")
+
+        self.initConsumerTable()
+
+        autoCtbNum    = 10
+        autoCtbPrefix = 'autoCtb'
+
+        # create and start thread
+        parameterDict = {'cfg':        '',    \
+                         'actionType': 0,     \
+                         'dbName':     'db2', \
+                         'dropFlag':   1,     \
+                         'vgroups':    4,     \
+                         'replica':    1,     \
+                         'stbName':    'stb1',\
+                         'ctbNum':     10,    \
+                         'rowsPerTbl': 10000, \
+                         'batchNum':   100,   \
+                         'startTs':    1640966400000}  # 2022-01-01 00:00:00.000
+        parameterDict['cfg'] = cfgPath
+
+        self.create_database(tdSql, parameterDict["dbName"])
+        self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"])
+        self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"])
+        self.insert_data(tdSql,parameterDict["dbName"],parameterDict["stbName"],parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"])
+
+        self.create_stable(tdSql, parameterDict["dbName"], 'stb2')
+
+        tdLog.info("create topics from stb1/stb2")
+        topicFromStb1 = 'topic_stb1'
+        topicFromStb2 = 'topic_stb2'
+
+        tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName']))
+        tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb2, parameterDict['dbName'], 'stb2'))
+        consumerId     = 0
+        expectrowcnt   = parameterDict["rowsPerTbl"] * (autoCtbNum + parameterDict["ctbNum"])
+        topicList      = '%s, %s'%(topicFromStb1,topicFromStb2)
+        ifcheckdata    = 0
+        ifManualCommit = 0
+        keyList        = 'group.id:cgrp1,\
+                        enable.auto.commit:false,\
+                        auto.commit.interval.ms:6000,\
+                        auto.offset.reset:earliest'
+        self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
+
+        tdLog.info("start consume processor")
+        pollDelay = 100
+        showMsg   = 1
+        showRow   = 1
+        self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
+
+        # add some new child tables using auto creating mode
+        time.sleep(1)
+        for index in range(autoCtbNum):
+            tdSql.query("create table %s.%s_%d using %s.%s tags(%d)"%(parameterDict["dbName"], autoCtbPrefix, index, parameterDict["dbName"], 'stb2', index))
+
+        self.insert_data(tdSql,parameterDict["dbName"],autoCtbPrefix,autoCtbNum,parameterDict["rowsPerTbl"],parameterDict["batchNum"])
+
+        tdLog.info("insert process end, and start to check consume result")
+        expectRows =
1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicFromStb1) + + tdLog.printNoPrefix("======== test case 2 end ...... ") + + def run(self): + tdSql.prepare() + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + cfgPath = buildPath + "/../sim/psim/cfg" + tdLog.info("cfgPath: %s" % cfgPath) + + self.tmqCase1(cfgPath, buildPath) + self.tmqCase2(cfgPath, buildPath) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/7-tmq/tmqDnode.py b/tests/system-test/7-tmq/tmqDnode.py index 4200b357a7..bb287134b1 100644 --- a/tests/system-test/7-tmq/tmqDnode.py +++ b/tests/system-test/7-tmq/tmqDnode.py @@ -29,8 +29,8 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") - #tdSql.init(conn.cursor()) - tdSql.init(conn.cursor(), logSql) # output sql.txt file + tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -118,13 +118,13 @@ class TDTestCase: tdLog.debug("complete to create %s.%s" %(dbName, stbName)) return - def create_ctables(self,tsql, dbName,stbName,ctbNum): + def create_ctables(self,tsql, dbName,stbName,ctbPrefix,ctbNum): tsql.execute("use %s" %dbName) pre_create = "create table" sql = pre_create #tdLog.debug("doing create one stable %s and %d child table in %s ..." 
%(stbname, count ,dbname)) for i in range(ctbNum): - sql += " %s_%d using %s tags(%d)"%(stbName,i,stbName,i+1) + sql += " %s_%d using %s tags(%d)"%(ctbPrefix,i,stbName,i+1) if (i > 0) and (i%100 == 0): tsql.execute(sql) sql = pre_create @@ -211,7 +211,7 @@ class TDTestCase: for i in range(ctbNum): sql += " %s.%s_%d using %s.%s tags (%d) values "%(dbName,ctbPrefix,i,dbName,stbName,i) for j in range(rowsPerTbl): - sql += "(%d, %d, 'tmqrow_%d') "%(startTs + j, j, j) + sql += "(%d, %d, 'autodata_%d') "%(startTs + j, j, j) rowsOfSql += 1 if (j > 0) and ((rowsOfSql == batchNum) or (j == rowsPerTbl - 1)): tsql.execute(sql) @@ -236,7 +236,7 @@ class TDTestCase: elif parameterDict["actionType"] == actionType.CREATE_STABLE: self.create_stable(tsql, parameterDict["dbName"], parameterDict["stbName"]) elif parameterDict["actionType"] == actionType.CREATE_CTABLE: - self.create_ctables(tsql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + self.create_ctables(tsql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["stbName"], parameterDict["ctbNum"]) elif parameterDict["actionType"] == actionType.INSERT_DATA: self.insert_data(tsql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"],\ parameterDict["rowsPerTbl"],parameterDict["batchNum"]) @@ -257,16 +257,17 @@ class TDTestCase: 'dropFlag': 1, \ 'vgroups': 4, \ 'replica': 1, \ - 'stbName': 'stb1', \ + 'stbName': 'stb1', \ + 'ctbPrefix': 'stb1', \ 'ctbNum': 10, \ 'rowsPerTbl': 10000, \ - 'batchNum': 33, \ + 'batchNum': 23, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath self.create_database(tdSql, parameterDict["dbName"]) self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbPrefix"], parameterDict["ctbNum"]) self.insert_data_interlaceByMultiTbl(tdSql,parameterDict["dbName"],parameterDict["stbName"],parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) tdLog.info("create topics from stb1") @@ -290,8 +291,8 @@ class TDTestCase: showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - print("================= restart dnode ===========================") - time.sleep(3) + time.sleep(3) + tdLog.info("================= restart dnode ===========================") tdDnodes.stop(1) tdDnodes.start(1) time.sleep(2) @@ -323,36 +324,25 @@ class TDTestCase: 'dropFlag': 1, \ 'vgroups': 4, \ 'replica': 1, \ - 'stbName': 'stb1', \ + 'stbName': 'stb1', \ + 'ctbPrefix': 'stb1', \ 'ctbNum': 10, \ - 'rowsPerTbl': 15000, \ - 'batchNum': 100, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 40, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath - parameterDict2 = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db2', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb2', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 16000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict2['cfg'] = cfgPath - self.create_database(tdSql, parameterDict["dbName"]) self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_stable(tdSql, parameterDict2["dbName"], parameterDict2["stbName"]) + self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], 
parameterDict["ctbPrefix"], parameterDict["ctbNum"]) + self.insert_data_interlaceByMultiTbl(tdSql,parameterDict["dbName"],parameterDict["stbName"],parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) tdLog.info("create topics from stb1") topicFromStb1 = 'topic_stb1' tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] * 2 topicList = topicFromStb1 ifcheckdata = 0 ifManualCommit = 0 @@ -363,36 +353,16 @@ class TDTestCase: self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) tdLog.info("start consume processor") - pollDelay = 0 + pollDelay = 50 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - tdLog.info("start create child tables of stb1 and stb2") - parameterDict['actionType'] = actionType.CREATE_CTABLE - parameterDict2['actionType'] = actionType.CREATE_CTABLE + tdLog.info("create some new child table and insert data ") + parameterDict['batchNum'] = 100 + self.insert_data_with_autoCreateTbl(tdSql,parameterDict["dbName"],parameterDict["stbName"],"ctb",parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) - prepareEnvThread2.start() - - prepareEnvThread.join() - prepareEnvThread2.join() - - tdLog.info("start insert data into child tables of stb1 and stb2") - parameterDict['actionType'] = actionType.INSERT_DATA - parameterDict2['actionType'] = actionType.INSERT_DATA - - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) - prepareEnvThread2.start() - - prepareEnvThread.join() - prepareEnvThread2.join() - - print("================= restart dnode ===========================") + tdLog.info("================= restart dnode ===========================") tdDnodes.stop(1) tdDnodes.start(1) time.sleep(2) @@ -412,6 +382,10 @@ class TDTestCase: tdLog.printNoPrefix("======== test case 2 end ...... 
") + + + + # 自动建表完成数据插入,启动消费 def tmqCase3(self, cfgPath, buildPath): tdLog.printNoPrefix("======== test case 3: ") @@ -424,17 +398,19 @@ class TDTestCase: 'dropFlag': 1, \ 'vgroups': 4, \ 'replica': 1, \ - 'stbName': 'stb1', \ + 'stbName': 'stb1', \ + 'ctbPrefix': 'stb1', \ 'ctbNum': 10, \ - 'rowsPerTbl': 50000, \ - 'batchNum': 13, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 40, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath self.create_database(tdSql, parameterDict["dbName"]) self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,parameterDict["dbName"],parameterDict["stbName"],parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) + #self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbPrefix"], parameterDict["ctbNum"]) + #self.insert_data_interlaceByMultiTbl(tdSql,parameterDict["dbName"],parameterDict["stbName"],parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) + self.insert_data_with_autoCreateTbl(tdSql,parameterDict["dbName"],parameterDict["stbName"],"ctb",parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) tdLog.info("create topics from stb1") topicFromStb1 = 'topic_stb1' @@ -452,983 +428,31 @@ class TDTestCase: self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 10 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - time.sleep(5) - tdLog.info("drop som child table of stb1") - dropTblNum = 4 - tdSql.query("drop table if exists %s.%s_1"%(parameterDict["dbName"], parameterDict["stbName"])) - tdSql.query("drop table if exists %s.%s_2"%(parameterDict["dbName"], parameterDict["stbName"])) - tdSql.query("drop table if exists %s.%s_3"%(parameterDict["dbName"], parameterDict["stbName"])) - tdSql.query("drop table if exists %s.%s_4"%(parameterDict["dbName"], parameterDict["stbName"])) - tdLog.info("drop some child tables, then start to check consume result") + # tdLog.info("================= restart dnode ===========================") + # tdDnodes.stop(1) + # tdDnodes.start(1) + # time.sleep(2) + + tdLog.info("insert process end, and start to check consume result") expectRows = 1 resultList = self.selectConsumeResult(expectRows) totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - - remaindrowcnt = parameterDict["rowsPerTbl"] * (parameterDict["ctbNum"] - dropTblNum) - - if not (totalConsumeRows < expectrowcnt and totalConsumeRows > remaindrowcnt): - tdLog.info("act consume rows: %d, expect consume rows: between %d and %d"%(totalConsumeRows, remaindrowcnt, expectrowcnt)) + + if totalConsumeRows != expectrowcnt: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) tdLog.exit("tmq consume rows error!") tdSql.query("drop topic %s"%topicFromStb1) tdLog.printNoPrefix("======== test case 3 end ...... 
") - def tmqCase4(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 4: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db4', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 4 end ...... 
") - - def tmqCase5(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 5: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db5', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 0 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != (expectrowcnt * (1 + 1/4)): - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 5 end ...... 
") - - def tmqCase6(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 6: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db6', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 6 end ...... 
") - - def tmqCase7(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 7: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db7', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 7 end ...... 
") - - def tmqCase8(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 8: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db8', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume 0 processor") - pollDelay = 10 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume 0 result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 1 processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 2 processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 and 2 result") - expectRows = 3 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*2: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) - tdLog.exit("tmq consume rows 
error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 8 end ...... ") - - def tmqCase9(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 9: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db9', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume 0 processor") - pollDelay = 10 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume 0 result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 1 processor") - self.initConsumerInfoTable() - consumerId = 1 - ifManualCommit = 0 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 2 processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 and 2 result") - expectRows = 3 - resultList = 
self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*2: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 9 end ...... ") - - def tmqCase10(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 10: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db10', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume 0 processor") - pollDelay = 10 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume 0 result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 1 processor") - self.initConsumerInfoTable() - consumerId = 1 - ifManualCommit = 1 - self.insertConsumerInfo(consumerId, expectrowcnt-10000,topicList,keyList,ifcheckdata,ifManualCommit) - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt-10000: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt-10000)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 2 processor") - self.initConsumerInfoTable() - consumerId = 2 - ifManualCommit = 1 - self.insertConsumerInfo(consumerId, 
expectrowcnt+10000,topicList,keyList,ifcheckdata,ifManualCommit) - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 and 2 result") - expectRows = 3 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*2: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 10 end ...... ") - - def tmqCase11(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 11: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db11', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - 
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 11 end ...... ") - - def tmqCase12(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 12: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db12', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 0 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 12 end ...... 
") - - def tmqCase13(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 13: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db13', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt/2,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*(1/2+1/4): - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*(1/2+1/4))) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 2 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 3 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act 
consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
-            tdLog.exit("tmq consume rows error!")
-
-        tdSql.query("drop topic %s"%topicFromStb1)
-
-        tdLog.printNoPrefix("======== test case 13 end ...... ")
 
     def run(self):
         tdSql.prepare()
@@ -1442,8 +466,8 @@ class TDTestCase:
         tdLog.info("cfgPath: %s" % cfgPath)
 
         # self.tmqCase1(cfgPath, buildPath)
-        self.tmqCase2(cfgPath, buildPath)
-        # self.tmqCase3(cfgPath, buildPath)
+        # self.tmqCase2(cfgPath, buildPath)
+        self.tmqCase3(cfgPath, buildPath)
         # self.tmqCase4(cfgPath, buildPath)
         # self.tmqCase5(cfgPath, buildPath)
diff --git a/tests/system-test/7-tmq/tmqModule.py b/tests/system-test/7-tmq/tmqModule.py
index 8e0d741040..ad5b4d70b3 100644
--- a/tests/system-test/7-tmq/tmqModule.py
+++ b/tests/system-test/7-tmq/tmqModule.py
@@ -29,8 +29,8 @@ class TDTestCase:
 
     def init(self, conn, logSql):
         tdLog.debug(f"start to excute {__file__}")
-        #tdSql.init(conn.cursor())
-        tdSql.init(conn.cursor(), logSql)  # output sql.txt file
+        tdSql.init(conn.cursor())
+        #tdSql.init(conn.cursor(), logSql)  # output sql.txt file
 
     def getBuildPath(self):
         selfPath = os.path.dirname(os.path.realpath(__file__))
diff --git a/tests/system-test/fulltest.bat b/tests/system-test/fulltest.bat
new file mode 100644
index 0000000000..871c93c982
--- /dev/null
+++ b/tests/system-test/fulltest.bat
@@ -0,0 +1,4 @@
+
+python3 .\test.py -f 0-others\taosShell.py
+python3 .\test.py -f 0-others\taosShellError.py
+python3 .\test.py -f 0-others\taosShellNetChk.py
\ No newline at end of file
diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh
index aa6843435c..37c7f18177 100755
--- a/tests/system-test/fulltest.sh
+++ b/tests/system-test/fulltest.sh
@@ -73,8 +73,11 @@ python3 ./test.py -f 2-query/query_cols_tags_and_or.py
 
 python3 ./test.py -f 7-tmq/basic5.py
 python3 ./test.py -f 7-tmq/subscribeDb.py
+python3 ./test.py -f 7-tmq/subscribeDb0.py
 python3 ./test.py -f 7-tmq/subscribeDb1.py
 python3 ./test.py -f 7-tmq/subscribeStb.py
 python3 ./test.py -f 7-tmq/subscribeStb0.py
 python3 ./test.py -f 7-tmq/subscribeStb1.py
 python3 ./test.py -f 7-tmq/subscribeStb2.py
+python3 ./test.py -f 7-tmq/subscribeStb3.py
+python3 ./test.py -f 7-tmq/subscribeStb4.py
diff --git a/tests/system-test/test-all.bat b/tests/system-test/test-all.bat
new file mode 100644
index 0000000000..437472f7b8
--- /dev/null
+++ b/tests/system-test/test-all.bat
@@ -0,0 +1,26 @@
+@echo off
+SETLOCAL EnableDelayedExpansion
+for /F "tokens=1,2 delims=#" %%a in ('"prompt #$H#$E# & echo on & for %%b in (1) do rem"') do ( set "DEL=%%a")
+set /a a=0
+@REM echo Windows Taosd Test
+@REM for /F "usebackq tokens=*" %%i in (fulltest.bat) do (
+@REM echo Processing %%i
+@REM set /a a+=1
+@REM call %%i ARG1 -w -m localhost > result_!a!.txt 2>error_!a!.txt
+@REM if errorlevel 1 ( call :colorEcho 0c "failed" &echo. && exit 8 ) else ( call :colorEcho 0a "Success" &echo. )
+@REM )
+echo Linux Taosd Test
+for /F "usebackq tokens=*" %%i in (fulltest.bat) do (
+    echo Processing %%i
+    set /a a+=1
+    call %%i ARG1 -w 1 -m %1 > result_!a!.txt 2>error_!a!.txt
+    if errorlevel 1 ( call :colorEcho 0c "failed" &echo. && exit 8 ) else ( call :colorEcho 0a "Success" &echo. )
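+    @REM each test's stdout/stderr is captured in result_!a!.txt / error_!a!.txt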
+)
+exit
+
+:colorEcho
+echo off
+<nul set /p ".=%DEL%" > "%~2"
+findstr /v /a:%1 /R "^$" "%~2" nul
+del "%~2" > nul 2>&1
\ No newline at end of file
diff --git a/tests/system-test/test.py b/tests/system-test/test.py
index 2ac8153a0d..6fd7237b33 100644
--- a/tests/system-test/test.py
+++ b/tests/system-test/test.py
@@ -44,8 +44,9 @@ if __name__ == "__main__":
     if platform.system().lower() == 'windows':
         windows = 1
     updateCfgDict = {}
-    opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:l:scghrd:', [
-        'file=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help', 'restart', 'updateCfgDict'])
+    execCmd = ""
+    opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:l:scghrd:e:', [
+        'file=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help', 'restart', 'updateCfgDict', 'execCmd'])
     for key, value in opts:
         if key in ['-h', '--help']:
             tdLog.printNoPrefix(
@@ -59,6 +60,7 @@ if __name__ == "__main__":
             tdLog.printNoPrefix('-g valgrind Test Flag')
             tdLog.printNoPrefix('-r taosd restart test')
             tdLog.printNoPrefix('-d update cfg dict, base64 json str')
+            tdLog.printNoPrefix('-e eval str to run')
             sys.exit(0)
 
         if key in ['-r', '--restart']:
@@ -97,6 +99,19 @@ if __name__ == "__main__":
             except:
                 print('updateCfgDict convert fail.')
                 sys.exit(0)
+
+        if key in ['-e', '--execCmd']:
+            try:
+                execCmd = base64.b64decode(value.encode()).decode()
+            except:
+                print('execCmd convert fail.')
+                sys.exit(0)
+
+    if not execCmd == "":
+        tdDnodes.init(deployPath)
+        exec(execCmd)
+        quit()
+
     if (stop != 0):
         if (valgrind == 0):
             toBeKilled = "taosd"
@@ -136,7 +151,7 @@ if __name__ == "__main__":
         if windows:
             tdCases.logSql(logSql)
             tdLog.info("Procedures for testing self-deployment")
-            tdDnodes.init(deployPath)
+            tdDnodes.init(deployPath, masterIp)
            tdDnodes.setTestCluster(testCluster)
             tdDnodes.setValgrind(valgrind)
             tdDnodes.stopAll()
@@ -161,15 +176,7 @@ if __name__ == "__main__":
             else:
                 pass
             tdDnodes.deploy(1,updateCfgDict)
-            if masterIp == "" or masterIp == "localhost":
-                tdDnodes.start(1)
-            else:
-                remote_conn = Connection("root@%s"%host)
-                with remote_conn.cd('/var/lib/jenkins/workspace/TDinternal/community/tests/pytest'):
-                    remote_conn.run("python3 ./test.py %s"%updateCfgDictStr)
-                # print("docker exec -d cross_platform bash -c \"cd ~/test/community/tests/system-test && python3 ./test.py %s\""%updateCfgDictStr)
-                # os.system("docker exec -d cross_platform bash -c \"cd ~/test/community/tests/system-test && (ps -aux | grep taosd | head -n 1 | awk '{print $2}' | xargs kill -9) && rm -rf /root/test/sim/dnode1/data/ && python3 ./test.py %s\""%updateCfgDictStr)
-                # time.sleep(2)
+            tdDnodes.start(1)
             conn = taos.connect(
                 host="%s"%(host),
                 config=tdDnodes.sim.getCfgDir())
@@ -178,7 +185,7 @@ if __name__ == "__main__":
         else:
             tdCases.runAllWindows(conn)
     else:
-        tdDnodes.init(deployPath)
+        tdDnodes.init(deployPath, masterIp)
         tdDnodes.setTestCluster(testCluster)
         tdDnodes.setValgrind(valgrind)
         tdDnodes.stopAll()
diff --git a/tools/taos-tools b/tools/taos-tools
index 4d83d8c629..2f3dfddd4d 160000
--- a/tools/taos-tools
+++ b/tools/taos-tools
@@ -1 +1 @@
-Subproject commit 4d83d8c62973506f760bcaa3a33f4665ed9046d0
+Subproject commit 2f3dfddd4d9a869e706ba3cf98fb6d769404cd7c