diff --git a/Jenkinsfile2 b/Jenkinsfile2 index 249a8d1c9d..6fa3483099 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -78,7 +78,7 @@ def check_docs(){ file_only_tdgpt_change_except = sh ( script: ''' cd ${WKC} - git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${CHANGE_TARGET}`|grep -v "^docs/en/"|grep -v "^docs/zh/"|grep -v ".md$" | grep -v "forecastoperator.c\\|anomalywindowoperator.c" |grep -v "tsim/analytics" |grep -v "tdgpt_cases.task" || : + git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${CHANGE_TARGET}`|grep -v "^docs/en/"|grep -v "^docs/zh/"|grep -v ".md$" | grep -v "forecastoperator.c\\|anomalywindowoperator.c\\|tanalytics.h\\|tanalytics.c" |grep -v "tsim/analytics" |grep -v "tdgpt_cases.task" || : ''', returnStdout: true ).trim() diff --git a/README.md b/README.md index 030be7bc3b..f827c38975 100644 --- a/README.md +++ b/README.md @@ -10,8 +10,6 @@

-[![Build Status](https://cloud.drone.io/api/badges/taosdata/TDengine/status.svg?ref=refs/heads/master)](https://cloud.drone.io/taosdata/TDengine) -[![Build status](https://ci.appveyor.com/api/projects/status/kf3pwh2or5afsgl9/branch/master?svg=true)](https://ci.appveyor.com/project/sangshuduo/tdengine-2n8ge/branch/master) [![Coverage Status](https://coveralls.io/repos/github/taosdata/TDengine/badge.svg?branch=3.0)](https://coveralls.io/github/taosdata/TDengine?branch=3.0) [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/4201/badge)](https://bestpractices.coreinfrastructure.org/projects/4201)
diff --git a/cmake/cmake.version b/cmake/cmake.version index 8e01e9c530..13fac68e3a 100644 --- a/cmake/cmake.version +++ b/cmake/cmake.version @@ -2,7 +2,7 @@ IF (DEFINED VERNUMBER) SET(TD_VER_NUMBER ${VERNUMBER}) ELSE () - SET(TD_VER_NUMBER "3.3.4.8.alpha") + SET(TD_VER_NUMBER "3.3.5.0.alpha") ENDIF () IF (DEFINED VERCOMPATIBLE) diff --git a/cmake/taosadapter_CMakeLists.txt.in b/cmake/taosadapter_CMakeLists.txt.in index ef6ed4af1d..13826a1a74 100644 --- a/cmake/taosadapter_CMakeLists.txt.in +++ b/cmake/taosadapter_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taosadapter ExternalProject_Add(taosadapter GIT_REPOSITORY https://github.com/taosdata/taosadapter.git - GIT_TAG 3.0 + GIT_TAG main SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter" BINARY_DIR "" #BUILD_IN_SOURCE TRUE diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in index 9a6a5329ae..9bbda8309f 100644 --- a/cmake/taostools_CMakeLists.txt.in +++ b/cmake/taostools_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taos-tools ExternalProject_Add(taos-tools GIT_REPOSITORY https://github.com/taosdata/taos-tools.git - GIT_TAG 3.0 + GIT_TAG main SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools" BINARY_DIR "" #BUILD_IN_SOURCE TRUE diff --git a/cmake/taosws_CMakeLists.txt.in b/cmake/taosws_CMakeLists.txt.in index 17446d184d..b013d45911 100644 --- a/cmake/taosws_CMakeLists.txt.in +++ b/cmake/taosws_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taosws-rs ExternalProject_Add(taosws-rs GIT_REPOSITORY https://github.com/taosdata/taos-connector-rust.git - GIT_TAG 3.0 + GIT_TAG main SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosws-rs" BINARY_DIR "" #BUILD_IN_SOURCE TRUE diff --git a/docs/en/10-third-party/03-visual/01-grafana.md b/docs/en/10-third-party/03-visual/01-grafana.md index ec857f7795..8a503b4195 100644 --- a/docs/en/10-third-party/03-visual/01-grafana.md +++ b/docs/en/10-third-party/03-visual/01-grafana.md @@ -22,15 +22,11 @@ import imgStep11 from '../../assets/grafana-11.png'; This document describes how to integrate the 
TDengine data source with the open-source data visualization system [Grafana](https://www.grafana.com/) to achieve data visualization and build a monitoring and alert system. With the TDengine plugin, you can easily display data from TDengine tables on Grafana dashboards without the need for complex development work. -## Grafana Version Requirements - -TDengine currently supports Grafana version 7.5 and above. It is recommended to use the latest version. Please download and install the corresponding version of Grafana according to your system environment. - ## Prerequisites To add the TDengine data source to Grafana normally, the following preparations are needed. -- Grafana service has been deployed and is running normally. +- Grafana service has been deployed and is running normally. TDengine currently supports Grafana version 7.5 and above. It is recommended to use the latest version. **Note**: Ensure that the account starting Grafana has write permissions to its installation directory, otherwise you may not be able to install plugins later. - TDengine cluster has been deployed and is running normally. - taosAdapter has been installed and is running normally. For details, please refer to the [taosAdapter user manual](../../../tdengine-reference/components/taosadapter/) diff --git a/docs/en/10-third-party/05-bi/09-seeq.md b/docs/en/10-third-party/05-bi/09-seeq.md index c8f7462a19..7fb7569461 100644 --- a/docs/en/10-third-party/05-bi/09-seeq.md +++ b/docs/en/10-third-party/05-bi/09-seeq.md @@ -13,13 +13,11 @@ Seeq is advanced analytics software for the manufacturing and Industrial Interne Through the TDengine Java connector, Seeq can easily support querying time-series data provided by TDengine and offer data presentation, analysis, prediction, and other functions. -## Seeq Installation Method +## Prerequisites -Download the relevant software from [Seeq's official website](https://www.seeq.com/customer-download), such as Seeq Server and Seeq Data Lab, etc. 
Seeq Data Lab needs to be installed on a different server from Seeq Server and interconnected through configuration. For detailed installation and configuration instructions, refer to the [Seeq Knowledge Base](https://support.seeq.com/kb/latest/cloud/). +- Seeq has been installed. Download the relevant software from [Seeq's official website](https://www.seeq.com/customer-download), such as Seeq Server and Seeq Data Lab, etc. Seeq Data Lab needs to be installed on a different server from Seeq Server and interconnected through configuration. For detailed installation and configuration instructions, refer to the [Seeq Knowledge Base](https://support.seeq.com/kb/latest/cloud/). -### TDengine Local Instance Installation Method - -Please refer to the [official documentation](../../../get-started). +- TDengine local instance has been installed. Please refer to the [official documentation](../../../get-started). If using TDengine Cloud, please go to https://cloud.taosdata.com apply for an account and log in to see how to access TDengine Cloud. ## Configuring Seeq to Access TDengine diff --git a/docs/en/10-third-party/05-bi/11-superset.md b/docs/en/10-third-party/05-bi/11-superset.md index aa56648b99..be3e3aa08d 100644 --- a/docs/en/10-third-party/05-bi/11-superset.md +++ b/docs/en/10-third-party/05-bi/11-superset.md @@ -9,14 +9,13 @@ Apache Superset provides an intuitive user interface that makes creating, sharin Through the Python connector of TDengine, Superset can support TDengine data sources and provide functions such as data presentation and analysis -## Install Apache Superset - -Ensure that Apache Superset v2.1.0 or above is installed. 
If not, please visit [official website](https://superset.apache.org/) to install - -## Install TDengine - -Both TDengine Enterprise Edition and Community Edition are supported, with version requirements of 3.0 or higher +## Prerequisites +Prepare the following environment: +- TDengine is installed and running normally (both Enterprise and Community versions are available) +- taosAdapter is running normally, refer to [taosAdapter](../../../reference/components/taosAdapter) +- Apache Superset version 2.1.0 or above is already installed, refer to [Apache Superset](https://superset.apache.org/) + ## Install TDengine Python Connector The Python connector of TDengine comes with a connection driver that supports Superset in versions 2.1.18 and later, which will be automatically installed in the Superset directory and provide data source services. diff --git a/docs/en/14-reference/01-components/05-taosx-agent.md b/docs/en/14-reference/01-components/05-taosx-agent.md index 3e8b4f4d63..d7d86a64cb 100644 --- a/docs/en/14-reference/01-components/05-taosx-agent.md +++ b/docs/en/14-reference/01-components/05-taosx-agent.md @@ -14,6 +14,7 @@ The default configuration file for `Agent` is located at `/etc/taos/agent.toml`, - `token`: Required, the Token generated when creating `Agent` in `Explorer`. - `instanceId`: The instance ID of the current taosx-agent service. If multiple taosx-agent instances are started on the same machine, it is necessary to ensure that the instance IDs of each instance are unique. - `compression`: Optional, can be configured as `true` or `false`, default is `false`. If set to `true`, it enables data compression in communication between `Agent` and `taosX`. +- `in_memory_cache_capacity`: Optional, signifies the maximum number of message batches that can be cached in memory and can be configured as a positive integer greater than zero. The default value is set at 64. - `log_level`: Optional, log level, default is `info`. 
Like `taosX`, it supports five levels: `error`, `warn`, `info`, `debug`, `trace`. Deprecated, please use `log.level` instead. - `log_keep_days`: Optional, the number of days to keep logs, default is `30` days. Deprecated, please use `log.keepDays` instead. - `log.path`: The directory where log files are stored. @@ -45,6 +46,10 @@ As shown below: # #compression = true +# In-memory cache capacity +# +#in_memory_cache_capacity = 64 + # log configuration [log] # All log files are stored in this directory diff --git a/docs/en/14-reference/02-tools/09-taosdump.md b/docs/en/14-reference/02-tools/09-taosdump.md index d336f66c02..75747f2f57 100644 --- a/docs/en/14-reference/02-tools/09-taosdump.md +++ b/docs/en/14-reference/02-tools/09-taosdump.md @@ -4,22 +4,17 @@ sidebar_label: taosdump slug: /tdengine-reference/tools/taosdump --- -taosdump is a tool application that supports backing up data from a running TDengine cluster and restoring the backed-up data to the same or another running TDengine cluster. - -taosdump can back up data using databases, supertables, or basic tables as logical data units, and can also back up data records within a specified time period from databases, supertables, and basic tables. You can specify the directory path for data backup; if not specified, taosdump defaults to backing up data to the current directory. - -If the specified location already has data files, taosdump will prompt the user and exit immediately to avoid data being overwritten. This means the same path can only be used for one backup. -If you see related prompts, please operate carefully. - -taosdump is a logical backup tool, it should not be used to back up any raw data, environment settings, hardware information, server configuration, or cluster topology. taosdump uses [Apache AVRO](https://avro.apache.org/) as the data file format to store backup data. 
+`taosdump` is a TDengine data backup/recovery tool provided for open source users, and the backed up data files adopt the standard [Apache AVRO](https://avro.apache.org/) + Format, convenient for exchanging data with the external ecosystem. + Taosdump provides multiple data backup and recovery options to meet different data needs, and all supported options can be viewed through --help. ## Installation -There are two ways to install taosdump: +Taosdump provides two installation methods: -- Install the official taosTools package, please find taosTools on the [release history page](../../../release-history/taostools/) and download it for installation. +- Taosdump is the default installation component in the TDengine installation package, which can be used after installing TDengine. For how to install TDengine, please refer to [TDengine Installation](../../../get-started/) -- Compile taos-tools separately and install, please refer to the [taos-tools](https://github.com/taosdata/taos-tools) repository for details. +- Compile and install taos tools separately, refer to [taos tools](https://github.com/taosdata/taos-tools) . ## Common Use Cases @@ -30,6 +25,9 @@ There are two ways to install taosdump: 3. Backup certain supertables or basic tables in a specified database: use the `dbname stbname1 stbname2 tbname1 tbname2 ...` parameter, note that this input sequence starts with the database name, supports only one database, and the second and subsequent parameters are the names of the supertables or basic tables in that database, separated by spaces; 4. Backup the system log database: TDengine clusters usually include a system database named `log`, which contains data for TDengine's own operation, taosdump does not back up the log database by default. If there is a specific need to back up the log database, you can use the `-a` or `--allow-sys` command line parameter. 5. 
"Tolerant" mode backup: Versions after taosdump 1.4.1 provide the `-n` and `-L` parameters, used for backing up data without using escape characters and in "tolerant" mode, which can reduce backup data time and space occupied when table names, column names, and label names do not use escape characters. If unsure whether to use `-n` and `-L`, use the default parameters for "strict" mode backup. For an explanation of escape characters, please refer to the [official documentation](../../sql-manual/escape-characters/). +6. If a backup file already exists in the directory specified by the `-o` parameter, to prevent data from being overwritten, taosdump will report an error and exit. Please replace it with another empty directory or clear the original data before backing up. +7. Currently, taosdump does not support data breakpoint backup function. Once the data backup is interrupted, it needs to be started from scratch. + If the backup takes a long time, it is recommended to use the (-S -E options) method to specify the start/end time for segmented backup. :::tip @@ -42,7 +40,8 @@ There are two ways to install taosdump: ### taosdump Restore Data -Restore data files from a specified path: use the `-i` parameter along with the data file path. As mentioned earlier, the same directory should not be used to back up different data sets, nor should the same path be used to back up the same data set multiple times, otherwise, the backup data will cause overwriting or multiple backups. +- Restore data files from a specified path: use the `-i` parameter along with the data file path. As mentioned earlier, the same directory should not be used to back up different data sets, nor should the same path be used to back up the same data set multiple times, otherwise, the backup data will cause overwriting or multiple backups. +- taosdump supports data recovery to a new database name with the parameter `-W`, please refer to the command line parameter description for details. 
:::tip taosdump internally uses the TDengine stmt binding API to write restored data, currently using 16384 as a batch for writing. If there are many columns in the backup data, it may cause a "WAL size exceeds limit" error, in which case you can try adjusting the `-B` parameter to a smaller value. @@ -105,6 +104,13 @@ Usage: taosdump [OPTION...] dbname [tbname ...] the table name.(Version 2.5.3) -T, --thread-num=THREAD_NUM Number of thread for dump in file. Default is 8. + -W, --rename=RENAME-LIST Rename database name with new name during + importing data. RENAME-LIST: + "db1=newDB1|db2=newDB2" means rename db1 to newDB1 + and rename db2 to newDB2 (Version 2.5.4) + -k, --retry-count=VALUE Set the number of retry attempts for connection or + query failures + -z, --retry-sleep-ms=VALUE retry interval sleep time, unit ms -C, --cloud=CLOUD_DSN specify a DSN to access TDengine cloud service -R, --restful Use RESTful interface to connect TDengine -t, --timeout=SECONDS The timeout seconds for websocket to interact. @@ -112,10 +118,6 @@ Usage: taosdump [OPTION...] dbname [tbname ...] -?, --help Give this help list --usage Give a short usage message -V, --version Print program version - -W, --rename=RENAME-LIST Rename database name with new name during - importing data. RENAME-LIST: - "db1=newDB1|db2=newDB2" means rename db1 to newDB1 - and rename db2 to newDB2 (Version 2.5.4) Mandatory or optional arguments to long options are also mandatory or optional for any corresponding short options. diff --git a/docs/en/14-reference/02-tools/10-taosbenchmark.md b/docs/en/14-reference/02-tools/10-taosbenchmark.md index 09227f210b..d1a18b5d1c 100644 --- a/docs/en/14-reference/02-tools/10-taosbenchmark.md +++ b/docs/en/14-reference/02-tools/10-taosbenchmark.md @@ -4,35 +4,38 @@ sidebar_label: taosBenchmark slug: /tdengine-reference/tools/taosbenchmark --- -taosBenchmark (formerly known as taosdemo) is a tool for testing the performance of the TDengine product. 
taosBenchmark can test the performance of TDengine's insert, query, and subscription functions. It can simulate massive data generated by a large number of devices and flexibly control the number of databases, supertables, types and number of tag columns, types and number of data columns, number of subtables, data volume per subtable, data insertion interval, number of working threads in taosBenchmark, whether and how to insert out-of-order data, etc. To accommodate the usage habits of past users, the installation package provides taosdemo as a soft link to taosBenchmark. +TaosBenchmark is a performance benchmarking tool for TDengine products, providing insertion, query, and subscription performance testing for TDengine products, and outputting performance indicators. ## Installation -There are two ways to install taosBenchmark: +taosBenchmark provides two installation methods: -- taosBenchmark is automatically installed with the official TDengine installation package, for details please refer to [TDengine Installation](../../../get-started/). +- taosBenchmark is the default installation component in the TDengine installation package, which can be used after installing TDengine. For how to install TDengine, please refer to [TDengine Installation](../../../get-started/) -- Compile and install taos-tools separately, for details please refer to the [taos-tools](https://github.com/taosdata/taos-tools) repository. +- Compile and install taos tools separately, refer to [taos tools](https://github.com/taosdata/taos-tools) . ## Operation ### Configuration and Operation Methods -taosBenchmark needs to be executed in the operating system's terminal, and this tool supports two configuration methods: Command Line Arguments and JSON Configuration File. These two methods are mutually exclusive; when using a configuration file, only one command line argument `-f ` can be used to specify the configuration file. 
When using command line arguments to run taosBenchmark and control its behavior, the `-f` parameter cannot be used; instead, other parameters must be used for configuration. In addition, taosBenchmark also offers a special mode of operation, which is running without any parameters. - -taosBenchmark supports comprehensive performance testing for TDengine, and the TDengine features it supports are divided into three categories: writing, querying, and subscribing. These three functions are mutually exclusive, and each run of taosBenchmark can only select one of them. It is important to note that the type of function to be tested is not configurable when using the command line configuration method; the command line configuration method can only test writing performance. To test TDengine's query and subscription performance, you must use the configuration file method and specify the type of function to be tested through the `filetype` parameter in the configuration file. +taosBenchmark supports three operating modes: +- No parameter mode +- Command line mode +- JSON configuration file mode +The command-line mode provides a subset of the functionality of the JSON configuration file mode; when both the command line and a configuration file are used, the parameters specified on the command line take precedence. **Ensure that the TDengine cluster is running correctly before running taosBenchmark.** ### Running Without Command Line Arguments -Execute the following command to quickly experience taosBenchmark performing a write performance test on TDengine based on the default configuration. +Execute the following command to quickly experience taosBenchmark performing a write performance test on TDengine based on the default configuration. 
```shell taosBenchmark ``` -When running without parameters, taosBenchmark by default connects to the TDengine cluster specified under `/etc/taos`, and creates a database named `test` in TDengine, under which a supertable named `meters` is created, and 10,000 tables are created under the supertable, each table having 10,000 records inserted. Note that if a `test` database already exists, this command will delete the existing database and create a new `test` database. +When running without parameters, taosBenchmark defaults to connecting to the TDengine cluster specified in `/etc/taos/taos.cfg `. +After successful connection, a smart meter example database test, super meters, and 10000 sub meters will be created, with 10000 records per sub meter. If the test database already exists, it will be deleted before creating a new one. ### Running Using Command Line Configuration Parameters @@ -46,9 +49,7 @@ The above command `taosBenchmark` will create a database named `test`, establish ### Running Using a Configuration File -The taosBenchmark installation package includes examples of configuration files, located in `/examples/taosbenchmark-json` - -Use the following command line to run taosBenchmark and control its behavior through a configuration file. +Running in configuration file mode provides all functions, so parameters can be configured to run in the configuration file. ```shell taosBenchmark -f @@ -214,6 +215,61 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\) - **-?/--help**: Displays help information and exits. Cannot be used with other parameters. 
+ +## Output performance indicators + +#### Write indicators + +After writing is completed, a summary performance metric will be output in the last two lines in the following format: +``` bash +SUCC: Spent 8.527298 (real 8.117379) seconds to insert rows: 10000000 with 8 thread(s) into test 1172704.41 (real 1231924.74) records/second +SUCC: insert delay, min: 19.6780ms, avg: 64.9390ms, p90: 94.6900ms, p95: 105.1870ms, p99: 130.6660ms, max: 157.0830ms +``` +First line write speed statistics: +- Spent: Total write time, in seconds, counting from the start of writing the first data to the end of the last data. This indicates that a total of 8.527298 seconds were spent +- Real: Total write time (calling the engine), excluding the time spent preparing data for the testing framework. Purely counting the time spent on engine calls, the time spent is 8.117379 seconds. The difference, 8.527298 - 8.117379 = 0.409919 seconds, is the time spent preparing data for the testing framework +- Rows: Write the total number of rows, which is 10 million pieces of data +- Threads: The number of threads being written, which is 8 threads writing simultaneously +- Records/second write speed = `total number of rows written` / `total write time`, real in parentheses is the same as before, indicating pure engine write speed + +Second line single write delay statistics: +- min: Write minimum delay +- avg: Write average delay +- p90: Write delay p90 percentile delay number +- p95: Write delay p95 percentile delay number +- p99: Write delay p99 percentile delay number +- max: maximum write delay +Through this series of indicators, the distribution of write request latency can be observed + +#### Query indicators +The query performance test mainly outputs the QPS indicator of query request speed, and the output format is as follows: + +``` bash +complete query with 3 threads and 10000 query delay avg: 0.002686s min: 0.001182s max: 0.012189s p90: 0.002977s p95: 0.003493s p99: 0.004645s SQL command: select ... 
+INFO: Total specified queries: 30000 +INFO: Spend 26.9530 second completed total queries: 30000, the QPS of all threads: 1113.049 +``` + +- The first line represents the percentile distribution of query execution and query request delay for each of the three threads executing 10000 queries. The SQL command is the test query statement +- The second line indicates that a total of 10000 * 3 = 30000 queries have been completed +- The third line indicates that the total query time is 26.9530 seconds, and the query rate per second (QPS) is 1113.049 times/second + +#### Subscription metrics + +The subscription performance test mainly outputs consumer consumption speed indicators, with the following output format: +``` bash +INFO: consumer id 0 has poll total msgs: 376, period rate: 37.592 msgs/s, total rows: 3760000, period rate: 375924.815 rows/s +INFO: consumer id 1 has poll total msgs: 362, period rate: 36.131 msgs/s, total rows: 3620000, period rate: 361313.504 rows/s +INFO: consumer id 2 has poll total msgs: 364, period rate: 36.378 msgs/s, total rows: 3640000, period rate: 363781.731 rows/s +INFO: consumerId: 0, consume msgs: 1000, consume rows: 10000000 +INFO: consumerId: 1, consume msgs: 1000, consume rows: 10000000 +INFO: consumerId: 2, consume msgs: 1000, consume rows: 10000000 +INFO: Consumed total msgs: 3000, total rows: 30000000 +``` +- Lines 1 to 3 real-time output of the current consumption speed of each consumer, msgs/s represents the number of consumption messages, each message contains multiple rows of data, and rows/s represents the consumption speed calculated by rows +- Lines 4 to 6 show the overall statistics of each consumer after the test is completed, including the total number of messages consumed and the total number of lines +- The overall statistics of all consumers in line 7, `msgs` represents how many messages were consumed in total, `rows` represents how many rows of data were consumed in total + ## Configuration File Parameters Detailed 
Explanation ### General Configuration Parameters @@ -331,21 +387,6 @@ Parameters related to supertable creation are configured in the `super_tables` s - **repeat_ts_max** : Numeric type, when composite primary key is enabled, specifies the maximum number of records with the same timestamp to be generated - **sqls** : Array of strings type, specifies the array of sql to be executed after the supertable is successfully created, the table name specified in sql must be prefixed with the database name, otherwise an unspecified database error will occur -#### tsma Configuration Parameters - -Specify the configuration parameters for tsma in `super_tables` under `tsmas`, with the following specific parameters: - -- **name**: Specifies the name of the tsma, mandatory. - -- **function**: Specifies the function of the tsma, mandatory. - -- **interval**: Specifies the time interval for the tsma, mandatory. - -- **sliding**: Specifies the window time shift for the tsma, mandatory. - -- **custom**: Specifies custom configuration appended at the end of the tsma creation statement, optional. - -- **start_when_inserted**: Specifies when to create the tsma after how many rows are inserted, optional, default is 0. #### Tag and Data Column Configuration Parameters @@ -423,6 +464,11 @@ For other common parameters, see Common Configuration Parameters. Configuration parameters for querying specified tables (can specify supertables, subtables, or regular tables) are set in `specified_table_query`. +- **mixed_query** "yes": `Mixed Query` "no": `Normal Query`, default is "no" +`Mixed Query`: All SQL statements in `sqls` are grouped by the number of threads, with each thread executing one group. Each SQL statement in a thread needs to perform `query_times` queries. +`Normal Query `: Each SQL in `sqls` starts `threads` and exits after executing `query_times` times. The next SQL can only be executed after all previous SQL threads have finished executing and exited. 
+Regardless of whether it is a `Normal Query` or `Mixed Query`, the total number of query executions is the same. The total number of queries = `sqls` * `threads` * `query_times`. The difference is that `Normal Query` starts `threads` for each SQL query, while ` Mixed Query` only starts `threads` once to complete all SQL queries. The number of thread startups for the two is different. + - **query_interval** : Query interval, in seconds, default is 0. - **threads** : Number of threads executing the SQL query, default is 1. @@ -433,7 +479,8 @@ Configuration parameters for querying specified tables (can specify supertables, #### Configuration Parameters for Querying Supertables -Configuration parameters for querying supertables are set in `super_table_query`. +Configuration parameters for querying supertables are set in `super_table_query`. +The thread mode of the super table query is the same as the `Normal Query` mode of the specified query statement described above, except that `sqls` is filled all sub tables. - **stblname** : The name of the supertable to query, required. diff --git a/docs/en/14-reference/03-taos-sql/10-function.md b/docs/en/14-reference/03-taos-sql/10-function.md index 4eb7b083fb..e6cfa20bd4 100644 --- a/docs/en/14-reference/03-taos-sql/10-function.md +++ b/docs/en/14-reference/03-taos-sql/10-function.md @@ -190,6 +190,7 @@ ROUND(expr[, digits]) - `digits` less than zero means discarding the decimal places and rounding the number to the left of the decimal point by `digits` places. If the number of places to the left of the decimal point is less than `digits`, returns 0. - Since the DECIMAL type is not yet supported, this function will use DOUBLE and FLOAT to represent results containing decimals, but DOUBLE and FLOAT have precision limits, and using this function may be meaningless when there are too many digits. 
- Can only be used with regular columns, selection (Selection), projection (Projection) functions, and cannot be used with aggregation (Aggregation) functions. +- `digits` is supported from version 3.3.3.0. **Example**: @@ -249,6 +250,8 @@ TAN(expr) **Function Description**: Obtains the tangent result of the specified field. +**Version**: ver-3.3.3.0 + **Return Result Type**: DOUBLE. **Applicable Data Types**: Numeric types. @@ -297,6 +300,8 @@ TRUNCATE(expr, digits) **Function Description**: Gets the truncated value of the specified field to the specified number of digits. +**Version**: ver-3.3.3.0 + **Return Type**: Consistent with the original data type of the `expr` field. **Applicable Data Types**: @@ -340,6 +345,8 @@ EXP(expr) **Function Description**: Returns the value of e (the base of natural logarithms) raised to the specified power. +**Version**: ver-3.3.3.0 + **Return Type**: DOUBLE. **Applicable Data Types**: Numeric type. @@ -370,6 +377,8 @@ LN(expr) **Function Description**: Returns the natural logarithm of the specified parameter. +**Version**: ver-3.3.3.0 + **Return Type**: DOUBLE. **Applicable Data Types**: Numeric type. @@ -401,6 +410,8 @@ MOD(expr1, expr2) **Function Description**: Calculates the result of expr1 % expr2. +**Version**: ver-3.3.3.0 + **Return Type**: DOUBLE. **Applicable Data Types**: Numeric type. @@ -437,6 +448,8 @@ RAND([seed]) **Function Description**: Returns a uniformly distributed random number from 0 to 1. +**Version**: ver-3.3.3.0 + **Return Result Type**: DOUBLE. **Applicable Data Types**: @@ -484,6 +497,8 @@ SIGN(expr) **Function Description**: Returns the sign of the specified parameter. +**Version**: ver-3.3.3.0 + **Return Result Type**: Consistent with the original data type of the specified field. **Applicable Data Types**: Numeric types. @@ -527,6 +542,8 @@ DEGREES(expr) **Function Description**: Calculates the value of the specified parameter converted from radians to degrees. 
+**Version**: ver-3.3.3.0 + **Return Result Type**: DOUBLE. **Applicable Data Types**: Numeric types. @@ -558,6 +575,8 @@ RADIANS(expr) **Function Description**: Calculates the value of the specified parameter converted from degrees to radians. +**Version**: ver-3.3.3.0 + **Return Type**: DOUBLE. **Applicable Data Types**: Numeric types. @@ -729,6 +748,8 @@ TRIM([remstr FROM] expr) **Function Description**: Returns the string expr with all prefixes or suffixes of remstr removed. +**Version**: ver-3.3.3.0 + **Return Result Type**: Same as the original type of the input field expr. **Applicable Data Types**: @@ -807,6 +828,8 @@ SUBSTRING/SUBSTR(expr FROM pos [FOR len]) - If `len` is less than 1, returns an empty string. - `pos` is 1-based; if `pos` is 0, returns an empty string. - If `pos` + `len` exceeds `len(expr)`, returns the substring from `pos` to the end of the string, equivalent to executing `substring(expr, pos)`. +- Function `SUBSTRING` is equal to `SUBSTR`, supported from ver-3.3.3.0. +- Syntax `SUBSTRING/SUBSTR(expr FROM pos [FOR len])` is supported from ver-3.3.3.0. **Examples**: @@ -845,6 +868,8 @@ SUBSTRING_INDEX(expr, delim, count) **Function Description**: Returns a substring of `expr` cut at the position where the delimiter appears the specified number of times. +**Version**: ver-3.3.3.0 + **Return Result Type**: Same as the original type of the input field `expr`. **Applicable Data Types**: @@ -902,6 +927,8 @@ CHAR(expr1 [, expr2] [, expr3] ...) **Function Description**: Treats the input parameters as integers and returns the characters corresponding to these integers in ASCII encoding. +**Version**: ver-3.3.3.0 + **Return Result Type**: VARCHAR. **Applicable Data Types**: Integer types, VARCHAR, NCHAR. @@ -949,6 +976,8 @@ ASCII(expr) **Function Description**: Returns the ASCII code of the first character of the string. +**Version**: ver-3.3.3.0 + **Return Result Data Type**: BIGINT. **Applicable Data Types**: VARCHAR, NCHAR. 
@@ -979,6 +1008,8 @@ POSITION(expr1 IN expr2) **Function Description**: Calculates the position of string `expr1` in string `expr2`. +**Version**: ver-3.3.3.0 + **Return Result Type**: BIGINT. **Applicable Data Types**: @@ -1026,6 +1057,8 @@ REPLACE(expr, from_str, to_str) **Function Description**: Replaces all occurrences of `from_str` in the string with `to_str`. +**Version**: ver-3.3.3.0 + **Return Type**: Same as the original type of the input field `expr`. **Applicable Data Types**: @@ -1061,6 +1094,8 @@ REPEAT(expr, count) **Function Description**: Returns a string that repeats the string `expr` a specified number of times. +**Version**: ver-3.3.3.0 + **Return Type**: Same as the original type of the input field `expr`. **Applicable Data Types**: @@ -1319,6 +1354,7 @@ TIMEDIFF(expr1, expr2 [, time_unit]) - `expr1`: BIGINT, TIMESTAMP types representing timestamps, or VARCHAR, NCHAR types in ISO8601/RFC3339 standard date-time format. - `expr2`: BIGINT, TIMESTAMP types representing timestamps, or VARCHAR, NCHAR types in ISO8601/RFC3339 standard date-time format. - `time_unit`: See usage instructions. +- `timediff` return the absolute value of the difference between timestamp `expr1` and `expr2` before ver-3.3.3.0. **Nested Subquery Support**: Applicable to both inner and outer queries. @@ -1423,6 +1459,8 @@ WEEK(expr [, mode]) **Function Description**: Returns the week number of the input date. +**Version**: ver-3.3.3.0 + **Return Result Type**: BIGINT. **Applicable Data Types**: @@ -1490,6 +1528,8 @@ WEEKOFYEAR(expr) **Function Description**: Returns the week number of the input date. +**Version**: ver-3.3.3.0 + **Return Type**: BIGINT. **Applicable Data Types**: BIGINT, TIMESTAMP types representing timestamps, or VARCHAR, NCHAR types in ISO8601/RFC3339 date-time format. @@ -1521,6 +1561,8 @@ WEEKDAY(expr) **Function Description**: Returns the weekday of the input date. +**Version**: ver-3.3.3.0 + **Return Type**: BIGINT. 
**Applicable Data Types**: BIGINT, TIMESTAMP types representing timestamps, or VARCHAR, NCHAR types in ISO8601/RFC3339 date-time format. @@ -1552,6 +1594,8 @@ DAYOFWEEK(expr) **Function Description**: Returns the weekday of the input date. +**Version**: ver-3.3.3.0 + **Return Type**: BIGINT. **Applicable Data Types**: BIGINT, TIMESTAMP types representing timestamps, or VARCHAR, NCHAR types in ISO8601/RFC3339 date-time format. @@ -1707,6 +1751,9 @@ STDDEV/STDDEV_POP(expr) **Applicable to**: Tables and supertables. +**Description**: +- Function `STDDEV_POP` equals `STDDEV` and is supported from ver-3.3.3.0. + **Example**: ```sql @@ -1733,6 +1780,8 @@ VAR_POP(expr) **Function Description**: Calculates the population variance of a column in a table. +**Version**: ver-3.3.3.0 + **Return Data Type**: DOUBLE. **Applicable Data Types**: Numeric types. @@ -1975,7 +2024,8 @@ MAX(expr) **Applicable to**: Tables and supertables. -**Usage Instructions**: The max function can accept strings as input parameters, and when the input parameter is a string type, it returns the largest string value. +**Usage Instructions**: +- The max function can accept strings as input parameters, and when the input parameter is a string type, it returns the largest string value(supported from ver-3.3.3.0, function `max` only accept numeric parameter before ver-3.3.3.0). ### MIN @@ -1991,7 +2041,8 @@ MIN(expr) **Applicable to**: Tables and supertables. -**Usage Instructions**: The min function can accept strings as input parameters, and when the input parameter is a string type, it returns the largest string value. +**Usage Instructions**: +- The min function can accept strings as input parameters, and when the input parameter is a string type, it returns the largest string value(supported from ver-3.3.3.0, function `min` only accept numeric parameter before ver-3.3.3.0). 
### MODE diff --git a/docs/en/14-reference/09-error-code.md b/docs/en/14-reference/09-error-code.md index 1d3ea3f9a1..2bbd8f9305 100644 --- a/docs/en/14-reference/09-error-code.md +++ b/docs/en/14-reference/09-error-code.md @@ -386,7 +386,7 @@ This document details the server error codes that may be encountered when using | 0x8000260D | Tags number not matched | Mismatched number of tag columns | Check and correct the SQL statement | | 0x8000260E | Invalid tag name | Invalid or non-existent tag name | Check and correct the SQL statement | | 0x80002610 | Value is too long | Value length exceeds limit | Check and correct the SQL statement or API parameters | -| 0x80002611 | Password can not be empty | Password is empty | Use a valid password | +| 0x80002611 | Password too short or empty | Password is empty or less than 8 chars | Use a valid password | | 0x80002612 | Port should be an integer that is less than 65535 and greater than 0 | Illegal port number | Check and correct the port number | | 0x80002613 | Endpoint should be in the format of 'fqdn:port' | Incorrect address format | Check and correct the address information | | 0x80002614 | This statement is no longer supported | Feature has been deprecated | Refer to the feature documentation | diff --git a/docs/zh/01-index.md b/docs/zh/01-index.md index 1fda72024c..02c6105d43 100644 --- a/docs/zh/01-index.md +++ b/docs/zh/01-index.md @@ -16,7 +16,7 @@ TDengine 采用 SQL 作为查询语言,大大降低学习成本、降低迁移 如果你是系统管理员,关心安装、升级、容错灾备、关心数据导入、导出、配置参数,如何监测 TDengine 是否健康运行,如何提升系统运行的性能,请仔细参考[运维指南](./operation)一章。 -如果你对数据库内核设计感兴趣,或是开源爱好者,建议仔细阅读[技术内幕](./tdinterna)一章。该章从分布式架构到存储引擎、查询引擎、数据订阅,再到流计算引擎都做了详细阐述。建议对照文档,查看TDengine在GitHub的源代码,对TDengine的设计和编码做深入了解,更欢迎加入开源社区,贡献代码。 +如果你对数据库内核设计感兴趣,或是开源爱好者,建议仔细阅读[技术内幕](./tdinternal)一章。该章从分布式架构到存储引擎、查询引擎、数据订阅,再到流计算引擎都做了详细阐述。建议对照文档,查看TDengine在GitHub的源代码,对TDengine的设计和编码做深入了解,更欢迎加入开源社区,贡献代码。 最后,作为一个开源软件,欢迎大家的参与。如果发现文档有任何错误、描述不清晰的地方,请在每个页面的最下方,点击“编辑本文档”直接进行修改。 diff --git a/docs/zh/04-get-started/03-package.md 
b/docs/zh/04-get-started/03-package.md index 9724c0a1c9..344b9412df 100644 --- a/docs/zh/04-get-started/03-package.md +++ b/docs/zh/04-get-started/03-package.md @@ -8,8 +8,6 @@ import Tabs from "@theme/Tabs"; import TabItem from "@theme/TabItem"; import PkgListV3 from "/components/PkgListV3"; -您可以[用 Docker 立即体验](../../get-started/docker/) TDengine。如果您希望对 TDengine 贡献代码或对内部实现感兴趣,请参考我们的 [TDengine GitHub 主页](https://github.com/taosdata/TDengine) 下载源码构建和安装. - TDengine 完整的软件包包括服务端(taosd)、应用驱动(taosc)、用于与第三方系统对接并提供 RESTful 接口的 taosAdapter、命令行程序(CLI,taos)和一些工具软件。目前 TDinsight 仅在 Linux 系统上安装和运行,后续将支持 Windows、macOS 等系统。TDengine 除了提供多种语言的连接器之外,还通过 [taosAdapter](../../reference/components/taosadapter/) 提供 [RESTful 接口](../../reference/connector/rest-api/)。 为方便使用,标准的服务端安装包包含了 taosd、taosAdapter、taosc、taos、taosdump、taosBenchmark、TDinsight 安装脚本和示例代码;如果您只需要用到服务端程序和客户端连接的 C/C++ 语言支持,也可以仅下载 Lite 版本的安装包。 @@ -319,4 +317,4 @@ SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters WHERE groupId = 1 SELECT _wstart, AVG(current), MAX(voltage), MIN(phase) FROM test.d1001 INTERVAL(10s); ``` -在上面的查询中,使用系统提供的伪列 _wstart 来给出每个窗口的开始时间。 \ No newline at end of file +在上面的查询中,使用系统提供的伪列 _wstart 来给出每个窗口的开始时间。 diff --git a/docs/zh/04-get-started/index.md b/docs/zh/04-get-started/index.md index 4d9f7ceae5..5a7192f2c6 100644 --- a/docs/zh/04-get-started/index.md +++ b/docs/zh/04-get-started/index.md @@ -10,7 +10,7 @@ import official_account from './official-account.webp' TDengine 完整的软件包包括服务端(taosd)、用于与第三方系统对接并提供 RESTful 接口的 taosAdapter、应用驱动(taosc)、命令行程序 (CLI,taos) 和一些工具软件。TDengine 除了提供多种语言的连接器之外,还通过 [taosAdapter](../reference/components/taosadapter) 提供 [RESTful 接口](../reference/connector/rest-api)。 -本章主要介绍如何利用 Docker 或者安装包快速设置 TDengine 环境并体验其高效写入和查询。 +本章主要介绍如何快速设置 TDengine 环境并体验其高效写入和查询。 ```mdx-code-block import DocCardList from '@theme/DocCardList'; @@ -34,4 +34,4 @@ import {useCurrentSidebarCategory} from '@docusaurus/theme-common'; 关注 TDengine 视频号
收看技术直播与教学视频 关注 TDengine 公众号
阅读技术文章与行业案例 - \ No newline at end of file + diff --git a/docs/zh/06-advanced/03-stream.md b/docs/zh/06-advanced/03-stream.md index f5202cddad..333dabd189 100644 --- a/docs/zh/06-advanced/03-stream.md +++ b/docs/zh/06-advanced/03-stream.md @@ -175,7 +175,7 @@ TDengine 对于修改数据提供两种处理方式,由 IGNORE UPDATE 选项 用户可以为每个 partition 对应的子表生成自定义的 TAG 值,如下创建流的语句, ```sql -CREATE STREAM output_tag trigger at_once INTO output_tag_s TAGS(alias_tag varchar(100)) as select _wstart, count(*) from power.meters partition by concat("tag-", tbname) as alias_tag interval(10s)); +CREATE STREAM output_tag trigger at_once INTO output_tag_s TAGS(alias_tag varchar(100)) as select _wstart, count(*) from power.meters partition by concat("tag-", tbname) as alias_tag interval(10s); ``` 在 PARTITION 子句中,为 concat("tag-", tbname)定义了一个别名 alias_tag, 对应超级表 output_tag_s 的自定义 TAG 的名字。在上述示例中,流新创建的子表的 TAG 将以前缀 'tag-' 连接原表名作为 TAG 的值。会对 TAG 信息进行如下检查。 diff --git a/docs/zh/06-advanced/05-data-in/07-mqtt.md b/docs/zh/06-advanced/05-data-in/07-mqtt.mdx similarity index 98% rename from docs/zh/06-advanced/05-data-in/07-mqtt.md rename to docs/zh/06-advanced/05-data-in/07-mqtt.mdx index a0e121f632..3ffab4dfbf 100644 --- a/docs/zh/06-advanced/05-data-in/07-mqtt.md +++ b/docs/zh/06-advanced/05-data-in/07-mqtt.mdx @@ -166,6 +166,12 @@ json 数据支持 JSONObject 或者 JSONArray,使用 json 解析器可以解 ![mqtt-14](./mqtt-14.png) -### 8. 创建完成 +### 8. 异常处理策略 + +import Contributing from './_03-exception-handling-strategy.mdx' + + + +### 9. 创建完成 点击 **提交** 按钮,完成创建 MQTT 到 TDengine 的数据同步任务,回到**数据源列表**页面可查看任务执行情况。 diff --git a/docs/zh/06-advanced/05-data-in/08-kafka.md b/docs/zh/06-advanced/05-data-in/08-kafka.mdx similarity index 97% rename from docs/zh/06-advanced/05-data-in/08-kafka.md rename to docs/zh/06-advanced/05-data-in/08-kafka.mdx index b605f84c7a..71070b271c 100644 --- a/docs/zh/06-advanced/05-data-in/08-kafka.md +++ b/docs/zh/06-advanced/05-data-in/08-kafka.mdx @@ -196,12 +196,16 @@ json 数据支持 JSONObject 或者 JSONArray,使用 json 解析器可以解 ### 8. 
配置高级选项 -**高级选项** 区域是默认折叠的,点击右侧 `>` 可以展开,如下图所示: +import AdvancedOptions from './_02-advanced_options.mdx' -![kafka-15.png](./kafka-15.png) + -![kafka-16.png](./kafka-16.png) +### 9. 异常处理策略 -### 9. 创建完成 +import Contributing from './_03-exception-handling-strategy.mdx' + + + +### 10. 创建完成 点击 **提交** 按钮,完成创建 Kafka 到 TDengine 的数据同步任务,回到**数据源列表**页面可查看任务执行情况。 diff --git a/docs/zh/06-advanced/05-data-in/09-influxdb.md b/docs/zh/06-advanced/05-data-in/09-influxdb.mdx similarity index 94% rename from docs/zh/06-advanced/05-data-in/09-influxdb.md rename to docs/zh/06-advanced/05-data-in/09-influxdb.mdx index d0b781667d..b88bcdf3c6 100644 --- a/docs/zh/06-advanced/05-data-in/09-influxdb.md +++ b/docs/zh/06-advanced/05-data-in/09-influxdb.mdx @@ -75,9 +75,9 @@ InfluxDB 是一种流行的开源时间序列数据库,它针对处理大量 ### 6. 配置高级选项 -**高级选项** 区域是默认折叠的,点击右侧 `>` 可以展开,如下图所示: -![InfluxDB-09zh-AdvancedOptionsExpandButton.png](./pic/InfluxDB-09zh-AdvancedOptionsExpandButton.png "高级选项展开按钮") -![InfluxDB-10zh-AdvancedOptionsExpand.png](./pic/InfluxDB-10zh-AdvancedOptionsExpand.png "高级选项展开按钮") +import AdvancedOptions from './_02-advanced_options.mdx' + + ### 7. 创建完成 diff --git a/docs/zh/06-advanced/05-data-in/10-opentsdb.md b/docs/zh/06-advanced/05-data-in/10-opentsdb.mdx similarity index 92% rename from docs/zh/06-advanced/05-data-in/10-opentsdb.md rename to docs/zh/06-advanced/05-data-in/10-opentsdb.mdx index 3737f2a415..eeb4e37988 100644 --- a/docs/zh/06-advanced/05-data-in/10-opentsdb.md +++ b/docs/zh/06-advanced/05-data-in/10-opentsdb.mdx @@ -58,9 +58,9 @@ OpenTSDB 是一个架构在 HBase 系统之上的实时监控信息收集和展 ### 5. 配置高级选项 -**高级选项** 区域是默认折叠的,点击右侧 `>` 可以展开,如下图所示: -![OpenTSDB-07zh-AdvancedOptionsExpandButton.png](./pic/OpenTSDB-07zh-AdvancedOptionsExpandButton.png "高级选项展开按钮") -![OpenTSDB-08zh-AdvancedOptionsExpand.png](./pic/OpenTSDB-08zh-AdvancedOptionsExpand.png "高级选项展开按钮") +import AdvancedOptions from './_02-advanced_options.mdx' + + ### 6. 
创建完成 diff --git a/docs/zh/06-advanced/05-data-in/11-csv.md b/docs/zh/06-advanced/05-data-in/11-csv.mdx similarity index 95% rename from docs/zh/06-advanced/05-data-in/11-csv.md rename to docs/zh/06-advanced/05-data-in/11-csv.mdx index 4924ed2fbd..5737fc8b79 100644 --- a/docs/zh/06-advanced/05-data-in/11-csv.md +++ b/docs/zh/06-advanced/05-data-in/11-csv.mdx @@ -107,13 +107,25 @@ sidebar_label: "CSV" ![csv-09.png](./csv-09.png) -### 5. 创建完成 +### 5. 配置高级选项 + +import AdvancedOptions from './_02-advanced_options.mdx' + + + +### 6. 异常处理策略 + +import Contributing from './_03-exception-handling-strategy.mdx' + + + +### 7. 创建完成 点击 **提交** 按钮,完成创建 CSV 到 TDengine 的数据同步任务,回到数据写入任务列表页面,可查看任务执行情况,也可以进行任务的“启动/停止”操作与“查看/编辑/删除/复制”操作。 ![csv-10.png](./csv-10.png) -### 6. 查看运行指标 +### 8. 查看运行指标 点击 **查看** 按钮,查看任务的运行指标,同时也可以查看任务中所有文件的处理情况。 diff --git a/docs/zh/06-advanced/05-data-in/12-aveva-historian.md b/docs/zh/06-advanced/05-data-in/12-aveva-historian.mdx similarity index 97% rename from docs/zh/06-advanced/05-data-in/12-aveva-historian.md rename to docs/zh/06-advanced/05-data-in/12-aveva-historian.mdx index ee04194dea..e8ab4c839e 100644 --- a/docs/zh/06-advanced/05-data-in/12-aveva-historian.md +++ b/docs/zh/06-advanced/05-data-in/12-aveva-historian.mdx @@ -134,6 +134,12 @@ split 提取器,seperator 填写分割符 `,`, number 填写 2。 ![aveva-historian-08.png](pic/aveva-historian-08.png) -### 7. 创建完成 +### 7. 异常处理策略 + +import Contributing from './_03-exception-handling-strategy.mdx' + + + +### 8. 创建完成 点击 **提交** 按钮,完成创建任务。提交任务后,回到**数据写入**页面可以查看任务状态。 diff --git a/docs/zh/06-advanced/05-data-in/13-mysql.md b/docs/zh/06-advanced/05-data-in/13-mysql.mdx similarity index 93% rename from docs/zh/06-advanced/05-data-in/13-mysql.md rename to docs/zh/06-advanced/05-data-in/13-mysql.mdx index 4cc84fbfa2..f1894190cb 100644 --- a/docs/zh/06-advanced/05-data-in/13-mysql.md +++ b/docs/zh/06-advanced/05-data-in/13-mysql.mdx @@ -98,14 +98,16 @@ MySQL 是最流行的关系型数据库之一。很多系统都曾经或正在 ### 8. 
配置高级选项 -**高级选项** 区域是默认折叠的,点击右侧 `>` 可以展开,如下图所示: +import AdvancedOptions from './_02-advanced_options.mdx' -**最大读取并发数** 数据源连接数或读取线程数限制,当默认参数不满足需要或需要调整资源使用量时修改此参数。 + -**批次大小** 单次发送的最大消息数或行数。默认是 10000。 +### 9. 异常处理策略 -![mysql-07.png](pic/mysql-07.png) +import Contributing from './_03-exception-handling-strategy.mdx' -### 9. 创建完成 + + +### 10. 创建完成 点击 **提交** 按钮,完成创建 MySQL 到 TDengine 的数据同步任务,回到**数据源列表**页面可查看任务执行情况。 diff --git a/docs/zh/06-advanced/05-data-in/14-postgres.md b/docs/zh/06-advanced/05-data-in/14-postgres.mdx similarity index 93% rename from docs/zh/06-advanced/05-data-in/14-postgres.md rename to docs/zh/06-advanced/05-data-in/14-postgres.mdx index af8297bfff..7651db68f2 100644 --- a/docs/zh/06-advanced/05-data-in/14-postgres.md +++ b/docs/zh/06-advanced/05-data-in/14-postgres.mdx @@ -99,14 +99,16 @@ TDengine 可以高效地从 PostgreSQL 读取数据并将其写入 TDengine, ### 8. 配置高级选项 -**高级选项** 区域是默认折叠的,点击右侧 `>` 可以展开,如下图所示: +import AdvancedOptions from './_02-advanced_options.mdx' -**最大读取并发数** 数据源连接数或读取线程数限制,当默认参数不满足需要或需要调整资源使用量时修改此参数。 + -**批次大小** 单次发送的最大消息数或行数。默认是 10000。 +### 9. 异常处理策略 -![postgres-07.png](pic/postgres-07.png) +import Contributing from './_03-exception-handling-strategy.mdx' -### 9. 创建完成 + + +### 10. 创建完成 点击 **提交** 按钮,完成创建 PostgreSQL 到 TDengine 的数据同步任务,回到**数据源列表**页面可查看任务执行情况。 diff --git a/docs/zh/06-advanced/05-data-in/15-oracle.md b/docs/zh/06-advanced/05-data-in/15-oracle.mdx similarity index 93% rename from docs/zh/06-advanced/05-data-in/15-oracle.md rename to docs/zh/06-advanced/05-data-in/15-oracle.mdx index 39bbab32d3..484365415e 100644 --- a/docs/zh/06-advanced/05-data-in/15-oracle.md +++ b/docs/zh/06-advanced/05-data-in/15-oracle.mdx @@ -91,14 +91,16 @@ TDengine 可以高效地从 Oracle 读取数据并将其写入 TDengine,以实 ### 7. 配置高级选项 -**高级选项** 区域是默认折叠的,点击右侧 `>` 可以展开,如下图所示: +import AdvancedOptions from './_02-advanced_options.mdx' -**最大读取并发数** 数据源连接数或读取线程数限制,当默认参数不满足需要或需要调整资源使用量时修改此参数。 + -**批次大小** 单次发送的最大消息数或行数。默认是 10000。 +### 8. 
异常处理策略 -![oracle-06.png](pic/oracle-06.png) +import Contributing from './_03-exception-handling-strategy.mdx' -### 8. 创建完成 + + +### 9. 创建完成 点击 **提交** 按钮,完成创建 Oracle 到 TDengine 的数据同步任务,回到**数据源列表****页面可查看任务执行情况。 diff --git a/docs/zh/06-advanced/05-data-in/16-mssql.md b/docs/zh/06-advanced/05-data-in/16-mssql.mdx similarity index 94% rename from docs/zh/06-advanced/05-data-in/16-mssql.md rename to docs/zh/06-advanced/05-data-in/16-mssql.mdx index 81e9e98013..1e6b9928be 100644 --- a/docs/zh/06-advanced/05-data-in/16-mssql.md +++ b/docs/zh/06-advanced/05-data-in/16-mssql.mdx @@ -105,14 +105,16 @@ Microsoft SQL Server 是最流行的关系型数据库之一。很多系统都 ### 8. 配置高级选项 -**高级选项** 区域是默认折叠的,点击右侧 `>` 可以展开,如下图所示: +import AdvancedOptions from './_02-advanced_options.mdx' -**最大读取并发数** 数据源连接数或读取线程数限制,当默认参数不满足需要或需要调整资源使用量时修改此参数。 + -**批次大小** 单次发送的最大消息数或行数。默认是 10000。 +### 9. 异常处理策略 -![mssql-07.png](pic/mssql-07.png) +import Contributing from './_03-exception-handling-strategy.mdx' -### 9. 创建完成 + + +### 10. 创建完成 点击 **提交** 按钮,完成创建 Microsoft SQL Server 到 TDengine 的数据同步任务,回到**数据源列表**页面可查看任务执行情况。 diff --git a/docs/zh/06-advanced/05-data-in/17-mongodb.md b/docs/zh/06-advanced/05-data-in/17-mongodb.mdx similarity index 94% rename from docs/zh/06-advanced/05-data-in/17-mongodb.md rename to docs/zh/06-advanced/05-data-in/17-mongodb.mdx index 5311bc43c6..e92f37a6f0 100644 --- a/docs/zh/06-advanced/05-data-in/17-mongodb.md +++ b/docs/zh/06-advanced/05-data-in/17-mongodb.mdx @@ -122,14 +122,16 @@ MongoDB 是一个介于关系型数据库与非关系型数据库之间的产品 ### 8. 配置高级选项 -**高级选项** 区域是默认折叠的,点击右侧 `>` 可以展开,如下图所示: +import AdvancedOptions from './_02-advanced_options.mdx' -**最大读取并发数** 数据源连接数或读取线程数限制,当默认参数不满足需要或需要调整资源使用量时修改此参数。 + -**批次大小** 单次发送的最大消息数或行数。默认是 10000。 +### 9. 异常处理策略 -![mongodb-07.png](pic/mongodb-07.png) +import Contributing from './_03-exception-handling-strategy.mdx' -### 9. 创建完成 + + +### 10. 
创建完成 点击 **提交** 按钮,完成创建 MongoDB 到 TDengine 的数据同步任务,回到**数据源列表**页面可查看任务执行情况。 diff --git a/docs/zh/06-advanced/05-data-in/_02-advanced_options.mdx b/docs/zh/06-advanced/05-data-in/_02-advanced_options.mdx new file mode 100644 index 0000000000..f37de063c0 --- /dev/null +++ b/docs/zh/06-advanced/05-data-in/_02-advanced_options.mdx @@ -0,0 +1,7 @@ +**高级选项** 区域是默认折叠的,点击右侧 `>` 可以展开,如下图所示: + +**最大读取并发数** 数据源连接数或读取线程数限制,当默认参数不满足需要或需要调整资源使用量时修改此参数。 + +**批次大小** 单次发送的最大消息数或行数。默认是 10000。 + +![advanced_options.png](pic/advanced_options.png) \ No newline at end of file diff --git a/docs/zh/06-advanced/05-data-in/_03-exception-handling-strategy.mdx b/docs/zh/06-advanced/05-data-in/_03-exception-handling-strategy.mdx new file mode 100644 index 0000000000..470c304ff3 --- /dev/null +++ b/docs/zh/06-advanced/05-data-in/_03-exception-handling-strategy.mdx @@ -0,0 +1,23 @@ +异常处理策略区域是对数据异常时的处理策略进行配置,默认折叠的,点击右侧 `>` 可以展开,如下图所示: + +![exception-handling-strategy.png](pic/exception-handling-strategy.png) + +各异常项说明及相应可选处理策略如下: + +> 通用处理策略说明: +> 归档:将异常数据写入归档文件(默认路径为 `${data_dir}/tasks/_id/.datetime`),不写入目标库 +> 丢弃:将异常数据忽略,不写入目标库 +> 报错:任务报错 + +- **主键时间戳溢出** 检查数据中第一列时间戳是否在正确的时间范围内(now - keep1, now + 100y),可选处理策略:归档、丢弃、报错 +- **主键时间戳空** 检查数据中第一列时间戳是否为空,可选处理策略:归档、丢弃、报错、使用当前时间 + > 使用当前时间:使用当前时间填充到空的时间戳字段中 +- **表名长度溢出** 检查子表表名的长度是否超出限制(最大 192 字符),可选处理策略:归档、丢弃、报错、截断、截断且归档 + > 截断:截取原始表名的前 192 个字符作为新的表名 + > 截断且归档:截取原始表名的前 192 个字符作为新的表名,并且将此行记录写入归档文件 +- **表名非法字符** 检查子表表名中是否包含特殊字符(符号 `.` 等),可选处理策略:归档、丢弃、报错、非法字符替换为指定字符串 + > 非法字符替换为指定字符串:将原始表名中的特殊字符替换为后方输入框中的指定字符串,例如 `a.b` 替换为 `a_b` +- **表名模板变量空值** 检查子表表名模板中的变量是否为空,可选处理策略:丢弃、留空、变量替换为指定字符串 + > 留空:变量位置不做任何特殊处理,例如 `a_{x}` 转换为 `a_` + > 变量替换为指定字符串:变量位置使用后方输入框中的指定字符串,例如 `a_{x}` 转换为 `a_b` +- **列名长度溢出** 检查列名的长度是否超出限制(最大 64 字符),可选处理策略:归档、丢弃、报错 \ No newline at end of file diff --git a/docs/zh/06-advanced/05-data-in/health-options.png b/docs/zh/06-advanced/05-data-in/health-options.png new file mode 100644 index 0000000000..d20a520a95 Binary files /dev/null and 
b/docs/zh/06-advanced/05-data-in/health-options.png differ diff --git a/docs/zh/06-advanced/05-data-in/index.md b/docs/zh/06-advanced/05-data-in/index.md index 0dfa04db56..8f23fe2a81 100644 --- a/docs/zh/06-advanced/05-data-in/index.md +++ b/docs/zh/06-advanced/05-data-in/index.md @@ -294,9 +294,32 @@ let v3 = data["voltage"].split(","); 在任务列表页面,还可以对任务进行启动、停止、查看、删除、复制等操作,也可以查看各个任务的运行情况,包括写入的记录条数、流量等。 +### 健康状态 + +从 3.3.5.0 开始,在任务管理列表中,增加了一项 ”健康状态“,用于指示当前任务运行过程中的健康状态。 + +在数据源的”高级选项“列表中,增加了多项健康状态监测的配置项,包括: + +![health options](./health-options.png) + +1. 健康监测时段(Health Check Duration):可选项,表示对最近多长时间的任务状态进行统计。 +2. Busy 状态阈值(Busy State Threshold):百分比,表示写入队列中入队元素数量与队列长度之比,默认 100%。 +3. 写入队列长度(Max Write Queue Length):表示对应的写入队列长度最大值。 +4. 写入错误阈值(Write Error Threshold):数值类型,表示健康监测时段中允许写入错误的数量。超出阈值,则报错。 + +在任务管理列表展示中,有如下状态: + +- Ready:数据源和目标端健康检查通过,可以进行数据读取和写入。 +- Idle:表示监测时段内无数据处理(没有数据进入处理流程)。 +- Busy:表示写入队列已满(超出一定阈值,表示写入繁忙,在一定程度上意味着当前可能存在性能瓶颈,需要调整参数或配置等来进行改善,但并不说明存在错误)。 +- Bounce:数据源和目标端均正常,但在写入过程中存在错误,一定周期内超出阈值,可能意味着存在大量非正常数据或正在发生数据丢失。 +- SourceError: 数据源错误导致无法进行读取。此时工作负载将尝试重连数据源。 +- SinkError:写入端错误导致无法进行写入。此时工作负载将尝试重连数据库,恢复后进入 Ready 状态。 +- Fatal:严重或无法恢复的错误。 + ```mdx-code-block import DocCardList from '@theme/DocCardList'; import {useCurrentSidebarCategory} from '@docusaurus/theme-common'; -``` \ No newline at end of file +``` diff --git a/docs/zh/06-advanced/05-data-in/kafka-15.png b/docs/zh/06-advanced/05-data-in/kafka-15.png deleted file mode 100644 index 96d593dad9..0000000000 Binary files a/docs/zh/06-advanced/05-data-in/kafka-15.png and /dev/null differ diff --git a/docs/zh/06-advanced/05-data-in/kafka-16.png b/docs/zh/06-advanced/05-data-in/kafka-16.png deleted file mode 100644 index 395453c410..0000000000 Binary files a/docs/zh/06-advanced/05-data-in/kafka-16.png and /dev/null differ diff --git a/docs/zh/06-advanced/05-data-in/pic/InfluxDB-09zh-AdvancedOptionsExpandButton.png b/docs/zh/06-advanced/05-data-in/pic/InfluxDB-09zh-AdvancedOptionsExpandButton.png deleted 
file mode 100644 index f12692c506..0000000000 Binary files a/docs/zh/06-advanced/05-data-in/pic/InfluxDB-09zh-AdvancedOptionsExpandButton.png and /dev/null differ diff --git a/docs/zh/06-advanced/05-data-in/pic/InfluxDB-10zh-AdvancedOptionsExpand.png b/docs/zh/06-advanced/05-data-in/pic/InfluxDB-10zh-AdvancedOptionsExpand.png deleted file mode 100644 index dbb188852c..0000000000 Binary files a/docs/zh/06-advanced/05-data-in/pic/InfluxDB-10zh-AdvancedOptionsExpand.png and /dev/null differ diff --git a/docs/zh/06-advanced/05-data-in/pic/OpenTSDB-07zh-AdvancedOptionsExpandButton.png b/docs/zh/06-advanced/05-data-in/pic/OpenTSDB-07zh-AdvancedOptionsExpandButton.png deleted file mode 100644 index 65d6344e56..0000000000 Binary files a/docs/zh/06-advanced/05-data-in/pic/OpenTSDB-07zh-AdvancedOptionsExpandButton.png and /dev/null differ diff --git a/docs/zh/06-advanced/05-data-in/pic/OpenTSDB-08zh-AdvancedOptionsExpand.png b/docs/zh/06-advanced/05-data-in/pic/OpenTSDB-08zh-AdvancedOptionsExpand.png deleted file mode 100644 index ea5dc538e5..0000000000 Binary files a/docs/zh/06-advanced/05-data-in/pic/OpenTSDB-08zh-AdvancedOptionsExpand.png and /dev/null differ diff --git a/docs/zh/06-advanced/05-data-in/pic/advanced_options.png b/docs/zh/06-advanced/05-data-in/pic/advanced_options.png new file mode 100644 index 0000000000..8ef9b8d35a Binary files /dev/null and b/docs/zh/06-advanced/05-data-in/pic/advanced_options.png differ diff --git a/docs/zh/06-advanced/05-data-in/pic/exception-handling-strategy.png b/docs/zh/06-advanced/05-data-in/pic/exception-handling-strategy.png new file mode 100644 index 0000000000..1e1d55d85c Binary files /dev/null and b/docs/zh/06-advanced/05-data-in/pic/exception-handling-strategy.png differ diff --git a/docs/zh/06-advanced/05-data-in/pic/mongodb-07.png b/docs/zh/06-advanced/05-data-in/pic/mongodb-07.png deleted file mode 100644 index 2305ec3d2e..0000000000 Binary files a/docs/zh/06-advanced/05-data-in/pic/mongodb-07.png and /dev/null differ 
diff --git a/docs/zh/06-advanced/05-data-in/pic/mssql-07.png b/docs/zh/06-advanced/05-data-in/pic/mssql-07.png deleted file mode 100644 index 6c1668481c..0000000000 Binary files a/docs/zh/06-advanced/05-data-in/pic/mssql-07.png and /dev/null differ diff --git a/docs/zh/06-advanced/05-data-in/pic/mysql-07.png b/docs/zh/06-advanced/05-data-in/pic/mysql-07.png deleted file mode 100644 index 6c1668481c..0000000000 Binary files a/docs/zh/06-advanced/05-data-in/pic/mysql-07.png and /dev/null differ diff --git a/docs/zh/06-advanced/05-data-in/pic/oracle-06.png b/docs/zh/06-advanced/05-data-in/pic/oracle-06.png deleted file mode 100644 index 0de5443f08..0000000000 Binary files a/docs/zh/06-advanced/05-data-in/pic/oracle-06.png and /dev/null differ diff --git a/docs/zh/06-advanced/05-data-in/pic/postgres-07.png b/docs/zh/06-advanced/05-data-in/pic/postgres-07.png deleted file mode 100644 index 6c1668481c..0000000000 Binary files a/docs/zh/06-advanced/05-data-in/pic/postgres-07.png and /dev/null differ diff --git a/docs/zh/08-operation/09-backup.md b/docs/zh/08-operation/09-backup.md index 1eda0a646b..075cc244f4 100644 --- a/docs/zh/08-operation/09-backup.md +++ b/docs/zh/08-operation/09-backup.md @@ -6,45 +6,154 @@ toc_max_heading_level: 4 为了防止数据丢失、误删操作,TDengine 提供全面的数据备份、恢复、容错、异地数据实时同步等功能,以保证数据存储的安全。本节简要说明备份和恢复功能。 -## 基于 taosdump 进行数据备份恢复 +# 1. 
基于 taosdump 进行数据备份恢复 -taosdump 是一个开源工具,用于支持从运行中的 TDengine 集群备份数据并将备份的数据恢复到相同或另一个正在运行的 TDengine 集群中。taosdump 可以将数据库作为逻辑数据单元进行备份,也可以对数据库中指定时间段内的数据记录进行备份。在使用taosdump 时,可以指定数据备份的目录路径。如果不指定目录路径,taosdump 将默认将数据备份到当前目录。 +taosdump 是一个开源工具,用于支持从运行中的 TDengine 集群备份数据并将备份的数据恢复到相同或另一个正在运行的 TDengine +集群中。taosdump 可以将数据库作为逻辑数据单元进行备份,也可以对数据库中指定时间段内的数据记录进行备份。在使用taosdump +时,可以指定数据备份的目录路径。如果不指定目录路径,taosdump 将默认将数据备份到当前目录。 以下为 taosdump 执行数据备份的使用示例。 + ```shell taosdump -h localhost -P 6030 -D dbname -o /file/path ``` -执行上述命令后,taosdump 会连接 localhost:6030 所在的 TDengine 集群,查询数据库 dbname 中的所有数据,并将数据备份到 /f ile/path 下。 +执行上述命令后,taosdump 会连接 localhost:6030 所在的 TDengine 集群,查询数据库 dbname 中的所有数据,并将数据备份到 /f +ile/path 下。 -在使用 taosdump 时,如果指定的存储路径已经包含数据文件,taosdump 会提示用户并立即退出,以避免数据被覆盖。这意味着同一存储路径只能用于一次备份。如果你看到相关提示,请谨慎操作,以免误操作导致数据丢失。 +在使用 taosdump 时,如果指定的存储路径已经包含数据文件,taosdump +会提示用户并立即退出,以避免数据被覆盖。这意味着同一存储路径只能用于一次备份。如果你看到相关提示,请谨慎操作,以免误操作导致数据丢失。 + +要将本地指定文件路径中的数据文件恢复到正在运行的 TDengine 集群中,可以通过指定命令行参数和数据文件所在路径来执行 taosdump +命令。以下为 taosdump 执行数据恢复的示例代码。 -要将本地指定文件路径中的数据文件恢复到正在运行的 TDengine 集群中,可以通过指定命令行参数和数据文件所在路径来执行 taosdump 命令。以下为 taosdump 执行数据恢复的示例代码。 ```shell taosdump -i /file/path -h localhost -P 6030 ``` 执行上述命令后,taosdump 会连接 localhost:6030 所在的 TDengine 集群,并将 /file/path 下的数据文件恢复到 TDengine 集群中。 -## 基于 TDengine Enterprise 进行数据备份恢复 +# 2. 基于 TDengine Enterprise 进行数据备份恢复 -TDengine Enterprise 提供了一个高效的增量备份功能,具体流程如下。 +## 2.1. 
概念 -第 1 步,通过浏览器访问 taosExplorer 服务,访问地址通常为 TDengine 集群所在 IP 地址的端口 6060,如 http://localhost:6060。 +基于 TDengine 的数据订阅功能,TDengine Enterprise 实现了数据的增量备份和恢复。用户可以通过 taosExplorer 对 TDengine +集群进行备份和恢复。 -第 2 步,在 taosExplorer 服务页面中的“系统管理 - 备份”页面新增一个数据备份任务,在任务配置信息中填写需要备份的数据库名称和备份存储文件路径,完成创建任务 -后即可启动数据备份。 在数据备份配置页面中可以配置三个参数: - - 备份周期:必填项,配置每次执行数据备份的时间间隔,可通过下拉框选择每天、每 7 天、每 30 天执行一次数据备份,配置后,会在对应的备份周期的0:00时启动一次数据备份任务; - - 数据库:必填项,配置需要备份的数据库名(数据库的 wal_retention_period 参数需大于0); - - 目录:必填项,配置将数据备份到 taosX 所在运行环境中指定的路径下,如 /root/data_backup; +TDengine Enterprise 的备份和恢复功能包括以下几个概念: -第 3 步,在数据备份任务完成后,在相同页面的已创建任务列表中找到创建的数据备份任务,直接执行一键恢复,就能够将数据恢复到 TDengine 中。 +1. 备份对象:用户可以对一个数据库,或者一个超级表进行备份。 +2. 备份计划:用户可以为某个备份对象创建一个备份计划。备份计划从指定的时间点开始,周期性的执行一次备份任务,并生成一组备份文件。 +3. 备份点:每执行一次备份任务,生成一组备份文件,它们对应一个时间点,称为**备份点**。第一个备份点称为**初始备份点**。 +4. 备份文件:多个备份点,组成备份计划的备份文件。 +5. 恢复任务:用户可以选择备份计划的某个备份点,创建一个恢复任务。恢复任务会从初始备份点开始,逐个应用备份点,恢复到指定的备份点。 -与 taosdump 相比,如果对相同的数据在指定存储路径下进行多次备份操作,由于TDengine Enterprise 不仅备份效率高,而且实行的是增量处理,因此每次备份任务都会很快完成。而由于 taosdump 永远是全量备份,因此 TDengine Enterprise 在数据量较大的场景下可以显著减小系统开销,而且更加方便。 +![backup-zh-00.png](./pic/backup-00-concept.png "数据备份和恢复") -**常见错误排查** +以上面的图为例: -1. 如果任务启动失败并报以下错误: +1. 用户创建了一个备份计划,从 2024-08-27 00:00:00 开始,每隔 1 天执行一次备份任务。 +2. 在 2024-08-27 00:00:00 执行了第一次备份任务,生成了一个备份点。 +3. 之后,每隔 1 天执行一次备份任务,生成了多个备份点,组成了备份文件。 +4. 用户可以选择某个备份点,创建一个恢复任务,恢复到指定的备份点。 +5. 恢复任务会从初始备份点开始,逐个应用备份点,恢复到指定的备份点。 + +## 2.2. 备份计划 + +### 2.1.1. 创建 + +1. 通过浏览器访问 taosExplorer 服务,访问地址通常为 TDengine 集群所在 IP 地址的端口 6060,如 http://localhost:6060。 +2. 在 taosExplorer 服务页面中,进入“系统管理 - 备份”页面,点击“创建备份计划”按钮。 + +![backup-zh-01.png](./pic/backup-01-create.png "创建备份计划") + +3. 
在弹出的“创建备份计划”表单中,填写备份计划的相关信息。 + +![backup-zh-02.png](./pic/backup-02-form.png "填写备份计划信息") + +需要填写的信息包括: + +* 数据库:需要备份的数据库名称。一个备份计划只能备份一个数据库/超级表。 +* 超级表:需要备份的超级表名称。如果不填写,则备份整个数据库。 +* 下次执行时间:首次执行备份任务的日期时间。 +* 备份周期:备份点之间的时间间隔。注意:备份周期必须大于数据库的 WAL_RETENTION_PERIOD 参数值。 +* 错误重试次数:对于可通过重试解决的错误,系统会按照此次数进行重试。 +* 错误重试间隔:每次重试之间的时间间隔。 +* 目录:存储备份文件的目录。 +* 备份文件大小:备份文件的大小限制。当备份文件大小达到此限制时,会自动创建新的备份文件。 +* 文件压缩等级:备份文件的压缩等级。支持:最快速度、最佳压缩比、兼具速度和压缩比。 + +创建成功后,备份计划会开始按照配置的参数运行。 + +### 2.1.2. 查看 + +在“备份计划”下的列表中,可以查看已创建的备份计划。 + +![backup-zh-03.png](./pic/backup-03-list.png "查看备份计划列表") + +点击“操作”中的“查看”按钮,可以查看备份计划的详细信息。 + +![backup-zh-04.png](./pic/backup-04-view.png "查看备份计划详情") + +### 2.1.3. 修改 + +点击“操作”中的“修改”按钮,可以修改备份计划的配置。 + +![backup-zh-05.png](./pic/backup-05-edit.png "修改备份计划") + +修改备份计划的配置后,当前运行的备份任务会先停止,然后按照新的配置重新运行。 + +### 2.1.4. 复制 + +点击“操作”中的“复制”按钮,可以复制备份计划。 + +![backup-zh-06.png](./pic/backup-06-copy.png "复制备份计划") + +除了数据库和超级表被置为空外,其他配置项和被复制的计划相同。用户点击“确认”后,创建一个新的备份计划。 + +### 2.1.5. 删除 + +在操作中点击关闭按钮,可以停止当前备份计划。点击“操作”中的“删除”按钮,可以删除备份计划。 + +![backup-zh-07.png](./pic/backup-07-del.png "删除备份计划") + +删除备份计划时,可以选择,是否删除关联的备份文件。 + +## 2.2. 备份文件 + +### 2.2.1. 查看 + +在备份计划列表中,选择要一个备份计划。在“备份文件”列中,点击“查看”按钮。可以查看和备份计划的所有备份点。 + +![backup-zh-08.png](./pic/backup-08-files.png "查看备份文件") + +在备份文件列表中,可以查看备份文件的详细信息。 + +![backup-zh-09.png](./pic/backup-09-filelist.png "查看备份文件列表") + +## 2.3. 恢复任务 + +### 2.3.1. 创建 + +在备份文件列表中,点击“操作”中的“恢复”按钮,可以创建一个恢复任务。 + +![backup-zh-10.png](./pic/backup-10-restore-create.png "创建恢复任务") + +在弹出的对话框中,选择使用哪个备份点开始恢复,默认为最早的备份点。点击“确定”后,创建恢复任务,并跳转至“恢复任务”列表。 + +### 2.3.2. 查看 + +在“恢复任务”列表中,可以查看已创建的恢复任务。 + +![backup-zh-11.png](./pic/backup-11-restore-list.png "查看恢复任务列表") + +恢复任务可以终止。点击“操作”中的开关,可以终止当前恢复任务。 + +# 3. 常见错误排查 + +## 3.1. 端口访问异常 + +如果任务启动失败并报以下错误: ```text Error: tmq to td task exec error @@ -52,9 +161,12 @@ Error: tmq to td task exec error Caused by: [0x000B] Unable to establish connection ``` + 产生原因是与数据源的端口链接异常,需检查数据源 FQDN 是否联通及端口 6030 是否可正常访问。 -2. 
如果使用 WebSocket 连接,任务启动失败并报以下错误: +## 3.2. 连接异常 + +如果使用 WebSocket 连接,任务启动失败并报以下错误: ```text Error: tmq to td task exec error @@ -73,7 +185,9 @@ Caused by: - "HTTP error: *": 可能连接到错误的 taosAdapter 端口或 LSB/Nginx/Proxy 配置错误。 - "WebSocket protocol error: Handshake not finished": WebSocket 连接错误,通常是因为配置的端口不正确。 -3. 如果任务启动失败并报以下错误: +## 3.3. 任务启动失败 + +如果任务启动失败并报以下错误: ```text Error: tmq to td task exec error @@ -88,5 +202,6 @@ Caused by: 修改数据 WAL 配置: ```sql -alter database test wal_retention_period 3600; +alter +database test wal_retention_period 3600; ``` \ No newline at end of file diff --git a/docs/zh/08-operation/18-ha/01-replica3.md b/docs/zh/08-operation/18-ha/01-replica3.md new file mode 100644 index 0000000000..fee19a0afe --- /dev/null +++ b/docs/zh/08-operation/18-ha/01-replica3.md @@ -0,0 +1,66 @@ +--- +title: 三副本方案 +sidebar_label: 三副本方案 +toc_max_heading_level: 4 +--- + +TDengine 的三副本方案采用 RAFT 算法来实现数据的一致性,包括元数据和时序数据。一个虚拟节点组(VGroup)构成了一个 RAFT 组;VGroup 中的虚拟节点(Vnode),便是该 RAFT 组的成员节点,也称之为副本。 + +1. 每个 Vnode 都有自己的角色,可以是 Leader(领导者)、Follower(跟随者)、Candidate(候选人)。 +2. 每个 Vnode 都维护了一份连续的日志,用于记录数据写入、变更、或删除等操作的所有指令。日志是由一系列有序的日志条目组成,每条日志都有唯一的编号,用于标识日志协商或执行的进度。 +3. Leader 角色的 Vnode 提供读写服务,在故障节点不超过半数的情况下保证集群的高可用性。此外,即使发生了节点重启及 Leader 重新选举等事件后,RAFT 协议也能够始终保证新产生的 Leader 可以提供已经写入成功的全部完整数据的读写服务。 +4. 每一次对数据库的变更请求(比如数据写入),都对应一条日志。在持续写入数据的过程中,会按照协议机制在每个成员节点上产生完全相同的日志记录,并且以相同的顺序执行数据变更操作,以 WAL 文件的形式存储在数据文件目录中。 +5. 只有当过半数的节点把该条日志追加到 WAL 文件,并且收到确认消息之后,这条日志才会被 Leader 认为是安全的;此时该日志进入 committed 状态,完成数据的插入,随后该日志被标记为 applied 的状态。 + +多副本工作原理参见 [数据写入与复制流程](../../26-tdinternal/01-arch.md#数据写入与复制流程) + +## 集群配置 + +三副本要求集群至少配置三个服务器节点,基本部署与配置步骤如下: +1. 确定服务器节点数量、主机名或域名,配置好所有节点的域名解析:DNS 或 /etc/hosts +2. 各节点分别安装 TDengine 服务端安装包,按需编辑好各节点 taos.cfg +3. 
启动各节点 taosd 服务,其他服务可按需启动(taosadapter/taosx/taoskeeper/taos-explorer) + +## 运维命令 + +### 创建集群 + +创建三节点的集群 + +```sql +CREATE dnode port ; +CREATE dnode port ; +``` + +创建三副本的 Mnode,保证 Mnode 高可用 + +```sql +CREATE mnode on dnode ; +CREATE mnode on dnode ; +``` + +### 数据库创建 + +创建三副本的数据库 + +```sql +create database replica 3 vgroups xx buffer xx ... +``` + +### 修改数据库副本数 + +创建了单副本数据库后,如果希望改为三副本时,可通过 alter 命令来实现,反之亦然 + +```sql +alter database replica 3|1 +``` + +## 常见问题 + +### 1. 创建三副本数据库或修改为三副本时,报错:DB error: Out of dnodes +- 服务器节点数不足:原因是服务器节点数少于三个。 +- 解决方案:增加服务器节点数量,满足最低要求。 + +### 2. 创建三副本数据库或 split vgroup 时,报错:DB error: Vnodes exhausted +- 服务器可用 Vnodes 不足:原因是某些服务器节点可用 Vnodes 数少于建库或 split vgroup 的需求数。 +- 解决方案:调整服务器 CPU 数量、SupportVnodes 配置参数,满足建库要求。 \ No newline at end of file diff --git a/docs/zh/08-operation/18-ha/02-replica2.md b/docs/zh/08-operation/18-ha/02-replica2.md new file mode 100644 index 0000000000..7f3eb2fe5c --- /dev/null +++ b/docs/zh/08-operation/18-ha/02-replica2.md @@ -0,0 +1,84 @@ +--- +title: 双副本方案 +sidebar_label: 双副本方案 +toc_max_heading_level: 4 +--- + +部分用户期望在保证一定可靠性、可用性条件下,尽可能压缩部署成本。为此,TDengine 提出基于 Arbitrator 的双副本方案,可提供集群中**只有单个服务故障且不出现连续故障**的容错能力。双副本方案是 TDengine Enterprise 特有功能,在 3.3.0.0 版本中第一次发布,建议使用最新版本。 + +双副本选主由高可用的 Mnode 提供仲裁服务,不由 Raft 组内决定。 +1. Arbitrator:仲裁服务,不存储数据,VGroup 因某一 Vnode 故障而无法提供服务时,Arbitrator 可根据数据同步情况指定 VGroup 内另一 Vnode 成为 Assigned Leader +2. AssignedLeader:被强制设置为 Leader 的 Vnode,无论其他副本 Vnode 是否存活,均可一直响应用户请求 + +![replica2.png](../pic/replica2.png) + +## 集群配置 + +双副本要求集群至少配置三个节点,基本部署与配置步骤如下: +1. 确定服务器节点数量、主机名或域名,配置好所有节点的域名解析:DNS 或 /etc/hosts +2. 各节点分别安装 TDengine **企业版**服务端安装包,按需编辑好各节点 taos.cfg +3. 可选择其中一个节点仅提供仲裁服务(部署 Mnode),将 SupportVnodes 参数设置为 0,表示不存储时序数据;该占用资源较少,仅需 1~2 核,且可与其他应用共用 +4. 启动各节点 taosd 服务,其他服务可按需启动(taosadapter/taosx/taoskeeper/taos-explorer) + +## 约束条件 +1. 最小配置的服务器节点数为 2+1 个,其中两个数据节点,一个仲裁节点 +2. 双副本为数据库建库参数,不同数据库可按需选择副本数 +3. 支持 TDengine 集群的完整特性,包括:读缓存、数据订阅、流计算等 +4. 支持 TDengine 所有语言连接器以及连接方式 +5. 
支持单副本与双副本之间切换(前提是节点数量满足需求、各节点可用 Vnode 数量/内存/存储空间足够) +6. 不支持双副本与三副本之间的切换 +7. 不支持双副本切换为双活,除非另外部署一套实例与当前实例组成双活方案 + +## 运维命令 + +### 创建集群 + +创建三节点的集群 + +```sql +CREATE dnode port ; +CREATE dnode port ; +``` + +创建三副本的 Mnode,保证 Mnode 高可用,确保仲裁服务的高可用 + +```sql +CREATE mnode on dnode ; +CREATE mnode on dnode ; +``` + +### 数据库创建 + +按需创建双副本数据库 + +```sql +create database replica 2 vgroups xx buffer xx ... +``` + +### 修改数据库副本数 + +创建了单副本数据库后,希望改为双副本时,可通过 alter 命令来实现,反之亦然 + +```sql +alter database replica 2|1 +``` + +## 异常情况 + +| 异常场景 | 集群状态 | +| ------- | ------ | +| 没有 Vnode 发生故障: Arbitrator 故障(Mnode 宕机节点超过一个,导致 Mnode 无法选主)| **持续提供服务** | +| 仅一个 Vnode 故障:VGroup 已经达成同步后,某一个 Vnode 才发生故障的 | **持续提供服务** | +| 仅一个 Vnode 故障:离线 Vnode 启动后,VGroup 未达成同步前,另一个 Vnode 服务故障的 | **无法提供服务** | +| 两个 Vnode 都发生故障 | **无法提供服务** | + + +## 常见问题 + +### 1. 创建双副本数据库或修改为双副本时,报错:DB error: Out of dnodes +- 服务器节点数不足:原因是,数据服务器节点数少于两个。 +- 解决方案:增加服务器节点数量,满足最低要求。 + +### 2. 创建双副本数据库或 split vgroup 时,报错:DB error: Vnodes exhausted +- 服务器可用 Vnodes 不足:原因是某些服务器节点可用 Vnodes 数少于建库或 split vgroup 的需求数。 +- 解决方案:调整服务器 CPU 数量、SupportVnodes 数量,满足建库要求。 diff --git a/docs/zh/08-operation/18-dual.md b/docs/zh/08-operation/18-ha/03-dual.md similarity index 70% rename from docs/zh/08-operation/18-dual.md rename to docs/zh/08-operation/18-ha/03-dual.md index caddb7ab3b..20565bd562 100644 --- a/docs/zh/08-operation/18-dual.md +++ b/docs/zh/08-operation/18-ha/03-dual.md @@ -1,31 +1,29 @@ --- -title: TDengine 双活系统 -sidebar_label: 双活系统 +title: 双活方案 +sidebar_label: 双活方案 toc_max_heading_level: 4 --- -本节介绍 TDengine 双活系统的配置和使用。 +部分用户因为部署环境的特殊性只能部署两台服务器,同时希望实现一定的服务高可用和数据高可靠。本文主要描述基于数据复制和客户端 Failover 两项关键技术的 TDengine 双活系统的产品行为,包括双活系统的架构、配置、运维等。TDengine 双活既可以用于前面所述资源受限的环境,也可用于在两套 TDengine 集群(不限资源)之间的灾备场景。双活是 TDengine Enterprise 特有功能,在 3.3.0.0 版本中第一次发布,建议使用最新版本。 -1. 
部分用户因为部署环境的特殊性只能部署两台服务器,同时希望实现一定的服务高可用和数据高可靠。本文主要描述基于数据复制和客户端 Failover 两项关键技术的 TDengine 双活系统的产品行为,包括双活系统的架构、配置、运维等。TDengine 双活既可以用于前面所述资源受限的环境,也可用于在两套 TDengine 集群(不限资源)之间的灾备场景。双活是 TDengine Enterprise 特有功能,在 3.3.0.0 版本中第一次发布,建议使用最新版本。 +双活系统的定义是:业务系统中有且仅有两台服务器,其上分别部署一套服务,在业务层看来这两台机器和两套服务是一个完整的系统,对其中的细节业务层不需要感知。双活中的两个节点通常被称为 Master-Slave,意为”主从“或”主备“,本文档中可能会出现混用的情况。 -2. 双活系统的定义是:业务系统中有且仅有两台服务器,其上分别部署一套服务,在业务层看来这两台机器和两套服务是一个完整的系统,对其中的细节业务层不需要感知。双活中的两个节点通常被称为 Master-Slave,意为”主从“或”主备“,本文档中可能会出现混用的情况。 +TDengine 双活系统的部署架构图如下, 其中涉及到三个关键点: -3. TDengine 双活系统的部署架构图如下, 其中涉及到三个关键点: 1. 由 Client Driver 实现对双系统的 Failover,即主节点宕机时的主从切换 2. 由 taosX 从(当前的)主节点到从节点实现数据复制 3. 由数据订阅的写接口在写入复制过来的数据时在 WAL 中加入特殊标记,由数据订阅的读接口在读取数据时自动过滤掉带有该特殊标记的数据,避免重复复制形成 infinite loop 注:下图中仅以一个单机版 TDengine 作为示例,但在实际部署中图中的一个 Host 也可以被任意节点数量的 TDengine 集群代替。 -![Active-Standby.png](./Active-Standby.png) +![Active-Standby.png](../pic/Active-Standby.png) -## 配置 -### 集群配置 +## 集群配置 双活对 TDengine 集群本身的配置没有任何要求,但对要在双活系统之间同步的数据库的 WAL 保留时长有一定要求,WAL 保留时长越大双活系统的容错率越高;如果备节点宕机时长超过主节点上的 WAL 保留时长,必定会导致备节点上有数据缺失;如果备节点宕机时长虽未超过主节点上的 WAL 保留时长,也有一定概率丢失数据,取决于接近的程度以及数据同步的速度。 -### 客户端配置 +## 客户端配置 目前只有 Java 连接器在 WebSocket 连接模式下支持双活,其配置示例如下 @@ -42,19 +40,19 @@ connection = DriverManager.getConnection(url, properties); 其中的配置属性及含义如下表 -| 属性名 | 含义 | -| ---------------------------------- | ----------------------------------------------------------------------------------------------------------------- | -| PROPERTY_KEY_SLAVE_CLUSTER_HOST | 第二节点的主机名或者 ip,默认空 | -| PROPERTY_KEY_SLAVE_CLUSTER_PORT | 第二节点的端口号,默认空 | +| 属性名 | 含义 | +| ---------------------------------- | --- | +| PROPERTY_KEY_SLAVE_CLUSTER_HOST | 第二节点的主机名或者 ip,默认空 | +| PROPERTY_KEY_SLAVE_CLUSTER_PORT | 第二节点的端口号,默认空 | | PROPERTY_KEY_ENABLE_AUTO_RECONNECT | 是否启用自动重连。仅在使用 WebSocket 连接时生效。true: 启用,false: 不启用。默认为 false。双活场景下请设置为 true | -| PROPERTY_KEY_RECONNECT_INTERVAL_MS | 重连的时间间隔,单位毫秒:默认 2000 毫秒,也就是 2 秒;最小值为 0, 表示立即重试;最大值不做限制 | -| PROPERTY_KEY_RECONNECT_RETRY_COUNT | 每节点最多重试次数:默认值为 3;最小值为 
0,表示不进行重试;最大值不做限制 | +| PROPERTY_KEY_RECONNECT_INTERVAL_MS | 重连的时间间隔,单位毫秒:默认 2000 毫秒,也就是 2 秒;最小值为 0, 表示立即重试;最大值不做限制 | +| PROPERTY_KEY_RECONNECT_RETRY_COUNT | 每节点最多重试次数:默认值为 3;最小值为 0,表示不进行重试;最大值不做限制 | -### 约束条件 +## 约束条件 -1. 应用程序不能使用订阅接口,如果配置了双活参数会导致创建消费者失败。 -2. 不建议应用程序使用参数绑定的写入和查询方式,如果使用应用需要自己解决连接切换后的相关对象失效问题。 -3. 在双活场景下,不建议用户应用程序显示调用 use database,应该在连接参数中指定 database。 +1. 应用程序不能使用订阅接口,如果配置了双活参数会导致创建消费者失败 +2. 不建议应用程序使用参数绑定的写入和查询方式,如果使用应用需要自己解决连接切换后的相关对象失效问题 +3. 在双活场景下,不建议用户应用程序显示调用 use database,应该在连接参数中指定 database 4. 双活的两端集群必须同构(即数据库的命名和所有配置参数以及用户名密码和权限设置等完全相同) 5. 只支持 WebSocket 连接模式 @@ -73,7 +71,7 @@ taosx replica start 1. 方法一 ```shell - - taosx replica start -f source_endpoint -t sink_endpoint [database...] +taosx replica start -f source_endpoint -t sink_endpoint [database...] ``` 在本机器所在的 taosx 服务中建立从 source_endpoint 到 sink_endpoint 的同步任务。运行该命令成功后,将打印 replica ID 到控制台(后续记为 id)。 @@ -82,6 +80,7 @@ taosx replica start ```shell taosx replica start -f td1:6030 -t td2:6030 ``` + 该示例命令会自动创建除 information_schema、performance_schema、log、audit 库之外的同步任务。可以使用 `http://td2:6041` 指定该 endpoint 使用 websocket 接口(默认是原生接口)。也可以指定数据库同步:taosx replica start -f td1:6030 -t td2:6030 db1 仅创建指定的数据库同步任务。 2. 方法二 @@ -93,9 +92,9 @@ taosx replica start -i id [database...] 使用上面已经创建的 Replica ID (id) 以在该同步任务中增加其它数据库。 注意: -- 多次使用该命令,不会创建重复任务,仅将所指定的数据库增加到相应任务中。 -- replica id 在一个 taosX 实例内是全局唯一的,与 source/sink 的组合无关 -- 为便于记忆,replica id 为一个随机常用单词,系统自动将 source/sink 组合对应到一个词库中取得一个唯一可用单词。 +1. 多次使用该命令,不会创建重复任务,仅将所指定的数据库增加到相应任务中。 +2. replica id 在一个 taosX 实例内是全局唯一的,与 source/sink 的组合无关 +3. 为便于记忆,replica id 为一个随机常用单词,系统自动将 source/sink 组合对应到一个词库中取得一个唯一可用单词。 ### 查看任务状态 @@ -120,8 +119,8 @@ taosx replica stop id [db...] ``` 该命令作用如下: -- 停止指定 Replica ID 下所有或指定数据库的双副本同步任务。 -- 使用 `taosx replica stop id1 db1` 表示停止 id1 replica 下 db1的同步任务。 +1. 停止指定 Replica ID 下所有或指定数据库的双副本同步任务。 +2. 使用 `taosx replica stop id1 db1` 表示停止 id1 replica 下 db1的同步任务。 ### 重启双活任务 @@ -130,8 +129,8 @@ taosx replica restart id [db...] 
``` 该命令作用如下: -- 重启指定 Replica ID 下所有或指定数据库的双副本同步任务。 -- 使用 `taosx replica start id1 db1` 仅重启指定数据库 db1的同步任务。 +1. 重启指定 Replica ID 下所有或指定数据库的双副本同步任务。 +2. 使用 `taosx replica start id1 db1` 仅重启指定数据库 db1的同步任务。 ### 查看同步进度 diff --git a/docs/zh/08-operation/18-ha/index.md b/docs/zh/08-operation/18-ha/index.md new file mode 100644 index 0000000000..d482646ddd --- /dev/null +++ b/docs/zh/08-operation/18-ha/index.md @@ -0,0 +1,25 @@ +--- +sidebar_label: 高可用 +title: 高可用 +--- + +TDengine 作为分布式时序数据库,支持高可用特性。默认高可用方案为基于 RAFT 协议的标准三副本方案;为适应不同用户场景的需要,提供基于 RAFT 协议改造的双副本方案;为满足传统双机主备架构的需求,提供基于 WAL 数据同步的双活方案。 + +- 标准三副本方案:时序数据的副本数目为 3,确保了最高的可用性,成本也最高。 +- 双副本结合 Arbitrator 方案:时序数据的副本数目为 2,但节点数目至少为 3,以确保高可用性和良好的数据一致性,可显著降低成本。与三副本方案相比,此方案在显著降低成本的同时,依然保持了较高的可用性。 +- 双活方案:可仅部署两个节点,高可用性较好,数据一致性较弱(最终一致性)。 + +以下为三种方案的特点: + +| # | **三副本** | **双副本** | **双活** | +|:--|:----------|:----------|:--------| +| **集群数目** | 部署一个集群 | 部署一个集群 | 部署两个不同集群 | +| **最小节点数** | 三个数据节点 | 两个数据节点,一个仲裁节点 | 两个数据节点 | +| **选主原理** | Raft 协议 | 管理节点仲裁选主 | 无需选主 | +| **同步原理** | Raft 协议 | Raft 协议 | 通过 taosX 进行数据同步 | +| **同步延迟** | 无延迟 | 无延迟 | 依赖于 taosX 的同步速度,秒级延迟 | +| **数据安全性** | 无数据丢失 | 无数据丢失 | 依赖于 WAL 的保存时长 | +| **数据一致性** | RAFT 一致性 | RAFT 一致性 | 最终一致性 | +| **高可用性** | 任一节点宕机不影响服务 | 任一节点宕机不影响服务,但不能处理连续宕机场景 | 一个实例存活即可提供服务 | + + diff --git a/docs/zh/08-operation/Active-Standby.png b/docs/zh/08-operation/Active-Standby.png deleted file mode 100644 index f0caab5c55..0000000000 Binary files a/docs/zh/08-operation/Active-Standby.png and /dev/null differ diff --git a/docs/zh/08-operation/pic/backup-00-concept.png b/docs/zh/08-operation/pic/backup-00-concept.png new file mode 100644 index 0000000000..5123b4d540 Binary files /dev/null and b/docs/zh/08-operation/pic/backup-00-concept.png differ diff --git a/docs/zh/08-operation/pic/backup-01-create.png b/docs/zh/08-operation/pic/backup-01-create.png new file mode 100644 index 0000000000..a424c276e5 Binary files /dev/null and b/docs/zh/08-operation/pic/backup-01-create.png differ diff --git 
a/docs/zh/08-operation/pic/backup-02-form.png b/docs/zh/08-operation/pic/backup-02-form.png new file mode 100644 index 0000000000..3ccd81c831 Binary files /dev/null and b/docs/zh/08-operation/pic/backup-02-form.png differ diff --git a/docs/zh/08-operation/pic/backup-03-list.png b/docs/zh/08-operation/pic/backup-03-list.png new file mode 100644 index 0000000000..505d6a8040 Binary files /dev/null and b/docs/zh/08-operation/pic/backup-03-list.png differ diff --git a/docs/zh/08-operation/pic/backup-04-view.png b/docs/zh/08-operation/pic/backup-04-view.png new file mode 100644 index 0000000000..7bfa699906 Binary files /dev/null and b/docs/zh/08-operation/pic/backup-04-view.png differ diff --git a/docs/zh/08-operation/pic/backup-05-edit.png b/docs/zh/08-operation/pic/backup-05-edit.png new file mode 100644 index 0000000000..5ff1204ad5 Binary files /dev/null and b/docs/zh/08-operation/pic/backup-05-edit.png differ diff --git a/docs/zh/08-operation/pic/backup-06-copy.png b/docs/zh/08-operation/pic/backup-06-copy.png new file mode 100644 index 0000000000..2ec1ea68d0 Binary files /dev/null and b/docs/zh/08-operation/pic/backup-06-copy.png differ diff --git a/docs/zh/08-operation/pic/backup-07-del.png b/docs/zh/08-operation/pic/backup-07-del.png new file mode 100644 index 0000000000..e1cf4748bf Binary files /dev/null and b/docs/zh/08-operation/pic/backup-07-del.png differ diff --git a/docs/zh/08-operation/pic/backup-08-files.png b/docs/zh/08-operation/pic/backup-08-files.png new file mode 100644 index 0000000000..07f2184d4f Binary files /dev/null and b/docs/zh/08-operation/pic/backup-08-files.png differ diff --git a/docs/zh/08-operation/pic/backup-09-filelist.png b/docs/zh/08-operation/pic/backup-09-filelist.png new file mode 100644 index 0000000000..b963091f36 Binary files /dev/null and b/docs/zh/08-operation/pic/backup-09-filelist.png differ diff --git a/docs/zh/08-operation/pic/backup-10-restore-create.png b/docs/zh/08-operation/pic/backup-10-restore-create.png new file 
mode 100644 index 0000000000..e0e22160d0 Binary files /dev/null and b/docs/zh/08-operation/pic/backup-10-restore-create.png differ diff --git a/docs/zh/08-operation/pic/backup-11-restore-list.png b/docs/zh/08-operation/pic/backup-11-restore-list.png new file mode 100644 index 0000000000..ca1f1b45d5 Binary files /dev/null and b/docs/zh/08-operation/pic/backup-11-restore-list.png differ diff --git a/docs/zh/08-operation/pic/replica2.png b/docs/zh/08-operation/pic/replica2.png new file mode 100644 index 0000000000..985f7b35c4 Binary files /dev/null and b/docs/zh/08-operation/pic/replica2.png differ diff --git a/docs/zh/10-third-party/03-visual/01-grafana.mdx b/docs/zh/10-third-party/03-visual/01-grafana.mdx index d7406352c9..043cfcaa5c 100644 --- a/docs/zh/10-third-party/03-visual/01-grafana.mdx +++ b/docs/zh/10-third-party/03-visual/01-grafana.mdx @@ -10,15 +10,11 @@ import TabItem from "@theme/TabItem"; ## 概述 本文档介绍如何将 TDengine 数据源与开源数据可视化系统 [Grafana](https://www.grafana.com/) 集成,以实现数据的可视化和监测报警系统的搭建。通过 TDengine 插件,您可以轻松地将 TDengine 数据表的数据展示在 Grafana 仪表盘上,且无需进行复杂的开发工作。 -## Grafana 版本要求 -当前 TDengine 支持 Grafana 7.5 及以上版本,建议使用最新版本。请根据您的系统环境下载并安装对应版本的 Grafana。 - - ## 前置条件 要让 Grafana 能正常添加 TDengine 数据源,需要以下几方面的准备工作。 -- Grafana 服务已经部署并正常运行。 +- Grafana 服务已经部署并正常运行。当前 TDengine 支持 Grafana 7.5 及以上版本,建议使用最新版本。 **注意**:要确保启动 Grafana 的账号有其安装目录的写权限,否则可能后面无法安装插件。 - TDengine 集群已经部署并正常运行。 - taosAdapter 已经安装并正常运行。具体细节请参考 [taosAdapter 的使用手册](../../../reference/components/taosadapter) diff --git a/docs/zh/10-third-party/05-bi/05-yhbi.md b/docs/zh/10-third-party/05-bi/05-yhbi.md index b60b0495f0..70dda71051 100644 --- a/docs/zh/10-third-party/05-bi/05-yhbi.md +++ b/docs/zh/10-third-party/05-bi/05-yhbi.md @@ -10,13 +10,10 @@ toc_max_heading_level: 4 一旦数据源配置完成,永洪BI便能直接从TDengine中读取数据,并利用其强大的数据处理和分析功能,为用户提供丰富的数据展示、分析和预测能力。这意味着用户无须编写复杂的代码或进行烦琐的数据转换工作,即可轻松获取所需的业务洞察。 -## 安装永洪 BI +## 前置条件 -确保永洪 BI 已经安装并运行(如果未安装,请到永洪科技官方下载页面下载)。 - -## 安装JDBC驱动 - -从 maven.org 下载 TDengine JDBC 连接器文件 
“taos-jdbcdriver-3.2.7-dist.jar”,并安装在永洪 BI 的机器上。 +- 确保永洪 BI 已经安装并运行(如果未安装,请到永洪科技官方下载页面下载)。 +- 安装JDBC驱动。从 maven.org 下载 TDengine JDBC 连接器文件 “taos-jdbcdriver-3.4.0-dist.jar”,并安装在永洪 BI 的机器上。 ## 配置JDBC数据源 diff --git a/docs/zh/10-third-party/05-bi/09-seeq.md b/docs/zh/10-third-party/05-bi/09-seeq.md index 7e61cdcb11..e01deb7e84 100644 --- a/docs/zh/10-third-party/05-bi/09-seeq.md +++ b/docs/zh/10-third-party/05-bi/09-seeq.md @@ -8,16 +8,11 @@ Seeq 是制造业和工业互联网(IIOT)高级分析软件。Seeq 支持在 通过 TDengine Java connector, Seeq 可以轻松支持查询 TDengine 提供的时序数据,并提供数据展现、分析、预测等功能。 -## Seeq 安装方法 +## 前置条件 -从 [Seeq 官网](https://www.seeq.com/customer-download)下载相关软件,例如 Seeq Server 和 Seeq Data Lab 等。Seeq Data Lab 需要安装在和 Seeq Server 不同的服务器上,并通过配置和 Seeq Server 互联。详细安装配置指令参见[Seeq 知识库]( https://support.seeq.com/kb/latest/cloud/)。 +- Seeq 已经安装。从 [Seeq 官网](https://www.seeq.com/customer-download)下载相关软件,例如 Seeq Server 和 Seeq Data Lab 等。Seeq Data Lab 需要安装在和 Seeq Server 不同的服务器上,并通过配置和 Seeq Server 互联。详细安装配置指令参见[Seeq 知识库]( https://support.seeq.com/kb/latest/cloud/)。 -### TDengine 本地实例安装方法 - -请参考[官网文档](../../../get-started)。 - -### TDengine Cloud 访问方法 -如果使用 Seeq 连接 TDengine Cloud,请在 https://cloud.taosdata.com 申请帐号并登录查看如何访问 TDengine Cloud。 +- TDengine 本地实例已安装。 请参考[官网文档](../../../get-started)。 若使用 TDengine Cloud,请在 https://cloud.taosdata.com 申请帐号并登录查看如何访问 TDengine Cloud。 ## 配置 Seeq 访问 TDengine diff --git a/docs/zh/10-third-party/05-bi/11-superset.md b/docs/zh/10-third-party/05-bi/11-superset.md index b0ef0dd7d1..337fc53825 100644 --- a/docs/zh/10-third-party/05-bi/11-superset.md +++ b/docs/zh/10-third-party/05-bi/11-superset.md @@ -4,26 +4,27 @@ title: 与 Superset 集成 --- ‌Apache Superset‌ 是一个现代的企业级商业智能(BI)Web 应用程序,主要用于数据探索和可视化。它由 Apache 软件基金会支持,是一个开源项目,它拥有活跃的社区和丰富的生态系统。Apache Superset 提供了直观的用户界面,使得创建、分享和可视化数据变得简单,同时支持多种数据源和丰富的可视化选项‌。 -通过 TDengine 的 Python 连接器, ‌Superset‌ 可支持 TDengine 数据源并提供数据展现、分析等功能 +通过 TDengine 的 Python 连接器, ‌Apache ‌Superset‌ 可支持 TDengine 数据源并提供数据展现、分析等功能 -## 安装 Apache Superset -确保已安装 Apache 
Superset v2.1.0 及以上版本, 如未安装,请到其 [官网](https://superset.apache.org/) 安装 +## 前置条件 -## 安装 TDengine +准备以下环境: +- TDengine 集群已部署并正常运行(企业及社区版均可) +- taosAdapter 能够正常运行。详细参考 [taosAdapter 使用手册](../../../reference/components/taosadapter) +- Apache Superset v2.1.0 或以上版本已安装。安装 Apache Superset 请参考 [官方文档](https://superset.apache.org/) -TDengine 企业版及社区版均可支持,版本要求在 3.0 及以上 ## 安装 TDengine Python 连接器 -TDengine Python 连接器从 `v2.1.18` 开始自带 Superset 连接驱动,安装程序会把连接驱动安装到 Superset 相应目录下并向 Superset 提供数据源服务 -Superset 与 TDengine 之间使用 WebSocket 协议连接,所以需另安装支持 WebSocket 连接协议的组件 `taos-ws-py` , 全部安装脚本如下: +TDengine Python 连接器从 `v2.1.18` 起带 Superset 连接驱动,会安装至 Superset 相应目录下并向 Superset 提供数据源服务 +Superset 与 TDengine 之间使用 WebSocket 协议连接,需安装支持此协议的 `taos-ws-py` 组件, 全部安装脚本如下: ```bash pip3 install taospy pip3 install taos-ws-py ``` -## Superset 中配置 TDengine 连接 +## 配置 TDengine 数据源 **第 1 步**,进入新建数据库连接页面 "Superset" → "Setting" → "Database Connections" → "+DATABASE" **第 2 步**,选择 TDengine 数据库连接。"SUPPORTED DATABASES" 下拉列表中选择 "TDengine" 项。 @@ -47,7 +48,7 @@ taosws://用户名:密码@主机名:端口号 ```bash taosws://root:taosdata@localhost:6041 ``` -第5步,配置好连接串,点击 “TEST CONNECTION” 测试连接是否成功,测试通过后点击 “CONNECT” 按钮,完成连接。 +**第 5 步**,配置好连接串,点击 “TEST CONNECTION” 测试连接是否成功,测试通过后点击 “CONNECT” 按钮,完成连接。 ## 开始使用 diff --git a/docs/zh/14-reference/01-components/04-taosx.md b/docs/zh/14-reference/01-components/04-taosx.md index 114a6b1ce5..4386d49b22 100644 --- a/docs/zh/14-reference/01-components/04-taosx.md +++ b/docs/zh/14-reference/01-components/04-taosx.md @@ -282,9 +282,6 @@ d4,2017-07-14T10:40:00.006+08:00,-2.740636,10,-0.893545,7,California.LosAngles # listen to ip:port address #listen = "0.0.0.0:6050" -# database url -#database_url = "sqlite:taosx.db" - # default global request timeout which unit is second. 
This parameter takes effect for certain interfaces that require a timeout setting #request_timeout = 30 @@ -384,6 +381,16 @@ Linux 下 `journalctl` 查看日志的命令如下: journalctl -u taosx [-f] ``` +### 从旧版本升级 + +从 3.3.5.0/3.3.4.11 版本开始,不再在 systemd 服务文件预置 `DATABASE_URL=` 环境变量。如果在配置文件中使用了自定义 `data_dir` 参数,从旧版本升级后,必须将数据库文件从旧版本移动到自定义目录中。以 `data_dir = "/path/to/data"` 为例,执行以下命令: + +```shell +mv /var/lib/taos/taosx/taosx.db* /path/to/data/ +``` + +之后再重启服务(此行为仅影响 Linux 系统)。 + ## taosX 监控指标 taosX 会将监控指标上报给 taosKeeper,这些监控指标会被 taosKeeper 写入监控数据库,默认是 `log` 库,可以在 taoskeeper 配置文件中修改。以下是这些监控指标的详细介绍。 diff --git a/docs/zh/14-reference/01-components/05-taosx-agent.md b/docs/zh/14-reference/01-components/05-taosx-agent.md index bf2e6f7e78..1f1276e834 100644 --- a/docs/zh/14-reference/01-components/05-taosx-agent.md +++ b/docs/zh/14-reference/01-components/05-taosx-agent.md @@ -12,7 +12,8 @@ sidebar_label: taosX-Agent - `endpoint`: 必填,`taosX` 的 GRPC 服务地址。 - `token`: 必填,在 `Explorer` 上创建 `Agent` 时,产生的 Token。 - `instanceId`:当前 taosx-agent 服务的实例 ID,如果同一台机器上启动了多个 taosx-agent 实例,必须保证各个实例的实例 ID 互不相同。 -- `compression`: 非必填,可配置为 `ture` 或 `false`, 默认为 `false`。配置为`true`, 则开启 `Agent` 和 `taosX` 通信数据压缩。 +- `compression`: 非必填,可配置为 `true` 或 `false`, 默认为 `false`。配置为`true`, 则开启 `Agent` 和 `taosX` 通信数据压缩。 +- `in_memory_cache_capacity`: 非必填,表示可在内存中缓存的最大消息批次数,可配置为大于 0 的整数。默认为 `64`。 - `log_level`: 非必填,日志级别,默认为 `info`, 同 `taosX` 一样,支持 `error`,`warn`,`info`,`debug`,`trace` 五级。已弃用,请使用 `log.level` 代替。 - `log_keep_days`:非必填,日志保存天数,默认为 `30` 天。已弃用,请使用 `log.keepDays` 代替。 - `log.path`:日志文件存放的目录。 @@ -44,6 +45,10 @@ sidebar_label: taosX-Agent # #compression = true +# In-memory cache capacity +# +#in_memory_cache_capacity = 64 + # log configuration [log] # All log files are stored in this directory diff --git a/docs/zh/14-reference/02-tools/09-taosdump.md b/docs/zh/14-reference/02-tools/09-taosdump.md index 7afe8721ee..6a4df44f25 100644 --- a/docs/zh/14-reference/02-tools/09-taosdump.md +++ 
b/docs/zh/14-reference/02-tools/09-taosdump.md @@ -4,26 +4,17 @@ sidebar_label: taosdump toc_max_heading_level: 4 --- -taosdump 是一个支持从运行中的 TDengine 集群备份数据并将备份的数据恢复到相同或另一个运行中的 TDengine 集群中的工具应用程序。 +taosdump 是为开源用户提供的 TDengine 数据备份/恢复工具,备份数据文件采用标准 [ Apache AVRO ](https://avro.apache.org/) 格式,方便与外界生态交换数据。taosdump 提供多种数据备份及恢复选项来满足不同需求,可通过 --help 查看支持的全部选项。 -taosdump 可以用数据库、超级表或普通表作为逻辑数据单元进行备份,也可以对数据库、超级 -表和普通表中指定时间段内的数据记录进行备份。使用时可以指定数据备份的目录路径,如果 -不指定位置,taosdump 默认会将数据备份到当前目录。 - -如果指定的位置已经有数据文件,taosdump 会提示用户并立即退出,避免数据被覆盖。这意味着同一路径只能被用于一次备份。 -如果看到相关提示,请小心操作。 - -taosdump 是一个逻辑备份工具,它不应被用于备份任何原始数据、环境设置、 -硬件信息、服务端配置或集群的拓扑结构。taosdump 使用 -[ Apache AVRO ](https://avro.apache.org/)作为数据文件格式来存储备份数据。 ## 安装 -taosdump 有两种安装方式: +taosdump 提供两种安装方式: -- 安装 taosTools 官方安装包, 请从[发布历史页面](https://docs.taosdata.com/releases/tools/)页面找到 taosTools 并下载安装。 +- taosdump 是 TDengine 安装包中默认安装组件,安装 TDengine 后即可使用,可参考[TDengine 安装](../../../get-started/) + +- 单独编译 taos-tools 并安装, 参考 [taos-tools](https://github.com/taosdata/taos-tools) 仓库。 -- 单独编译 taos-tools 并安装, 详情请参考 [taos-tools](https://github.com/taosdata/taos-tools) 仓库。 ## 常用使用场景 @@ -31,9 +22,11 @@ taosdump 有两种安装方式: 1. 备份所有数据库:指定 `-A` 或 `--all-databases` 参数; 2. 备份多个指定数据库:使用 `-D db1,db2,...` 参数; -3. 备份指定数据库中的某些超级表或普通表:使用 `dbname stbname1 stbname2 tbname1 tbname2 ...` 参数,注意这种输入序列第一个参数为数据库名称,且只支持一个数据库,第二个和之后的参数为该数据库中的超级表或普通表名称,中间以空格分隔; +3. 备份指定数据库中某些超级表或普通表:使用 `dbname stbname1 stbname2 tbname1 tbname2 ...` 参数,注意这种输入序列第一个参数为数据库名称,且只支持一个数据库,第二个和之后的参数为该数据库中的超级表或普通表名称,中间以空格分隔; 4. 备份系统 log 库:TDengine 集群通常会包含一个系统数据库,名为 `log`,这个数据库内的数据为 TDengine 自我运行的数据,taosdump 默认不会对 log 库进行备份。如果有特定需求对 log 库进行备份,可以使用 `-a` 或 `--allow-sys` 命令行参数。 5. “宽容”模式备份:taosdump 1.4.1 之后的版本提供 `-n` 参数和 `-L` 参数,用于备份数据时不使用转义字符和“宽容”模式,可以在表名、列名、标签名没使用转义字符的情况下减少备份数据时间和备份数据占用空间。如果不确定符合使用 `-n` 和 `-L` 条件时请使用默认参数进行“严格”模式进行备份。转义字符的说明请参考[官方文档](../../taos-sql/escape)。 +6. `-o` 参数指定的目录下如果已存在备份文件,为防止数据被覆盖,taosdump 会报错并退出,请更换其它空目录或清空原来数据后再备份。 +7. 
目前 taosdump 不支持数据断点继备功能,一旦数据备份中断,需要从头开始。如果备份需要很长时间,建议使用(-S -E 选项)指定开始/结束时间进行分段备份的方法, :::tip - taosdump 1.4.1 之后的版本提供 `-I` 参数,用于解析 avro 文件 schema 和数据,如果指定 `-s` 参数将只解析 schema。 @@ -45,7 +38,9 @@ taosdump 有两种安装方式: ### taosdump 恢复数据 -恢复指定路径下的数据文件:使用 `-i` 参数加上数据文件所在路径。如前面提及,不应该使用同一个目录备份不同数据集合,也不应该在同一路径多次备份同一数据集,否则备份数据会造成覆盖或多次备份。 +- 恢复指定路径下的数据文件:使用 `-i` 参数加上数据文件所在路径。如前面提及,不应该使用同一个目录备份不同数据集合,也不应该在同一路径多次备份同一数据集,否则备份数据会造成覆盖或多次备份。 +- taosdump 支持数据恢复至新数据库名下,参数是 -W, 详细见命令行参数说明。 + :::tip taosdump 内部使用 TDengine stmt binding API 进行恢复数据的写入,为提高数据恢复性能,目前使用 16384 为一次写入批次。如果备份数据中有比较多列数据,可能会导致产生 "WAL size exceeds limit" 错误,此时可以通过使用 `-B` 参数调整为一个更小的值进行尝试。 @@ -108,6 +103,13 @@ Usage: taosdump [OPTION...] dbname [tbname ...] the table name.(Version 2.5.3) -T, --thread-num=THREAD_NUM Number of thread for dump in file. Default is 8. + -W, --rename=RENAME-LIST Rename database name with new name during + importing data. RENAME-LIST: + "db1=newDB1|db2=newDB2" means rename db1 to newDB1 + and rename db2 to newDB2 (Version 2.5.4) + -k, --retry-count=VALUE Set the number of retry attempts for connection or + query failures + -z, --retry-sleep-ms=VALUE retry interval sleep time, unit ms -C, --cloud=CLOUD_DSN specify a DSN to access TDengine cloud service -R, --restful Use RESTful interface to connect TDengine -t, --timeout=SECONDS The timeout seconds for websocket to interact. @@ -115,10 +117,6 @@ Usage: taosdump [OPTION...] dbname [tbname ...] -?, --help Give this help list --usage Give a short usage message -V, --version Print program version - -W, --rename=RENAME-LIST Rename database name with new name during - importing data. RENAME-LIST: - "db1=newDB1|db2=newDB2" means rename db1 to newDB1 - and rename db2 to newDB2 (Version 2.5.4) Mandatory or optional arguments to long options are also mandatory or optional for any corresponding short options. 
diff --git a/docs/zh/14-reference/02-tools/10-taosbenchmark.md b/docs/zh/14-reference/02-tools/10-taosbenchmark.md index d655290577..44dab0ad5f 100644 --- a/docs/zh/14-reference/02-tools/10-taosbenchmark.md +++ b/docs/zh/14-reference/02-tools/10-taosbenchmark.md @@ -4,59 +4,59 @@ sidebar_label: taosBenchmark toc_max_heading_level: 4 --- -taosBenchmark (曾用名 taosdemo ) 是一个用于测试 TDengine 产品性能的工具。taosBenchmark 可以测试 TDengine 的插入、查询和订阅等功能的性能,它可以模拟由大量设备产生的大量数据,还可以灵活地控制数据库、超级表、标签列的数量和类型、数据列的数量和类型、子表的数量、每张子表的数据量、插入数据的时间间隔、taosBenchmark 的工作线程数量、是否以及如何插入乱序数据等。为了兼容过往用户的使用习惯,安装包提供 了 taosdemo 作为 taosBenchmark 的软链接。 +taosBenchmark 是 TDengine 产品性能基准测试工具,提供对 TDengine 产品写入、查询及订阅性能测试,输出性能指标。 ## 安装 -taosBenchmark 有两种安装方式: +taosBenchmark 提供两种安装方式: -- 安装 TDengine 官方安装包的同时会自动安装 taosBenchmark, 详情请参考[ TDengine 安装](../../../get-started/)。 +- taosBenchmark 是 TDengine 安装包中默认安装组件,安装 TDengine 后即可使用,参考 [TDengine 安装](../../../get-started/) -- 单独编译 taos-tools 并安装, 详情请参考 [taos-tools](https://github.com/taosdata/taos-tools) 仓库。 +- 单独编译 taos-tools 并安装, 参考 [taos-tools](https://github.com/taosdata/taos-tools) 仓库。 ## 运行 -### 配置和运行方式 +### 运行方式 -taosBenchmark 需要在操作系统的终端执行,该工具支持两种配置方式:[命令行参数](#命令行参数详解) 和 [JSON 配置文件](#配置文件参数详解)。这两种方式是互斥的,在使用配置文件时只能使用一个命令行参数 `-f ` 指定配置文件。在使用命令行参数运行 taosBenchmark 并控制其行为时则不能使用 `-f` 参数而要用其它参数来进行配置。除此之外,taosBenchmark 还提供了一种特殊的运行方式,即无参数运行。 +taosBenchmark 支持三种运行模式: +- 无参数模式 +- 命令行模式 +- JSON 配置文件模式 +`命令行方式` 为 `JSON 配置文件方式` 功能子集,两者都使用时,命令行方式优先。 -taosBenchmark 支持对 TDengine 做完备的性能测试,其所支持的 TDengine 功能分为三大类:写入、查询和订阅。这三种功能之间是互斥的,每次运行 taosBenchmark 只能选择其中之一。值得注意的是,所要测试的功能类型在使用命令行配置方式时是不可配置的,命令行配置方式只能测试写入性能。若要测试 TDengine 的查询和订阅性能,必须使用配置文件的方式,通过配置文件中的参数 `filetype` 指定所要测试的功能类型。 **在运行 taosBenchmark 之前要确保 TDengine 集群已经在正确运行。** ### 无命令行参数运行 -执行下列命令即可快速体验 taosBenchmark 对 TDengine 进行基于默认配置的写入性能测试。 - ```bash taosBenchmark ``` -在无参数运行时,taosBenchmark 默认连接 `/etc/taos` 下指定的 TDengine 集群,并在 TDengine 中创建一个名为 test 的数据库,test 数据库下创建名为 meters 的一张超级表,超级表下创建 10000 张表,每张表中写入 10000 条记录。注意,如果已有 test 
数据库,这个命令会先删除该数据库后建立一个全新的 test 数据库。 +在无参数运行时,taosBenchmark 默认连接 `/etc/taos/taos.cfg` 中指定的 TDengine 集群。 +连接成功后,会默认创建智能电表示例数据库 test,创建超级表 meters, 创建子表 1 万,每子写入数据 1 万条,若 test 库已存在,默认会先删再建。 -### 使用命令行配置参数运行 - -在使用命令行参数运行 taosBenchmark 并控制其行为时,`-f ` 参数不能使用。所有配置参数都必须通过命令行指定。以下是使用命令行方式测试 taosBenchmark 写入性能的一个示例。 +### 使用命令行参数运行 +命令行支持的参数为写入功能中使用较为频繁的参数,查询与订阅功能不支持命令行方式 +示例: ```bash -taosBenchmark -I stmt -n 200 -t 100 +taosBenchmark -d db -t 100 -n 1000 -T 4 -I stmt -y ``` -上面的命令 `taosBenchmark` 将创建一个名为`test`的数据库,在其中建立一张超级表`meters`,在该超级表中建立 100 张子表并使用参数绑定的方式为每张子表插入 200 条记录。 +此命令表示使用 `taosBenchmark` 将创建一个名为 `db` 的数据库,并建立默认超级表 `meters`,子表 100 ,使用参数绑定(stmt)方式为每张子表写入 1000 条记录。 ### 使用配置文件运行 -taosBenchmark 安装包中提供了配置文件的示例,位于 `/examples/taosbenchmark-json` 下 - -使用如下命令行即可运行 taosBenchmark 并通过配置文件控制其行为。 +配置文件方式运行提供了全部功能,所有命令行参数都可以在配置文件中配置运行 ```bash taosBenchmark -f ``` -**下面是几个配置文件的示例:** +**下面为支持的写入、查询、订阅三大功能的配置文件示例:** -#### 插入场景 JSON 配置文件示例 +#### 写入场景 JSON 配置文件示例

insert.json @@ -89,130 +89,102 @@ taosBenchmark -f
+查看更多 json 配置文件示例可 [点击这里](https://github.com/taosdata/taos-tools/tree/main/example) + ## 命令行参数详解 +| 命令行参数 | 功能说明 | +| ---------------------------- | ----------------------------------------------- | +| -f/--file \ | 要使用的 JSON 配置文件,由该文件指定所有参数,本参数与命令行其他参数不能同时使用。没有默认值 | +| -c/--config-dir \ | TDengine 集群配置文件所在的目录,默认路径是 /etc/taos | +| -h/--host \ | 指定要连接的 TDengine 服务端的 FQDN,默认值为 localhost | +| -P/--port \ | 要连接的 TDengine 服务器的端口号,默认值为 6030 | +| -I/--interface \ | 插入模式,可选项有 taosc, rest, stmt, sml, sml-rest, 分别对应普通写入、restful 接口写入、参数绑定接口写入、schemaless 接口写入、restful schemaless 接口写入 (由 taosAdapter 提供)。默认值为 taosc | +| -u/--user \ | 用于连接 TDengine 服务端的用户名,默认为 root | +| -U/--supplement-insert | 写入数据而不提前建数据库和表,默认关闭 | +| -p/--password \ | 用于连接 TDengine 服务端的密码,默认值为 taosdata | +| -o/--output \ | 结果输出文件的路径,默认值为 ./output.txt | +| -T/--thread \ | 插入数据的线程数量,默认为 8 | +| -B/--interlace-rows \ |启用交错插入模式并同时指定向每个子表每次插入的数据行数。交错插入模式是指依次向每张子表插入由本参数所指定的行数并重复这个过程,直到所有子表的数据都插入完成。默认值为 0, 即向一张子表完成数据插入后才会向下一张子表进行数据插入 | +| -i/--insert-interval \ | 指定交错插入模式的插入间隔,单位为 ms,默认值为 0。 只有当 `-B/--interlace-rows` 大于 0 时才起作用 |意味着数据插入线程在为每个子表插入隔行扫描记录后,会等待该值指定的时间间隔后再进行下一轮写入 | +| -r/--rec-per-req \ | 每次向 TDengine 请求写入的数据行数,默认值为 30000 | +| -t/--tables \ | 指定子表的数量,默认为 10000 | +| -S/--timestampstep \ | 每个子表中插入数据的时间戳步长,单位是 ms,默认值是 1 | +| -n/--records \ | 每个子表插入的记录数,默认值为 10000 | +| -d/--database \ | 所使用的数据库的名称,默认值为 test | +| -b/--data-type \ | 指定超级表普通列数据类型, 多个使用逗号分隔,默认值: "FLOAT,INT,FLOAT" 如:`taosBenchmark -b "FLOAT,BINARY(8),NCHAR(16)"`| +| -A/--tag-type \ | 指定超级表标签列数据类型,多个使用逗号分隔,默认值: "INT,BINARY(24)" 如:`taosBenchmark -A "INT,BINARY(8),NCHAR(8)"`| +| -l/--columns \ | 超级表的数据列的总数量。如果同时设置了该参数和 `-b/--data-type`,则最后的结果列数为两者取大。如果本参数指定的数量大于 `-b/--data-type` 指定的列数,则未指定的列类型默认为 INT, 例如: `-l 5 -b float,double`, 那么最后的列为 `FLOAT,DOUBLE,INT,INT,INT`。如果 columns 指定的数量小于或等于 `-b/--data-type` 指定的列数,则结果为 `-b/--data-type` 指定的列和类型,例如: `-l 3 -b float,double,float,bigint`,那么最后的列为 `FLOAT,DOUBLE,FLOAT,BIGINT` | +| -L/--partial-col-num \ | 
指定某些列写入数据,其他列数据为 NULL。默认所有列都写入数据 | +| -w/--binwidth \ | nchar 和 binary 类型的默认长度,默认值为 64 | +| -m/--table-prefix \ | 子表名称的前缀,默认值为 "d" | +| -E/--escape-character | 开关参数,指定在超级表和子表名称中是否使用转义字符。默认值为不使用 | +| -C/--chinese | 开关参数,指定 nchar 和 binary 是否使用 Unicode 中文字符。默认值为不使用 | +| -N/--normal-table | 开关参数,指定只创建普通表,不创建超级表。默认值为 false。仅当插入模式为 taosc, stmt, rest 模式下可以使用 | +| -M/--random | 开关参数,插入数据为生成的随机值。默认值为 false。若配置此参数,则随机生成要插入的数据。对于数值类型的 标签列/数据列,其值为该类型取值范围内的随机值。对于 NCHAR 和 BINARY 类型的 标签列/数据列,其值为指定长度范围内的随机字符串 | +| -x/--aggr-func | 开关参数,指示插入后查询聚合函数。默认值为 false | +| -y/--answer-yes | 开关参数,要求用户在提示后确认才能继续 |默认值为 false 。 +| -O/--disorder \ | 指定乱序数据的百分比概率,其值域为 [0,50]。默认为 0,即没有乱序数据 | +| -R/--disorder-range \ | 指定乱序数据的时间戳回退范围。所生成的乱序时间戳为非乱序情况下应该使用的时间戳减去这个范围内的一个随机值。仅在 `-O/--disorder` 指定的乱序数据百分比大于 0 时有效| +| -F/--prepare_rand \ | 生成的随机数据中唯一值的数量。若为 1 则表示所有数据都相同。默认值为 10000 | +| -a/--replica \ | 创建数据库时指定其副本数,默认值为 1 | +| -k/--keep-trying \ | 失败后进行重试的次数,默认不重试。需使用 v3.0.9 以上版本| +| -z/--trying-interval \ | 失败重试间隔时间,单位为毫秒,仅在 -k 指定重试后有效。需使用 v3.0.9 以上版本 | +| -v/--vgroups \ | 创建数据库时指定 vgroups 数,仅对 TDengine v3.0+ 有效| +| -V/--version | 显示版本信息并退出。不能与其它参数混用| +| -?/--help | 显示帮助信息并退出。不能与其它参数混用| -- **-f/--file \** : - 要使用的 JSON 配置文件,由该文件指定所有参数,本参数与命令行其他参数不能同时使用。没有默认值。 -- **-c/--config-dir \** : - TDengine 集群配置文件所在的目录,默认路径是 /etc/taos 。 +## 输出性能指标 -- **-h/--host \** : - 指定要连接的 TDengine 服务端的 FQDN,默认值为 localhost 。 - -- **-P/--port \** : - 要连接的 TDengine 服务器的端口号,默认值为 6030 。 - -- **-I/--interface \** : - 插入模式,可选项有 taosc, rest, stmt, sml, sml-rest, 分别对应普通写入、restful 接口写入、参数绑定接口写入、schemaless 接口写入、restful schemaless 接口写入 (由 taosAdapter 提供)。默认值为 taosc。 - -- **-u/--user \** : - 用于连接 TDengine 服务端的用户名,默认为 root 。 - -- **-U/--supplement-insert ** : - 写入数据而不提前建数据库和表,默认关闭。 - -- **-p/--password \** : - 用于连接 TDengine 服务端的密码,默认值为 taosdata。 - -- **-o/--output \** : - 结果输出文件的路径,默认值为 ./output.txt。 - -- **-T/--thread \** : - 插入数据的线程数量,默认为 8 。 - -- **-B/--interlace-rows \** : - 
启用交错插入模式并同时指定向每个子表每次插入的数据行数。交错插入模式是指依次向每张子表插入由本参数所指定的行数并重复这个过程,直到所有子表的数据都插入完成。默认值为 0, 即向一张子表完成数据插入后才会向下一张子表进行数据插入。 - -- **-i/--insert-interval \** : - 指定交错插入模式的插入间隔,单位为 ms,默认值为 0。 只有当 `-B/--interlace-rows` 大于 0 时才起作用。意味着数据插入线程在为每个子表插入隔行扫描记录后,会等待该值指定的时间间隔后再进行下一轮写入。 - -- **-r/--rec-per-req \** : - 每次向 TDengine 请求写入的数据行数,默认值为 30000 。 - -- **-t/--tables \** : - 指定子表的数量,默认为 10000 。 - -- **-S/--timestampstep \** : - 每个子表中插入数据的时间戳步长,单位是 ms,默认值是 1。 - -- **-n/--records \** : - 每个子表插入的记录数,默认值为 10000 。 - -- **-d/--database \** : - 所使用的数据库的名称,默认值为 test 。 - -- **-b/--data-type \** : - 超级表的数据列的类型。如果不使用则默认为有三个数据列,其类型分别为 FLOAT, INT, FLOAT 。 - -- **-l/--columns \** : - 超级表的数据列的总数量。如果同时设置了该参数和 `-b/--data-type`,则最后的结果列数为两者取大。如果本参数指定的数量大于 `-b/--data-type` 指定的列数,则未指定的列类型默认为 INT, 例如: `-l 5 -b float,double`, 那么最后的列为 `FLOAT,DOUBLE,INT,INT,INT`。如果 columns 指定的数量小于或等于 `-b/--data-type` 指定的列数,则结果为 `-b/--data-type` 指定的列和类型,例如: `-l 3 -b float,double,float,bigint`,那么最后的列为 `FLOAT,DOUBLE,FLOAT,BIGINT` 。 - -- **-L/--partial-col-num \ **: - 指定某些列写入数据,其他列数据为 NULL。默认所有列都写入数据。 - -- **-A/--tag-type \** : - 超级表的标签列类型。nchar 和 binary 类型可以同时设置长度,例如: +#### 写入指标 +写入结束后会在最后两行输出总体性能指标,格式如下: +``` bash +SUCC: Spent 8.527298 (real 8.117379) seconds to insert rows: 10000000 with 8 thread(s) into test 1172704.41 (real 1231924.74) records/second +SUCC: insert delay, min: 19.6780ms, avg: 64.9390ms, p90: 94.6900ms, p95: 105.1870ms, p99: 130.6660ms, max: 157.0830ms ``` -taosBenchmark -A INT,DOUBLE,NCHAR,BINARY(16) +第一行写入速度统计: + - Spent: 写入总耗时,单位秒,从开始写入第一个数据开始计时到最后一条数据结束,这里表示共花了 8.527298 秒 + - real : 写入总耗时(调用引擎),此耗时已抛去测试框架准备数据时间,纯统计在引擎调用上花费的时间,示例为 8.117379 秒,8.527298 - 8.117379 = 0.409919 秒则为测试框架准备数据消耗时间 + - rows : 写入总行数,为 1000 万条数据 + - threads: 写入线程数,这里是 8 个线程同时写入 + - records/second 写入速度 = `写入总耗时`/ `写入总行数` , 括号中 `real` 同前,表示纯引擎写入速度 +第二行单个写入延时统计: + - min : 写入最小延时 + - avg : 写入平时延时 + - p90 : 写入延时 p90 百分位上的延时数 + - p95 : 写入延时 p95 百分位上的延时数 + - p99 : 写入延时 p99 百分位上的延时数 + - max : 写入最大延时 +通过此系列指标,可观察到写入请求延时分布情况 + +#### 查询指标 + 
+查询性能测试主要输出查询请求速度 QPS 指标, 输出格式如下: +``` bash +complete query with 3 threads and 10000 query delay avg: 0.002686s min: 0.001182s max: 0.012189s p90: 0.002977s p95: 0.003493s p99: 0.004645s SQL command: select ... +INFO: Total specified queries: 30000 +INFO: Spend 26.9530 second completed total queries: 30000, the QPS of all threads: 1113.049 ``` +- 第一行表示 3 个线程每个线程执行 10000 次查询及查询请求延时百分位分布情况,`SQL command` 为测试的查询语句 +- 第二行表示总共完成了 10000 * 3 = 30000 次查询总数 +- 第三行表示查询总耗时为 26.9653 秒,每秒查询率(QPS)为:1113.049 次/秒 -如果没有设置标签类型,默认是两个标签,其类型分别为 INT 和 BINARY(16)。 -注意:在有的 shell 比如 bash 命令里面 “()” 需要转义,则上述指令应为: +#### 订阅指标 +订阅性能测试主要输出消费者消费速度指标,输出格式如下: +``` bash +INFO: consumer id 0 has poll total msgs: 376, period rate: 37.592 msgs/s, total rows: 3760000, period rate: 375924.815 rows/s +INFO: consumer id 1 has poll total msgs: 362, period rate: 36.131 msgs/s, total rows: 3620000, period rate: 361313.504 rows/s +INFO: consumer id 2 has poll total msgs: 364, period rate: 36.378 msgs/s, total rows: 3640000, period rate: 363781.731 rows/s +INFO: consumerId: 0, consume msgs: 1000, consume rows: 10000000 +INFO: consumerId: 1, consume msgs: 1000, consume rows: 10000000 +INFO: consumerId: 2, consume msgs: 1000, consume rows: 10000000 +INFO: Consumed total msgs: 3000, total rows: 30000000 ``` -taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\) -``` - -- **-w/--binwidth \**: - nchar 和 binary 类型的默认长度,默认值为 64。 - -- **-m/--table-prefix \** : - 子表名称的前缀,默认值为 "d"。 - -- **-E/--escape-character** : - 开关参数,指定在超级表和子表名称中是否使用转义字符。默认值为不使用。 - -- **-C/--chinese** : - 开关参数,指定 nchar 和 binary 是否使用 Unicode 中文字符。默认值为不使用。 - -- **-N/--normal-table** : - 开关参数,指定只创建普通表,不创建超级表。默认值为 false。仅当插入模式为 taosc, stmt, rest 模式下可以使用。 - -- **-M/--random** : - 开关参数,插入数据为生成的随机值。默认值为 false。若配置此参数,则随机生成要插入的数据。对于数值类型的 标签列/数据列,其值为该类型取值范围内的随机值。对于 NCHAR 和 BINARY 类型的 标签列/数据列,其值为指定长度范围内的随机字符串。 - -- **-x/--aggr-func** : - 开关参数,指示插入后查询聚合函数。默认值为 false。 - -- **-y/--answer-yes** : - 开关参数,要求用户在提示后确认才能继续。默认值为 false 。 - -- **-O/--disorder \** : - 
指定乱序数据的百分比概率,其值域为 [0,50]。默认为 0,即没有乱序数据。 - -- **-R/--disorder-range \** : - 指定乱序数据的时间戳回退范围。所生成的乱序时间戳为非乱序情况下应该使用的时间戳减去这个范围内的一个随机值。仅在 `-O/--disorder` 指定的乱序数据百分比大于 0 时有效。 - -- **-F/--prepare_rand \** : - 生成的随机数据中唯一值的数量。若为 1 则表示所有数据都相同。默认值为 10000 。 - -- **-a/--replica \** : - 创建数据库时指定其副本数,默认值为 1 。 - -- ** -k/--keep-trying \** : 失败后进行重试的次数,默认不重试。需使用 v3.0.9 以上版本。 - -- ** -z/--trying-interval \** : 失败重试间隔时间,单位为毫秒,仅在 -k 指定重试后有效。需使用 v3.0.9 以上版本。 - -- **-v/--vgroups \** : - 创建数据库时指定 vgroups 数,仅对 TDengine v3.0+ 有效。 - -- **-V/--version** : - 显示版本信息并退出。不能与其它参数混用。 - -- **-?/--help** : - 显示帮助信息并退出。不能与其它参数混用。 +- 1 ~ 3 行实时输出每个消费者当前的消费速度,`msgs/s` 表示消费消息个数,每个消息中包含多行数据,`rows/s` 表示按行数统计的消费速度 +- 4 ~ 6 行是测试完成后每个消费者总体统计,统计共消费了多少条消息,共计多少行 +- 第 7 行所有消费者总体统计,`msgs` 表示共消费了多少条消息, `rows` 表示共消费了多少行数据 ## 配置文件参数详解 @@ -220,7 +192,7 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\) 本节所列参数适用于所有功能模式。 -- **filetype** : 要测试的功能,可选值为 `insert`, `query` 和 `subscribe`。分别对应插入、查询和订阅功能。每个配置文件中只能指定其中之一。 +- **filetype** : 功能分类,可选值为 `insert`, `query` 和 `subscribe`。分别对应插入、查询和订阅功能。每个配置文件中只能指定其中之一。 - **cfgdir** : TDengine 客户端配置文件所在的目录,默认路径是 /etc/taos 。 - **host** : 指定要连接的 TDengine 服务端的 FQDN,默认值为 localhost。 @@ -252,7 +224,7 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\) - **name** : 数据库名。 -- **drop** : 插入前是否删除数据库,可选项为 "yes" 或者 "no", 为 "no" 时不创建。默认删除。 +- **drop** : 数据库已存在时是否删除重建,可选项为 "yes" 或 "no", 默认为 “yes” #### 流式计算相关配置参数 @@ -331,21 +303,6 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\) - **repeat_ts_max** : 数值类型,复合主键开启情况下指定生成相同时间戳记录的最大个数 - **sqls** : 字符串数组类型,指定超级表创建成功后要执行的 sql 数组,sql 中指定表名前面要带数据库名,否则会报未指定数据库错误 -#### tsma配置参数 - -指定tsma的配置参数在 `super_tables` 中的 `tsmas` 中,具体参数如下。 - -- **name** : 指定 tsma 的名字,必选项。 - -- **function** : 指定 tsma 的函数,必选项。 - -- **interval** : 指定 tsma 的时间间隔,必选项。 - -- **sliding** : 指定 tsma 的窗口时间位移,必选项。 - -- **custom** : 指定 tsma 的创建语句结尾追加的自定义配置,可选项。 - -- **start_when_inserted** : 指定当插入多少行时创建 tsma,可选项,默认为 0。 #### 标签列与数据列配置参数 @@ -415,7 +372,8 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\) 
查询场景下 `filetype` 必须设置为 `query`。 `query_times` 指定运行查询的次数,数值类型 -查询场景可以通过设置 `kill_slow_query_threshold` 和 `kill_slow_query_interval` 参数来控制杀掉慢查询语句的执行,threshold 控制如果 exec_usec 超过指定时间的查询将被 taosBenchmark 杀掉,单位为秒;interval 控制休眠时间,避免持续查询慢查询消耗 CPU ,单位为秒。 +查询场景可以通过设置 `kill_slow_query_threshold` 和 `kill_slow_query_interval` 参数来控制杀掉慢查询语句的执行,threshold 控制如果 exec_usec 超过指定时间的查询将被 taosBenchmark 杀掉,单位为秒; +interval 控制休眠时间,避免持续查询慢查询消耗 CPU ,单位为秒。 其它通用参数详见[通用配置参数](#通用配置参数)。 @@ -423,6 +381,11 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\) 查询指定表(可以指定超级表、子表或普通表)的配置参数在 `specified_table_query` 中设置。 +- **mixed_query** : 查询模式,取值 “yes” 为`混合查询`, "no" 为`正常查询` , 默认值为 “no” + `混合查询`:`sqls` 中所有 sql 按 `threads` 线程数分组,每个线程执行一组, 线程中每个 sql 都需执行 `query_times` 次查询 + `正常查询`:`sqls` 中每个 sql 启动 `threads` 个线程,每个线程执行完 `query_times` 次后退出,下个 sql 需等待上个 sql 线程全部执行完退出后方可执行 + 不管 `正常查询` 还是 `混合查询` ,执行查询总次数是相同的 ,查询总次数 = `sqls` 个数 * `threads` * `query_times`, 区别是 `正常查询` 每个 sql 都会启动 `threads` 个线程,而 `混合查询` 只启动一次 `threads` 个线程执行完所有 SQL, 两者启动线程次数不一样。 + - **query_interval** : 查询时间间隔,单位是秒,默认值为 0。 - **threads** : 执行查询 SQL 的线程数,默认值为 1。 @@ -433,7 +396,8 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\) #### 查询超级表的配置参数 -查询超级表的配置参数在 `super_table_query` 中设置。 +查询超级表的配置参数在 `super_table_query` 中设置。 +超级表查询的线程模式与上面介绍的指定查询语句查询的 `正常查询` 模式相同,不同之处是本 `sqls` 使用所有子表填充。 - **stblname** : 指定要查询的超级表的名称,必填。 diff --git a/docs/zh/14-reference/03-taos-sql/10-function.md b/docs/zh/14-reference/03-taos-sql/10-function.md index 7799e6f50e..c075545ff3 100644 --- a/docs/zh/14-reference/03-taos-sql/10-function.md +++ b/docs/zh/14-reference/03-taos-sql/10-function.md @@ -193,6 +193,7 @@ ROUND(expr[, digits]) - `digits` 小于零表示丢掉小数位,并将数字四舍五入到小数点左侧 `digits` 位。若小数点左侧的位数小于 `digits`位,返回 0。 - 由于暂未支持 DECIMAL 类型,所以该函数会用 DOUBLE 和 FLOAT 来表示包含小数的结果,但是 DOUBLE 和 FLOAT 是有精度上限的,当位数太多时使用该函数可能没有意义。 - 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 +- `digits` 从 ver-3.3.3.0 开始支持。 **举例**: ```sql @@ -268,6 +269,8 @@ PI() **功能说明**:返回圆周率 π 的值。 +**版本**: 
ver-3.3.3.0 + **返回结果类型**:DOUBLE。 **适用数据类型**:无。 @@ -295,6 +298,8 @@ TRUNCATE(expr, digits) **功能说明**:获得指定字段按照指定位数截断的值。 +**版本**: ver-3.3.3.0 + **返回结果类型**:与 `expr` 字段的原始数据类型一致。 **适用数据类型**: @@ -333,6 +338,8 @@ EXP(expr) ``` **功能说明**:返回 e(自然对数的底)的指定乘方后的值。 +**版本**: ver-3.3.3.0 + **返回结果类型**:DOUBLE。 **适用数据类型**:数值类型。 @@ -360,6 +367,8 @@ LN(expr) **功能说明**:返回指定参数的自然对数。 +**版本**: ver-3.3.3.0 + **返回结果类型**:DOUBLE。 **适用数据类型**:数值类型。 @@ -388,6 +397,8 @@ MOD(expr1, expr2) **功能说明**:计算 expr1 % expr2 的结果。 +**版本**: ver-3.3.3.0 + **返回结果类型**:DOUBLE。 **适用数据类型**:数值类型。 @@ -421,6 +432,8 @@ RAND([seed]) **功能说明**:返回一个从0到1均匀分布的随机数。 +**版本**: ver-3.3.3.0 + **返回结果类型**:DOUBLE。 **适用数据类型**: @@ -464,6 +477,8 @@ SIGN(expr) **功能说明**:返回指定参数的符号。 +**版本**: ver-3.3.3.0 + **返回结果类型**:与指定字段的原始数据类型一致。 **适用数据类型**:数值类型。 @@ -504,6 +519,8 @@ DEGREES(expr) **功能说明**:计算指定参数由弧度值转为角度后的值。 +**版本**: ver-3.3.3.0 + **返回结果类型**:DOUBLE。 **适用数据类型**:数值类型。 @@ -532,6 +549,8 @@ RADIANS(expr) **功能说明**:计算指定参数由角度值转为弧度后的值。 +**版本**: ver-3.3.3.0 + **返回结果类型**:DOUBLE。 **适用数据类型**:数值类型。 @@ -702,6 +721,8 @@ TRIM([remstr FROM] expr) **功能说明**:返回去掉了所有 remstr 前缀或后缀的字符串 epxr 。 +**版本**: ver-3.3.3.0 + **返回结果类型**:与输入字段 epxr 的原始类型相同。 **适用数据类型**: @@ -773,6 +794,8 @@ SUBSTRING/SUBSTR(expr FROM pos [FOR len]) - 若 `len` 小于 1,返回空串。 - `pos` 是 1-base 的,若 `pos` 为 0,返回空串。 - 若 `pos` + `len` 大于 `len(expr)`,返回从 `pos` 开始到字符串结尾的子串,等同于执行 `substring(expr, pos)`。 +- `SUBSTRING` 函数等价于 `SUBSTR`, 从 ver-3.3.3.0 开始支持。 +- `SUBSTRING/SUBSTR(expr FROM pos [FOR len])` 语法从 ver-3.3.3.0 开始支持。 **举例**: ```sql @@ -809,6 +832,8 @@ SUBSTRING_INDEX(expr, delim, count) **功能说明**:返回字符串 `expr` 在出现指定次数分隔符的位置截取的子串。 +**版本**: ver-3.3.3.0 + **返回结果类型**:与输入字段 `expr` 的原始类型相同。 **适用数据类型**: @@ -862,6 +887,8 @@ CHAR(expr1 [, expr2] [, epxr3] ...) 
**功能说明**:将输入参数当作整数,并返回这些整数在 ASCII 编码中对应的字符。 +**版本**: ver-3.3.3.0 + **返回结果类型**:VARCHAR。 **适用数据类型**:整数类型,VARCHAR,NCHAR。 @@ -906,6 +933,8 @@ ASCII(expr) **功能说明**:返回字符串第一个字符的 ASCII 码。 +**版本**: ver-3.3.3.0 + **返回结果数据类型**:BIGINT。 **适用数据类型**:VARCHAR, NCHAR。 @@ -933,6 +962,8 @@ POSITION(expr1 IN expr2) **功能说明**:计算字符串 `expr1` 在字符串 `expr2` 中的位置。 +**版本**: ver-3.3.3.0 + **返回结果类型**:BIGINT。 **适用数据类型**: @@ -975,6 +1006,8 @@ REPLACE(expr, from_str, to_str) ``` **功能说明**:将字符串中的 `from_str` 全部替换为 `to_str`。 +**版本**: ver-3.3.3.0 + **返回结果类型**:与输入字段 `expr` 的原始类型相同。 **适用数据类型**: @@ -1005,6 +1038,8 @@ REPEAT(expr, count) ``` **功能说明**:返回将字符串重复指定次数得到的字符串。 +**版本**: ver-3.3.3.0 + **返回结果类型**:与输入字段 `expr` 的原始类型相同。 **适用数据类型**: @@ -1260,6 +1295,7 @@ TIMEDIFF(expr1, expr2 [, time_unit]) - `expr1`:表示时间戳的 BIGINT, TIMESTAMP 类型,或符合 ISO8601/RFC3339 标准的日期时间格式的 VARCHAR, NCHAR 类型。 - `expr2`:表示时间戳的 BIGINT, TIMESTAMP 类型,或符合 ISO8601/RFC3339 标准的日期时间格式的 VARCHAR, NCHAR 类型。 - `time_unit`:见使用说明。 +- ver-3.3.3.0 之前该函数结果为时间戳 `expr1` 和 `expr2` 的差值的绝对值,结果为正数。 **嵌套子查询支持**:适用于内层查询和外层查询。 @@ -1361,6 +1397,8 @@ WEEK(expr [, mode]) ``` **功能说明**:返回输入日期的周数。 +**版本**: ver-3.3.3.0 + **返回结果类型**:BIGINT。 **适用数据类型**: @@ -1422,6 +1460,8 @@ WEEKOFYEAR(expr) ``` **功能说明**:返回输入日期的周数。 +**版本**: ver-3.3.3.0 + **返回结果类型**:BIGINT。 **适用数据类型**:表示时间戳的 BIGINT, TIMESTAMP 类型,或符合 ISO8601/RFC3339 标准的日期时间格式的 VARCHAR, NCHAR 类型。 @@ -1449,6 +1489,8 @@ WEEKDAY(expr) ``` **功能说明**:返回输入日期是周几。 +**版本**: ver-3.3.3.0 + **返回结果类型**:BIGINT。 **适用数据类型**:表示 表示时间戳的 BIGINT, TIMESTAMP 类型,或符合 ISO8601/RFC3339 标准的日期时间格式的 VARCHAR, NCHAR 类型。 @@ -1476,6 +1518,8 @@ DAYOFWEEK(expr) ``` **功能说明**:返回输入日期是周几。 +**版本**: ver-3.3.3.0 + **返回结果类型**:BIGINT。 **适用数据类型**:表示时间戳的 BIGINT, TIMESTAMP 类型,或符合 ISO8601/RFC3339 标准的日期时间格式的 VARCHAR, NCHAR 类型。 @@ -1633,6 +1677,9 @@ STDDEV/STDDEV_POP(expr) **适用于**:表和超级表。 +**说明**: +- `STDDEV_POP` 函数等价于 `STDDEV` 函数,从 ver-3.3.3.0 开始支持。 + **举例**: ```sql taos> select id from test_stddev; @@ -1656,6 +1703,8 @@ VAR_POP(expr) **功能说明**:统计表中某列的总体方差。 +**版本**: ver-3.3.3.0 
+ **返回数据类型**:DOUBLE。 **适用数据类型**:数值类型。 @@ -1898,7 +1947,8 @@ MAX(expr) **适用于**:表和超级表。 -**使用说明**:max 函数可以接受字符串作为输入参数,当输入参数为字符串类型时,返回最大的字符串值。 +**使用说明**: +- max 函数可以接受字符串作为输入参数,当输入参数为字符串类型时,返回最大的字符串值,从 ver-3.3.3.0 开始支持,之前的版本不支持字符串参数。 ### MIN @@ -1914,7 +1964,8 @@ MIN(expr) **适用于**:表和超级表。 -**使用说明**:min 函数可以接受字符串作为输入参数,当输入参数为字符串类型时,返回最大的字符串值。 +**使用说明**: +- min 函数可以接受字符串作为输入参数,当输入参数为字符串类型时,返回最大的字符串值,从 ver-3.3.3.0 开始支持,之前的版本不支持字符串参数。 ### MODE diff --git a/docs/zh/14-reference/03-taos-sql/24-show.md b/docs/zh/14-reference/03-taos-sql/24-show.md index 81f891531f..3898920e65 100644 --- a/docs/zh/14-reference/03-taos-sql/24-show.md +++ b/docs/zh/14-reference/03-taos-sql/24-show.md @@ -155,7 +155,7 @@ SHOW QNODES; SHOW QUERIES; ``` -显示当前系统中正在进行的查询。 +显示当前系统中正在进行的写入(更新)/查询/删除。(由于内部 API 命名原因,所以统称 QUERIES) ## SHOW SCORES diff --git a/docs/zh/14-reference/09-error-code.md b/docs/zh/14-reference/09-error-code.md index 2bebe2406b..51453cef4c 100644 --- a/docs/zh/14-reference/09-error-code.md +++ b/docs/zh/14-reference/09-error-code.md @@ -403,7 +403,7 @@ description: TDengine 服务端的错误码列表和详细说明 | 0x8000260D | Tags number not matched | tag列个数不匹配 | 检查并修正SQL语句 | | 0x8000260E | Invalid tag name | 无效或不存在的tag名 | 检查并修正SQL语句 | | 0x80002610 | Value is too long | 值长度超出限制 | 检查并修正SQL语句或API参数 | -| 0x80002611 | Password can not be empty | 密码为空 | 使用合法的密码 | +| 0x80002611 | Password too short or empty | 密码为空或少于 8 个字符 | 使用合法的密码 | | 0x80002612 | Port should be an integer that is less than 65535 and greater than 0 | 端口号非法 | 检查并修正端口号 | | 0x80002613 | Endpoint should be in the format of 'fqdn:port' | 地址格式错误 | 检查并修正地址信息 | | 0x80002614 | This statement is no longer supported | 功能已经废弃 | 参考功能文档说明 | diff --git a/docs/zh/26-tdinternal/aggquery.png b/docs/zh/26-tdinternal/aggquery.png index 8e2094eb8c..50123b939c 100644 Binary files a/docs/zh/26-tdinternal/aggquery.png and b/docs/zh/26-tdinternal/aggquery.png differ diff --git a/docs/zh/26-tdinternal/cache.png b/docs/zh/26-tdinternal/cache.png index 
a5ff851a78..acc4569ae5 100644 Binary files a/docs/zh/26-tdinternal/cache.png and b/docs/zh/26-tdinternal/cache.png differ diff --git a/docs/zh/26-tdinternal/compression.png b/docs/zh/26-tdinternal/compression.png index 80f027ffd2..173990a838 100644 Binary files a/docs/zh/26-tdinternal/compression.png and b/docs/zh/26-tdinternal/compression.png differ diff --git a/docs/zh/26-tdinternal/streamarch.png b/docs/zh/26-tdinternal/streamarch.png index 92a2b61d39..5f1b017dad 100644 Binary files a/docs/zh/26-tdinternal/streamarch.png and b/docs/zh/26-tdinternal/streamarch.png differ diff --git a/docs/zh/26-tdinternal/streamtask.png b/docs/zh/26-tdinternal/streamtask.png index fc132c5c76..fa09f55592 100644 Binary files a/docs/zh/26-tdinternal/streamtask.png and b/docs/zh/26-tdinternal/streamtask.png differ diff --git a/docs/zh/26-tdinternal/taskarch.png b/docs/zh/26-tdinternal/taskarch.png index d9fae4908d..37b2369fbb 100644 Binary files a/docs/zh/26-tdinternal/taskarch.png and b/docs/zh/26-tdinternal/taskarch.png differ diff --git a/include/common/tanalytics.h b/include/common/tanalytics.h index d0af84ecfb..6ebdb38fa6 100644 --- a/include/common/tanalytics.h +++ b/include/common/tanalytics.h @@ -28,8 +28,8 @@ extern "C" { #define ANAL_FORECAST_DEFAULT_ROWS 10 #define ANAL_FORECAST_DEFAULT_CONF 95 #define ANAL_FORECAST_DEFAULT_WNCHECK 1 -#define ANAL_FORECAST_MAX_ROWS 10000 -#define ANAL_ANOMALY_WINDOW_MAX_ROWS 10000 +#define ANAL_FORECAST_MAX_ROWS 40000 +#define ANAL_ANOMALY_WINDOW_MAX_ROWS 40000 typedef struct { EAnalAlgoType type; diff --git a/include/common/tdataformat.h b/include/common/tdataformat.h index cb05f98f45..0b34e882c8 100644 --- a/include/common/tdataformat.h +++ b/include/common/tdataformat.h @@ -378,7 +378,7 @@ typedef struct { TAOS_MULTI_BIND *bind; } SBindInfo; int32_t tRowBuildFromBind(SBindInfo *infos, int32_t numOfInfos, bool infoSorted, const STSchema *pTSchema, - SArray *rowArray); + SArray *rowArray, bool *pOrdered, bool *pDupTs); // stmt2 binding 
int32_t tColDataAddValueByBind2(SColData *pColData, TAOS_STMT2_BIND *pBind, int32_t buffMaxLen, initGeosFn igeos, @@ -392,7 +392,7 @@ typedef struct { } SBindInfo2; int32_t tRowBuildFromBind2(SBindInfo2 *infos, int32_t numOfInfos, bool infoSorted, const STSchema *pTSchema, - SArray *rowArray); + SArray *rowArray, bool *pOrdered, bool *pDupTs); #endif diff --git a/include/common/tglobal.h b/include/common/tglobal.h index e6333d2ddc..584c4b5775 100644 --- a/include/common/tglobal.h +++ b/include/common/tglobal.h @@ -114,6 +114,7 @@ extern int32_t tsRetentionSpeedLimitMB; extern const char *tsAlterCompactTaskKeywords; extern int32_t tsNumOfCompactThreads; +extern int32_t tsNumOfRetentionThreads; // sync raft extern int32_t tsElectInterval; @@ -291,6 +292,7 @@ extern bool tsFilterScalarMode; extern int32_t tsMaxStreamBackendCache; extern int32_t tsPQSortMemThreshold; extern int32_t tsResolveFQDNRetryTime; +extern bool tsStreamCoverage; extern bool tsExperimental; // #define NEEDTO_COMPRESSS_MSG(size) (tsCompressMsgSize != -1 && (size) > tsCompressMsgSize) @@ -323,6 +325,8 @@ void printConfigNotMatch(SArray *array); int32_t compareSConfigItemArrays(SArray *mArray, const SArray *dArray, SArray *diffArray); bool isConifgItemLazyMode(SConfigItem *item); +int32_t taosUpdateTfsItemDisable(SConfig *pCfg, const char *value, void *pTfs); + #ifdef __cplusplus } #endif diff --git a/include/libs/crypt/crypt.h b/include/libs/crypt/crypt.h index 5f981b7ac8..78753874bd 100644 --- a/include/libs/crypt/crypt.h +++ b/include/libs/crypt/crypt.h @@ -26,7 +26,7 @@ typedef struct SCryptOpts { char* source; char* result; int32_t unitLen; - char key[17]; + char key[ENCRYPT_KEY_LEN + 1]; } SCryptOpts; int32_t CBC_Decrypt(SCryptOpts* opts); diff --git a/include/libs/function/function.h b/include/libs/function/function.h index 0ca1962b4e..126ed2c9b0 100644 --- a/include/libs/function/function.h +++ b/include/libs/function/function.h @@ -288,6 +288,7 @@ struct SScalarParam { bool colAlloced; 
SColumnInfoData *columnData; SHashObj *pHashFilter; + SHashObj *pHashFilterOthers; int32_t hashValueType; void *param; // other parameter, such as meta handle from vnode, to extract table name/tag value int32_t numOfRows; diff --git a/include/libs/function/tudf.h b/include/libs/function/tudf.h index 52cb847b6f..2c7e6216f5 100644 --- a/include/libs/function/tudf.h +++ b/include/libs/function/tudf.h @@ -109,8 +109,9 @@ int32_t doCallUdfAggProcess(UdfcFuncHandle handle, SSDataBlock *block, SUdfInter int32_t doCallUdfAggFinalize(UdfcFuncHandle handle, SUdfInterBuf *interBuf, SUdfInterBuf *resultData); // input: interbuf1, interbuf2 // output: resultBuf -int32_t doCallUdfAggMerge(UdfcFuncHandle handle, SUdfInterBuf *interBuf1, SUdfInterBuf *interBuf2, - SUdfInterBuf *resultBuf); +// udf todo: aggmerge +// int32_t doCallUdfAggMerge(UdfcFuncHandle handle, SUdfInterBuf *interBuf1, SUdfInterBuf *interBuf2, +// SUdfInterBuf *resultBuf); // input: block // output: resultData int32_t doCallUdfScalarFunc(UdfcFuncHandle handle, SScalarParam *input, int32_t numOfCols, SScalarParam *output); diff --git a/include/libs/scalar/scalar.h b/include/libs/scalar/scalar.h index fd936dd087..67fd954ad7 100644 --- a/include/libs/scalar/scalar.h +++ b/include/libs/scalar/scalar.h @@ -40,7 +40,7 @@ pDst need to freed in caller int32_t scalarCalculate(SNode *pNode, SArray *pBlockList, SScalarParam *pDst); int32_t scalarGetOperatorParamNum(EOperatorType type); -int32_t scalarGenerateSetFromList(void **data, void *pNode, uint32_t type); +int32_t scalarGenerateSetFromList(void **data, void *pNode, uint32_t type, int8_t processType); int32_t vectorGetConvertType(int32_t type1, int32_t type2); int32_t vectorConvertSingleColImpl(const SScalarParam *pIn, SScalarParam *pOut, int32_t *overflow, int32_t startIndex, int32_t numOfRows); diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index 06d99ae856..a4d89dcdcc 100644 --- a/include/libs/stream/tstream.h +++ 
b/include/libs/stream/tstream.h @@ -207,7 +207,6 @@ typedef struct { typedef struct { int32_t nodeId; - SEpSet epset; } SDownstreamTaskEpset; typedef enum { diff --git a/include/libs/tfs/tfs.h b/include/libs/tfs/tfs.h index 446c1d6fd7..a6a3c63a50 100644 --- a/include/libs/tfs/tfs.h +++ b/include/libs/tfs/tfs.h @@ -319,6 +319,15 @@ bool tfsDiskSpaceAvailable(STfs *pTfs, int32_t level); */ bool tfsDiskSpaceSufficient(STfs *pTfs, int32_t level, int32_t disk); +/** + * @brief Update disk size of tfs. + * + * @param pTfs The fs object. + * @param dir The directory. + * @param disable The disable flag. + */ +int32_t tfsUpdateDiskDisable(STfs *pTfs, const char *dir, int8_t disable); + #ifdef __cplusplus } #endif diff --git a/include/util/taoserror.h b/include/util/taoserror.h index 2e9b3a0d7f..890758762a 100644 --- a/include/util/taoserror.h +++ b/include/util/taoserror.h @@ -802,7 +802,7 @@ int32_t taosGetErrSize(); #define TSDB_CODE_PAR_TAGS_NOT_MATCHED TAOS_DEF_ERROR_CODE(0, 0x260D) #define TSDB_CODE_PAR_INVALID_TAG_NAME TAOS_DEF_ERROR_CODE(0, 0x260E) #define TSDB_CODE_PAR_NAME_OR_PASSWD_TOO_LONG TAOS_DEF_ERROR_CODE(0, 0x2610) -#define TSDB_CODE_PAR_PASSWD_EMPTY TAOS_DEF_ERROR_CODE(0, 0x2611) +#define TSDB_CODE_PAR_PASSWD_TOO_SHORT_OR_EMPTY TAOS_DEF_ERROR_CODE(0, 0x2611) #define TSDB_CODE_PAR_INVALID_PORT TAOS_DEF_ERROR_CODE(0, 0x2612) #define TSDB_CODE_PAR_INVALID_ENDPOINT TAOS_DEF_ERROR_CODE(0, 0x2613) #define TSDB_CODE_PAR_EXPRIE_STATEMENT TAOS_DEF_ERROR_CODE(0, 0x2614) diff --git a/packaging/deb/makedeb.sh b/packaging/deb/makedeb.sh index 906a227ad5..9d28b63a15 100755 --- a/packaging/deb/makedeb.sh +++ b/packaging/deb/makedeb.sh @@ -44,27 +44,27 @@ mkdir -p ${pkg_dir}${install_home_path}/include #mkdir -p ${pkg_dir}${install_home_path}/init.d mkdir -p ${pkg_dir}${install_home_path}/script -# download taoskeeper and build -if [ "$cpuType" = "x64" ] || [ "$cpuType" = "x86_64" ] || [ "$cpuType" = "amd64" ]; then - arch=amd64 -elif [ "$cpuType" = "x32" ] || [ 
"$cpuType" = "i386" ] || [ "$cpuType" = "i686" ]; then - arch=386 -elif [ "$cpuType" = "arm" ] || [ "$cpuType" = "aarch32" ]; then - arch=arm -elif [ "$cpuType" = "arm64" ] || [ "$cpuType" = "aarch64" ]; then - arch=arm64 -else - arch=$cpuType -fi +# # download taoskeeper and build +# if [ "$cpuType" = "x64" ] || [ "$cpuType" = "x86_64" ] || [ "$cpuType" = "amd64" ]; then +# arch=amd64 +# elif [ "$cpuType" = "x32" ] || [ "$cpuType" = "i386" ] || [ "$cpuType" = "i686" ]; then +# arch=386 +# elif [ "$cpuType" = "arm" ] || [ "$cpuType" = "aarch32" ]; then +# arch=arm +# elif [ "$cpuType" = "arm64" ] || [ "$cpuType" = "aarch64" ]; then +# arch=arm64 +# else +# arch=$cpuType +# fi -echo "${top_dir}/../enterprise/packaging/build_taoskeeper.sh -r ${arch} -e taoskeeper -t ver-${tdengine_ver}" -echo "$top_dir=${top_dir}" -taoskeeper_binary=`${top_dir}/../enterprise/packaging/build_taoskeeper.sh -r $arch -e taoskeeper -t ver-${tdengine_ver}` -echo "taoskeeper_binary: ${taoskeeper_binary}" +# echo "${top_dir}/../enterprise/packaging/build_taoskeeper.sh -r ${arch} -e taoskeeper -t ver-${tdengine_ver}" +# echo "$top_dir=${top_dir}" +# taoskeeper_binary=`${top_dir}/../enterprise/packaging/build_taoskeeper.sh -r $arch -e taoskeeper -t ver-${tdengine_ver}` +# echo "taoskeeper_binary: ${taoskeeper_binary}" # copy config files -cp $(dirname ${taoskeeper_binary})/config/taoskeeper.toml ${pkg_dir}${install_home_path}/cfg -cp $(dirname ${taoskeeper_binary})/taoskeeper.service ${pkg_dir}${install_home_path}/cfg +# cp $(dirname ${taoskeeper_binary})/config/taoskeeper.toml ${pkg_dir}${install_home_path}/cfg +# cp $(dirname ${taoskeeper_binary})/taoskeeper.service ${pkg_dir}${install_home_path}/cfg cp ${compile_dir}/../packaging/cfg/taos.cfg ${pkg_dir}${install_home_path}/cfg cp ${compile_dir}/../packaging/cfg/taosd.service ${pkg_dir}${install_home_path}/cfg @@ -75,7 +75,12 @@ fi if [ -f "${compile_dir}/test/cfg/taosadapter.service" ]; then cp ${compile_dir}/test/cfg/taosadapter.service 
${pkg_dir}${install_home_path}/cfg || : fi - +if [ -f "${compile_dir}/test/cfg/taoskeeper.toml" ]; then + cp ${compile_dir}/test/cfg/taoskeeper.toml ${pkg_dir}${install_home_path}/cfg || : +fi +if [ -f "${compile_dir}/test/cfg/taoskeeper.service" ]; then + cp ${compile_dir}/test/cfg/taoskeeper.service ${pkg_dir}${install_home_path}/cfg || : +fi if [ -f "${compile_dir}/../../../explorer/target/taos-explorer.service" ]; then cp ${compile_dir}/../../../explorer/target/taos-explorer.service ${pkg_dir}${install_home_path}/cfg || : fi @@ -83,7 +88,7 @@ if [ -f "${compile_dir}/../../../explorer/server/example/explorer.toml" ]; then cp ${compile_dir}/../../../explorer/server/example/explorer.toml ${pkg_dir}${install_home_path}/cfg || : fi -cp ${taoskeeper_binary} ${pkg_dir}${install_home_path}/bin +# cp ${taoskeeper_binary} ${pkg_dir}${install_home_path}/bin #cp ${compile_dir}/../packaging/deb/taosd ${pkg_dir}${install_home_path}/init.d cp ${compile_dir}/../packaging/tools/post.sh ${pkg_dir}${install_home_path}/script cp ${compile_dir}/../packaging/tools/preun.sh ${pkg_dir}${install_home_path}/script @@ -104,6 +109,9 @@ cp ${compile_dir}/build/bin/taosdump ${pkg_dir}${install_home_path if [ -f "${compile_dir}/build/bin/taosadapter" ]; then cp ${compile_dir}/build/bin/taosadapter ${pkg_dir}${install_home_path}/bin ||: fi +if [ -f "${compile_dir}/build/bin/taoskeeper" ]; then + cp ${compile_dir}/build/bin/taoskeeper ${pkg_dir}${install_home_path}/bin ||: +fi if [ -f "${compile_dir}/../../../explorer/target/release/taos-explorer" ]; then cp ${compile_dir}/../../../explorer/target/release/taos-explorer ${pkg_dir}${install_home_path}/bin ||: @@ -185,7 +193,7 @@ else exit 1 fi -rm -rf ${pkg_dir}/build-taoskeeper +# rm -rf ${pkg_dir}/build-taoskeeper # make deb package dpkg -b ${pkg_dir} $debname echo "make deb package success!" 
diff --git a/packaging/rpm/makerpm.sh b/packaging/rpm/makerpm.sh index f895193b6b..091e056a79 100755 --- a/packaging/rpm/makerpm.sh +++ b/packaging/rpm/makerpm.sh @@ -56,24 +56,23 @@ fi ${csudo}mkdir -p ${pkg_dir} cd ${pkg_dir} -# download taoskeeper and build -if [ "$cpuType" = "x64" ] || [ "$cpuType" = "x86_64" ] || [ "$cpuType" = "amd64" ]; then - arch=amd64 -elif [ "$cpuType" = "x32" ] || [ "$cpuType" = "i386" ] || [ "$cpuType" = "i686" ]; then - arch=386 -elif [ "$cpuType" = "arm" ] || [ "$cpuType" = "aarch32" ]; then - arch=arm -elif [ "$cpuType" = "arm64" ] || [ "$cpuType" = "aarch64" ]; then - arch=arm64 -else - arch=$cpuType -fi +# # download taoskeeper and build +# if [ "$cpuType" = "x64" ] || [ "$cpuType" = "x86_64" ] || [ "$cpuType" = "amd64" ]; then +# arch=amd64 +# elif [ "$cpuType" = "x32" ] || [ "$cpuType" = "i386" ] || [ "$cpuType" = "i686" ]; then +# arch=386 +# elif [ "$cpuType" = "arm" ] || [ "$cpuType" = "aarch32" ]; then +# arch=arm +# elif [ "$cpuType" = "arm64" ] || [ "$cpuType" = "aarch64" ]; then +# arch=arm64 +# else +# arch=$cpuType +# fi -cd ${top_dir} -echo "${top_dir}/../enterprise/packaging/build_taoskeeper.sh -r ${arch} -e taoskeeper -t ver-${tdengine_ver}" -taoskeeper_binary=`${top_dir}/../enterprise/packaging/build_taoskeeper.sh -r $arch -e taoskeeper -t ver-${tdengine_ver}` -echo "taoskeeper_binary: ${taoskeeper_binary}" -cd ${package_dir} +# cd ${top_dir} +# echo "${top_dir}/../enterprise/packaging/build_taoskeeper.sh -r ${arch} -e taoskeeper -t ver-${tdengine_ver}" +# taoskeeper_binary=`${top_dir}/../enterprise/packaging/build_taoskeeper.sh -r $arch -e taoskeeper -t ver-${tdengine_ver}` +# echo "taoskeeper_binary: ${taoskeeper_binary}" ${csudo}mkdir -p BUILD BUILDROOT RPMS SOURCES SPECS SRPMS @@ -106,4 +105,4 @@ mv ${output_dir}/TDengine-${tdengine_ver}.rpm ${output_dir}/${rpmname} cd .. 
${csudo}rm -rf ${pkg_dir} -rm -rf ${top_dir}/build-taoskeeper \ No newline at end of file +# rm -rf ${top_dir}/build-taoskeeper \ No newline at end of file diff --git a/packaging/rpm/tdengine.spec b/packaging/rpm/tdengine.spec index 3e23e29a40..c8a6270456 100644 --- a/packaging/rpm/tdengine.spec +++ b/packaging/rpm/tdengine.spec @@ -68,12 +68,12 @@ if [ -f %{_compiledir}/test/cfg/taosadapter.service ]; then cp %{_compiledir}/test/cfg/taosadapter.service %{buildroot}%{homepath}/cfg fi -if [ -f %{_compiledir}/../build-taoskeeper/config/taoskeeper.toml ]; then - cp %{_compiledir}/../build-taoskeeper/config/taoskeeper.toml %{buildroot}%{homepath}/cfg ||: +if [ -f %{_compiledir}/test/cfg/taoskeeper.toml ]; then + cp %{_compiledir}/test/cfg/taoskeeper.toml %{buildroot}%{homepath}/cfg ||: fi -if [ -f %{_compiledir}/../build-taoskeeper/taoskeeper.service ]; then - cp %{_compiledir}/../build-taoskeeper/taoskeeper.service %{buildroot}%{homepath}/cfg ||: +if [ -f %{_compiledir}/test/cfg/taoskeeper.service ]; then + cp %{_compiledir}/test/cfg/taoskeeper.service %{buildroot}%{homepath}/cfg ||: fi if [ -f %{_compiledir}/../../../explorer/target/taos-explorer.service ]; then @@ -104,8 +104,8 @@ if [ -f %{_compiledir}/../../../explorer/target/release/taos-explorer ]; then cp %{_compiledir}/../../../explorer/target/release/taos-explorer %{buildroot}%{homepath}/bin fi -if [ -f %{_compiledir}/../build-taoskeeper/taoskeeper ]; then - cp %{_compiledir}/../build-taoskeeper/taoskeeper %{buildroot}%{homepath}/bin +if [ -f %{_compiledir}/build/bin//taoskeeper ]; then + cp %{_compiledir}/build/bin//taoskeeper %{buildroot}%{homepath}/bin fi if [ -f %{_compiledir}/build/bin/taosadapter ]; then diff --git a/packaging/tools/makeclient.sh b/packaging/tools/makeclient.sh index d67d436fa7..87f4f57fd3 100755 --- a/packaging/tools/makeclient.sh +++ b/packaging/tools/makeclient.sh @@ -282,5 +282,3 @@ else rm -rf ${install_dir} ||: # mv ../"$(basename ${pkg_name}).tar.gz" . 
fi - -cd ${curr_dir} diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c index dfc18da8cd..f041e4b030 100644 --- a/source/client/src/clientMain.c +++ b/source/client/src/clientMain.c @@ -2168,17 +2168,41 @@ int taos_stmt2_bind_param(TAOS_STMT2 *stmt, TAOS_STMT2_BINDV *bindv, int32_t col pStmt->semWaited = true; } - int32_t code = 0; + SSHashObj *hashTbnames = tSimpleHashInit(100, taosGetDefaultHashFunction(TSDB_DATA_TYPE_VARCHAR)); + if (NULL == hashTbnames) { + tscError("stmt2 bind failed: %s", tstrerror(terrno)); + return terrno; + } + + int32_t code = TSDB_CODE_SUCCESS; for (int i = 0; i < bindv->count; ++i) { if (bindv->tbnames && bindv->tbnames[i]) { + if (pStmt->sql.stbInterlaceMode) { + if (tSimpleHashGet(hashTbnames, bindv->tbnames[i], strlen(bindv->tbnames[i])) != NULL) { + code = terrno = TSDB_CODE_PAR_TBNAME_DUPLICATED; + tscError("stmt2 bind failed: %s %s", tstrerror(terrno), bindv->tbnames[i]); + goto out; + } + + code = tSimpleHashPut(hashTbnames, bindv->tbnames[i], strlen(bindv->tbnames[i]), NULL, 0); + if (code) { + goto out; + } + } + code = stmtSetTbName2(stmt, bindv->tbnames[i]); if (code) { - return code; + goto out; } } if (bindv->tags && bindv->tags[i]) { code = stmtSetTbTags2(stmt, bindv->tags[i]); + if (code) { + goto out; + } + } else if (pStmt->bInfo.tbType == TSDB_CHILD_TABLE && pStmt->sql.autoCreateTbl) { + code = stmtSetTbTags2(stmt, NULL); if (code) { return code; } @@ -2189,26 +2213,29 @@ int taos_stmt2_bind_param(TAOS_STMT2 *stmt, TAOS_STMT2_BINDV *bindv, int32_t col if (bind->num <= 0 || bind->num > INT16_MAX) { tscError("invalid bind num %d", bind->num); - terrno = TSDB_CODE_TSC_STMT_BIND_NUMBER_ERROR; - return terrno; + code = terrno = TSDB_CODE_TSC_STMT_BIND_NUMBER_ERROR; + goto out; } int32_t insert = 0; (void)stmtIsInsert2(stmt, &insert); if (0 == insert && bind->num > 1) { tscError("only one row data allowed for query"); - terrno = TSDB_CODE_TSC_STMT_BIND_NUMBER_ERROR; - return terrno; + code = terrno 
= TSDB_CODE_TSC_STMT_BIND_NUMBER_ERROR; + goto out; } code = stmtBindBatch2(stmt, bind, col_idx); if (TSDB_CODE_SUCCESS != code) { - return code; + goto out; } } } - return TSDB_CODE_SUCCESS; +out: + tSimpleHashCleanup(hashTbnames); + + return code; } int taos_stmt2_exec(TAOS_STMT2 *stmt, int *affected_rows) { diff --git a/source/client/src/clientRawBlockWrite.c b/source/client/src/clientRawBlockWrite.c index 830144c12a..613645c4cd 100644 --- a/source/client/src/clientRawBlockWrite.c +++ b/source/client/src/clientRawBlockWrite.c @@ -68,10 +68,19 @@ static bool tmqAddJsonArrayItem(cJSON *array, cJSON *item){ } -static int32_t tmqWriteBatchMetaDataImpl(TAOS* taos, void* meta, int32_t metaLen); -static tb_uid_t processSuid(tb_uid_t suid, char* db) { return suid + MurmurHash3_32(db, strlen(db)); } +static int32_t tmqWriteBatchMetaDataImpl(TAOS* taos, void* meta, uint32_t metaLen); +static tb_uid_t processSuid(tb_uid_t suid, char* db) { + if (db == NULL) { + return suid; + } + return suid + MurmurHash3_32(db, strlen(db)); +} static void buildCreateTableJson(SSchemaWrapper* schemaRow, SSchemaWrapper* schemaTag, char* name, int64_t id, int8_t t, SColCmprWrapper* pColCmprRow, cJSON** pJson) { + if (schemaRow == NULL || name == NULL || pColCmprRow == NULL || pJson == NULL) { + uError("invalid parameter, schemaRow:%p, name:%p, pColCmprRow:%p, pJson:%p", schemaRow, name, pColCmprRow, pJson); + return; + } int32_t code = TSDB_CODE_SUCCESS; int8_t buildDefaultCompress = 0; if (pColCmprRow->nCols <= 0) { @@ -186,6 +195,9 @@ end: } static int32_t setCompressOption(cJSON* json, uint32_t para) { + if (json == NULL) { + return TSDB_CODE_INVALID_PARA; + } uint8_t encode = COMPRESS_L1_TYPE_U32(para); int32_t code = 0; if (encode != 0) { @@ -219,6 +231,10 @@ end: return code; } static void buildAlterSTableJson(void* alterData, int32_t alterDataLen, cJSON** pJson) { + if (alterData == NULL || pJson == NULL) { + uError("invalid parameter in %s", __func__); + return; + } SMAlterStbReq req 
= {0}; cJSON* json = NULL; char* string = NULL; @@ -362,6 +378,10 @@ end: } static void processCreateStb(SMqMetaRsp* metaRsp, cJSON** pJson) { + if (metaRsp == NULL || pJson == NULL) { + uError("invalid parameter in %s", __func__); + return; + } SVCreateStbReq req = {0}; SDecoder coder; @@ -382,6 +402,10 @@ end: } static void processAlterStb(SMqMetaRsp* metaRsp, cJSON** pJson) { + if (metaRsp == NULL || pJson == NULL) { + uError("invalid parameter in %s", __func__); + return; + } SVCreateStbReq req = {0}; SDecoder coder = {0}; uDebug("alter stable data:%p", metaRsp); @@ -402,6 +426,10 @@ end: } static void buildChildElement(cJSON* json, SVCreateTbReq* pCreateReq) { + if (json == NULL || pCreateReq == NULL) { + uError("invalid parameter in %s", __func__); + return; + } STag* pTag = (STag*)pCreateReq->ctb.pTag; char* sname = pCreateReq->ctb.stbName; char* name = pCreateReq->name; @@ -502,6 +530,10 @@ end: } static void buildCreateCTableJson(SVCreateTbReq* pCreateReq, int32_t nReqs, cJSON** pJson) { + if (pJson == NULL || pCreateReq == NULL) { + uError("invalid parameter in %s", __func__); + return; + } int32_t code = 0; char* string = NULL; cJSON* json = cJSON_CreateObject(); @@ -531,6 +563,10 @@ end: } static void processCreateTable(SMqMetaRsp* metaRsp, cJSON** pJson) { + if (pJson == NULL || metaRsp == NULL) { + uError("invalid parameter in %s", __func__); + return; + } SDecoder decoder = {0}; SVCreateTbBatchReq req = {0}; SVCreateTbReq* pCreateReq; @@ -561,6 +597,10 @@ end: } static void processAutoCreateTable(SMqDataRsp* rsp, char** string) { + if (rsp == NULL || string == NULL) { + uError("invalid parameter in %s", __func__); + return; + } SDecoder* decoder = NULL; SVCreateTbReq* pCreateReq = NULL; int32_t code = 0; @@ -611,6 +651,10 @@ end: } static void processAlterTable(SMqMetaRsp* metaRsp, cJSON** pJson) { + if (pJson == NULL || metaRsp == NULL) { + uError("invalid parameter in %s", __func__); + return; + } SDecoder decoder = {0}; SVAlterTbReq vAlterTbReq = 
{0}; char* string = NULL; @@ -850,6 +894,10 @@ end: } static void processDropSTable(SMqMetaRsp* metaRsp, cJSON** pJson) { + if (pJson == NULL || metaRsp == NULL) { + uError("invalid parameter in %s", __func__); + return; + } SDecoder decoder = {0}; SVDropStbReq req = {0}; cJSON* json = NULL; @@ -884,6 +932,10 @@ end: *pJson = json; } static void processDeleteTable(SMqMetaRsp* metaRsp, cJSON** pJson) { + if (pJson == NULL || metaRsp == NULL) { + uError("invalid parameter in %s", __func__); + return; + } SDeleteRes req = {0}; SDecoder coder = {0}; cJSON* json = NULL; @@ -921,6 +973,10 @@ end: } static void processDropTable(SMqMetaRsp* metaRsp, cJSON** pJson) { + if (pJson == NULL || metaRsp == NULL) { + uError("invalid parameter in %s", __func__); + return; + } SDecoder decoder = {0}; SVDropTbBatchReq req = {0}; cJSON* json = NULL; @@ -958,7 +1014,11 @@ end: *pJson = json; } -static int32_t taosCreateStb(TAOS* taos, void* meta, int32_t metaLen) { +static int32_t taosCreateStb(TAOS* taos, void* meta, uint32_t metaLen) { + if (taos == NULL || meta == NULL) { + uError("invalid parameter in %s", __func__); + return TSDB_CODE_INVALID_PARA; + } SVCreateStbReq req = {0}; SDecoder coder; SMCreateStbReq pReq = {0}; @@ -973,8 +1033,8 @@ static int32_t taosCreateStb(TAOS* taos, void* meta, int32_t metaLen) { goto end; } // decode and process req - void* data = POINTER_SHIFT(meta, sizeof(SMsgHead)); - int32_t len = metaLen - sizeof(SMsgHead); + void* data = POINTER_SHIFT(meta, sizeof(SMsgHead)); + uint32_t len = metaLen - sizeof(SMsgHead); tDecoderInit(&coder, data, len); if (tDecodeSVCreateStbReq(&coder, &req) < 0) { code = TSDB_CODE_INVALID_PARA; @@ -1068,7 +1128,11 @@ end: return code; } -static int32_t taosDropStb(TAOS* taos, void* meta, int32_t metaLen) { +static int32_t taosDropStb(TAOS* taos, void* meta, uint32_t metaLen) { + if (taos == NULL || meta == NULL) { + uError("invalid parameter in %s", __func__); + return TSDB_CODE_INVALID_PARA; + } SVDropStbReq req = {0}; 
SDecoder coder = {0}; SMDropStbReq pReq = {0}; @@ -1083,8 +1147,8 @@ static int32_t taosDropStb(TAOS* taos, void* meta, int32_t metaLen) { goto end; } // decode and process req - void* data = POINTER_SHIFT(meta, sizeof(SMsgHead)); - int32_t len = metaLen - sizeof(SMsgHead); + void* data = POINTER_SHIFT(meta, sizeof(SMsgHead)); + uint32_t len = metaLen - sizeof(SMsgHead); tDecoderInit(&coder, data, len); if (tDecodeSVDropStbReq(&coder, &req) < 0) { code = TSDB_CODE_INVALID_PARA; @@ -1173,11 +1237,19 @@ typedef struct SVgroupCreateTableBatch { } SVgroupCreateTableBatch; static void destroyCreateTbReqBatch(void* data) { + if (data == NULL) { + uError("invalid parameter in %s", __func__); + return; + } SVgroupCreateTableBatch* pTbBatch = (SVgroupCreateTableBatch*)data; taosArrayDestroy(pTbBatch->req.pArray); } -static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) { +static int32_t taosCreateTable(TAOS* taos, void* meta, uint32_t metaLen) { + if (taos == NULL || meta == NULL) { + uError("invalid parameter in %s", __func__); + return TSDB_CODE_INVALID_PARA; + } SVCreateTbBatchReq req = {0}; SDecoder coder = {0}; int32_t code = TSDB_CODE_SUCCESS; @@ -1195,8 +1267,8 @@ static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) { goto end; } // decode and process req - void* data = POINTER_SHIFT(meta, sizeof(SMsgHead)); - int32_t len = metaLen - sizeof(SMsgHead); + void* data = POINTER_SHIFT(meta, sizeof(SMsgHead)); + uint32_t len = metaLen - sizeof(SMsgHead); tDecoderInit(&coder, data, len); if (tDecodeSVCreateTbBatchReq(&coder, &req) < 0) { code = TSDB_CODE_INVALID_PARA; @@ -1359,11 +1431,19 @@ typedef struct SVgroupDropTableBatch { } SVgroupDropTableBatch; static void destroyDropTbReqBatch(void* data) { + if (data == NULL) { + uError("invalid parameter in %s", __func__); + return; + } SVgroupDropTableBatch* pTbBatch = (SVgroupDropTableBatch*)data; taosArrayDestroy(pTbBatch->req.pArray); } -static int32_t taosDropTable(TAOS* taos, void* 
meta, int32_t metaLen) { +static int32_t taosDropTable(TAOS* taos, void* meta, uint32_t metaLen) { + if (taos == NULL || meta == NULL) { + uError("invalid parameter in %s", __func__); + return TSDB_CODE_INVALID_PARA; + } SVDropTbBatchReq req = {0}; SDecoder coder = {0}; int32_t code = TSDB_CODE_SUCCESS; @@ -1380,8 +1460,8 @@ static int32_t taosDropTable(TAOS* taos, void* meta, int32_t metaLen) { goto end; } // decode and process req - void* data = POINTER_SHIFT(meta, sizeof(SMsgHead)); - int32_t len = metaLen - sizeof(SMsgHead); + void* data = POINTER_SHIFT(meta, sizeof(SMsgHead)); + uint32_t len = metaLen - sizeof(SMsgHead); tDecoderInit(&coder, data, len); if (tDecodeSVDropTbBatchReq(&coder, &req) < 0) { code = TSDB_CODE_INVALID_PARA; @@ -1475,7 +1555,11 @@ end: return code; } -static int32_t taosDeleteData(TAOS* taos, void* meta, int32_t metaLen) { +static int32_t taosDeleteData(TAOS* taos, void* meta, uint32_t metaLen) { + if (taos == NULL || meta == NULL) { + uError("invalid parameter in %s", __func__); + return TSDB_CODE_INVALID_PARA; + } SDeleteRes req = {0}; SDecoder coder = {0}; char sql[256] = {0}; @@ -1484,8 +1568,8 @@ static int32_t taosDeleteData(TAOS* taos, void* meta, int32_t metaLen) { uDebug("connId:0x%" PRIx64 " delete data, meta:%p, len:%d", *(int64_t*)taos, meta, metaLen); // decode and process req - void* data = POINTER_SHIFT(meta, sizeof(SMsgHead)); - int32_t len = metaLen - sizeof(SMsgHead); + void* data = POINTER_SHIFT(meta, sizeof(SMsgHead)); + uint32_t len = metaLen - sizeof(SMsgHead); tDecoderInit(&coder, data, len); if (tDecodeDeleteRes(&coder, &req) < 0) { code = TSDB_CODE_INVALID_PARA; @@ -1510,7 +1594,11 @@ end: return code; } -static int32_t taosAlterTable(TAOS* taos, void* meta, int32_t metaLen) { +static int32_t taosAlterTable(TAOS* taos, void* meta, uint32_t metaLen) { + if (taos == NULL || meta == NULL) { + uError("invalid parameter in %s", __func__); + return TSDB_CODE_INVALID_PARA; + } SVAlterTbReq req = {0}; SDecoder dcoder = 
{0}; int32_t code = TSDB_CODE_SUCCESS; @@ -1527,8 +1615,8 @@ static int32_t taosAlterTable(TAOS* taos, void* meta, int32_t metaLen) { goto end; } // decode and process req - void* data = POINTER_SHIFT(meta, sizeof(SMsgHead)); - int32_t len = metaLen - sizeof(SMsgHead); + void* data = POINTER_SHIFT(meta, sizeof(SMsgHead)); + uint32_t len = metaLen - sizeof(SMsgHead); tDecoderInit(&dcoder, data, len); if (tDecodeSVAlterTbReq(&dcoder, &req) < 0) { code = TSDB_CODE_INVALID_PARA; @@ -1632,7 +1720,8 @@ int taos_write_raw_block_with_fields(TAOS* taos, int rows, char* pData, const ch int taos_write_raw_block_with_fields_with_reqid(TAOS* taos, int rows, char* pData, const char* tbname, TAOS_FIELD* fields, int numFields, int64_t reqid) { - if (!taos || !pData || !tbname) { + if (taos == NULL || pData == NULL || tbname == NULL) { + uError("invalid parameter in %s", __func__); return TSDB_CODE_INVALID_PARA; } int32_t code = TSDB_CODE_SUCCESS; @@ -1693,7 +1782,7 @@ int taos_write_raw_block(TAOS* taos, int rows, char* pData, const char* tbname) } int taos_write_raw_block_with_reqid(TAOS* taos, int rows, char* pData, const char* tbname, int64_t reqid) { - if (!taos || !pData || !tbname) { + if (taos == NULL || pData == NULL || tbname == NULL) { return TSDB_CODE_INVALID_PARA; } int32_t code = TSDB_CODE_SUCCESS; @@ -1749,6 +1838,10 @@ end: } static void* getRawDataFromRes(void* pRetrieve) { + if (pRetrieve == NULL) { + uError("invalid parameter in %s", __func__); + return NULL; + } void* rawData = NULL; // deal with compatibility if (*(int64_t*)pRetrieve == 0) { @@ -1760,6 +1853,10 @@ static void* getRawDataFromRes(void* pRetrieve) { } static int32_t buildCreateTbMap(SMqDataRsp* rsp, SHashObj* pHashObj) { + if (rsp == NULL || pHashObj == NULL) { + uError("invalid parameter in %s", __func__); + return TSDB_CODE_INVALID_PARA; + } // find schema data info int32_t code = 0; SVCreateTbReq pCreateReq = {0}; @@ -1819,11 +1916,19 @@ typedef struct { } tbInfo; static void tmqFreeMeta(void* 
data) { + if (data == NULL) { + uError("invalid parameter in %s", __func__); + return; + } STableMeta* pTableMeta = *(STableMeta**)data; taosMemoryFree(pTableMeta); } static void freeRawCache(void* data) { + if (data == NULL) { + uError("invalid parameter in %s", __func__); + return; + } rawCacheInfo* pRawCache = (rawCacheInfo*)data; taosHashCleanup(pRawCache->pMetaHash); taosHashCleanup(pRawCache->pNameHash); @@ -1842,6 +1947,10 @@ static int32_t initRawCacheHash() { } static bool needRefreshMeta(void* rawData, STableMeta* pTableMeta, SSchemaWrapper* pSW) { + if (rawData == NULL || pTableMeta == NULL || pSW == NULL) { + uError("invalid parameter in %s", __func__); + return false; + } char* p = (char*)rawData; // | version | total length | total rows | blankFill | total columns | flag seg| block group id | column schema | each // column length | @@ -1877,6 +1986,10 @@ static bool needRefreshMeta(void* rawData, STableMeta* pTableMeta, SSchemaWrappe } static int32_t getRawCache(SHashObj** pVgHash, SHashObj** pNameHash, SHashObj** pMetaHash, void* key) { + if (pVgHash == NULL || pNameHash == NULL || pMetaHash == NULL || key == NULL) { + uError("invalid parameter in %s", __func__); + return TSDB_CODE_INVALID_PARA; + } int32_t code = 0; void* cacheInfo = taosHashGet(writeRawCache, &key, POINTER_BYTES); if (cacheInfo == NULL) { @@ -1905,6 +2018,10 @@ end: } static int32_t buildRawRequest(TAOS* taos, SRequestObj** pRequest, SCatalog** pCatalog, SRequestConnInfo* conn) { + if (taos == NULL || pRequest == NULL || pCatalog == NULL || conn == NULL) { + uError("invalid parameter in %s", __func__); + return TSDB_CODE_INVALID_PARA; + } int32_t code = 0; RAW_RETURN_CHECK(buildRequest(*(int64_t*)taos, "", 0, NULL, false, pRequest, 0)); (*pRequest)->syncQuery = true; @@ -1924,26 +2041,38 @@ end: } typedef int32_t _raw_decode_func_(SDecoder* pDecoder, SMqDataRsp* pRsp); -static int32_t decodeRawData(SDecoder* decoder, void* data, int32_t dataLen, _raw_decode_func_ func, +static 
int32_t decodeRawData(SDecoder* decoder, void* data, uint32_t dataLen, _raw_decode_func_ func, SMqRspObj* rspObj) { - int8_t dataVersion = *(int8_t*)data; - if (dataVersion >= MQ_DATA_RSP_VERSION) { - data = POINTER_SHIFT(data, sizeof(int8_t) + sizeof(int32_t)); - dataLen -= sizeof(int8_t) + sizeof(int32_t); + if (decoder == NULL || data == NULL || func == NULL || rspObj == NULL) { + uError("invalid parameter in %s", __func__); + return TSDB_CODE_INVALID_PARA; + } + int8_t dataVersion = *(int8_t*)data; + if (dataVersion >= MQ_DATA_RSP_VERSION) { + data = POINTER_SHIFT(data, sizeof(int8_t) + sizeof(int32_t)); + if (dataLen < sizeof(int8_t) + sizeof(int32_t)) { + return TSDB_CODE_INVALID_PARA; + } + dataLen -= sizeof(int8_t) + sizeof(int32_t); } - rspObj->resIter = -1; - tDecoderInit(decoder, data, dataLen); - int32_t code = func(decoder, &rspObj->dataRsp); - if (code != 0) { - SET_ERROR_MSG("decode mq taosx data rsp failed"); + rspObj->resIter = -1; + tDecoderInit(decoder, data, dataLen); + int32_t code = func(decoder, &rspObj->dataRsp); + if (code != 0) { + SET_ERROR_MSG("decode mq taosx data rsp failed"); } - return code; + return code; } static int32_t processCacheMeta(SHashObj* pVgHash, SHashObj* pNameHash, SHashObj* pMetaHash, SVCreateTbReq* pCreateReqDst, SCatalog* pCatalog, SRequestConnInfo* conn, SName* pName, STableMeta** pMeta, SSchemaWrapper* pSW, void* rawData, int32_t retry) { + if (pVgHash == NULL || pNameHash == NULL || pMetaHash == NULL || pCatalog == NULL || conn == NULL || pName == NULL || + pMeta == NULL || pSW == NULL || rawData == NULL) { + uError("invalid parameter in %s", __func__); + return TSDB_CODE_INVALID_PARA; + } int32_t code = 0; STableMeta* pTableMeta = NULL; tbInfo* tmpInfo = (tbInfo*)taosHashGet(pNameHash, pName->tname, strlen(pName->tname)); @@ -2000,7 +2129,11 @@ end: return code; } -static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen) { +static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, uint32_t 
dataLen) { + if (taos == NULL || data == NULL) { + uError("invalid parameter in %s", __func__); + return TSDB_CODE_INVALID_PARA; + } int32_t code = TSDB_CODE_SUCCESS; SQuery* pQuery = NULL; SMqRspObj rspObj = {0}; @@ -2073,7 +2206,11 @@ end: return code; } -static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen) { +static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, uint32_t dataLen) { + if (taos == NULL || data == NULL) { + uError("invalid parameter in %s", __func__); + return TSDB_CODE_INVALID_PARA; + } int32_t code = TSDB_CODE_SUCCESS; SQuery* pQuery = NULL; SMqRspObj rspObj = {0}; @@ -2162,6 +2299,10 @@ end: } static void processSimpleMeta(SMqMetaRsp* pMetaRsp, cJSON** meta) { + if (pMetaRsp == NULL || meta == NULL) { + uError("invalid parameter in %s", __func__); + return; + } if (pMetaRsp->resMsgType == TDMT_VND_CREATE_STB) { processCreateStb(pMetaRsp, meta); } else if (pMetaRsp->resMsgType == TDMT_VND_ALTER_STB) { @@ -2182,6 +2323,10 @@ static void processSimpleMeta(SMqMetaRsp* pMetaRsp, cJSON** meta) { } static void processBatchMetaToJson(SMqBatchMetaRsp* pMsgRsp, char** string) { + if (pMsgRsp == NULL || string == NULL) { + uError("invalid parameter in %s", __func__); + return; + } SDecoder coder; SMqBatchMetaRsp rsp = {0}; int32_t code = 0; @@ -2228,7 +2373,10 @@ end: } char* tmq_get_json_meta(TAOS_RES* res) { - if (res == NULL) return NULL; + if (res == NULL) { + uError("invalid parameter in %s", __func__); + return NULL; + } uDebug("tmq_get_json_meta res:%p", res); if (!TD_RES_TMQ_META(res) && !TD_RES_TMQ_METADATA(res) && !TD_RES_TMQ_BATCH_META(res)) { return NULL; @@ -2256,6 +2404,10 @@ char* tmq_get_json_meta(TAOS_RES* res) { void tmq_free_json_meta(char* jsonMeta) { taosMemoryFreeClear(jsonMeta); } static int32_t getOffSetLen(const SMqDataRsp* pRsp) { + if (pRsp == NULL) { + uError("invalid parameter in %s", __func__); + return TSDB_CODE_INVALID_PARA; + } SEncoder coder = {0}; tEncoderInit(&coder, NULL, 0); if 
(tEncodeSTqOffsetVal(&coder, &pRsp->reqOffset) < 0) return -1; @@ -2267,44 +2419,48 @@ static int32_t getOffSetLen(const SMqDataRsp* pRsp) { typedef int32_t __encode_func__(SEncoder* pEncoder, const SMqDataRsp* pRsp); static int32_t encodeMqDataRsp(__encode_func__* encodeFunc, SMqDataRsp* rspObj, tmq_raw_data* raw) { - int32_t len = 0; - int32_t code = 0; - SEncoder encoder = {0}; - void* buf = NULL; - tEncodeSize(encodeFunc, rspObj, len, code); - if (code < 0) { - code = TSDB_CODE_INVALID_MSG; - goto FAILED; + if (raw == NULL || encodeFunc == NULL || rspObj == NULL) { + uError("invalid parameter in %s", __func__); + return TSDB_CODE_INVALID_PARA; } - len += sizeof(int8_t) + sizeof(int32_t); - buf = taosMemoryCalloc(1, len); - if (buf == NULL) { - code = terrno; - goto FAILED; + uint32_t len = 0; + int32_t code = 0; + SEncoder encoder = {0}; + void* buf = NULL; + tEncodeSize(encodeFunc, rspObj, len, code); + if (code < 0) { + code = TSDB_CODE_INVALID_MSG; + goto FAILED; } - tEncoderInit(&encoder, buf, len); - if (tEncodeI8(&encoder, MQ_DATA_RSP_VERSION) < 0) { - code = TSDB_CODE_INVALID_MSG; - goto FAILED; + len += sizeof(int8_t) + sizeof(int32_t); + buf = taosMemoryCalloc(1, len); + if (buf == NULL) { + code = terrno; + goto FAILED; } - int32_t offsetLen = getOffSetLen(rspObj); - if (offsetLen <= 0) { - code = TSDB_CODE_INVALID_MSG; - goto FAILED; + tEncoderInit(&encoder, buf, len); + if (tEncodeI8(&encoder, MQ_DATA_RSP_VERSION) < 0) { + code = TSDB_CODE_INVALID_MSG; + goto FAILED; } - if (tEncodeI32(&encoder, offsetLen) < 0) { - code = TSDB_CODE_INVALID_MSG; - goto FAILED; + int32_t offsetLen = getOffSetLen(rspObj); + if (offsetLen <= 0) { + code = TSDB_CODE_INVALID_MSG; + goto FAILED; } - if (encodeFunc(&encoder, rspObj) < 0) { - code = TSDB_CODE_INVALID_MSG; - goto FAILED; + if (tEncodeI32(&encoder, offsetLen) < 0) { + code = TSDB_CODE_INVALID_MSG; + goto FAILED; } - tEncoderClear(&encoder); + if (encodeFunc(&encoder, rspObj) < 0) { + code = 
TSDB_CODE_INVALID_MSG; + goto FAILED; + } + tEncoderClear(&encoder); - raw->raw = buf; - raw->raw_len = len; - return code; + raw->raw = buf; + raw->raw_len = len; + return code; FAILED: tEncoderClear(&encoder); taosMemoryFree(buf); @@ -2312,13 +2468,14 @@ FAILED: } int32_t tmq_get_raw(TAOS_RES* res, tmq_raw_data* raw) { - if (!raw || !res) { + if (raw == NULL || res == NULL) { + uError("invalid parameter in %s", __func__); return TSDB_CODE_INVALID_PARA; } SMqRspObj* rspObj = ((SMqRspObj*)res); if (TD_RES_TMQ_META(res)) { raw->raw = rspObj->metaRsp.metaRsp; - raw->raw_len = rspObj->metaRsp.metaRspLen; + raw->raw_len = rspObj->metaRsp.metaRspLen >= 0 ? rspObj->metaRsp.metaRspLen : 0; raw->raw_type = rspObj->metaRsp.resMsgType; uDebug("tmq get raw type meta:%p", raw); } else if (TD_RES_TMQ(res)) { @@ -2378,6 +2535,10 @@ static int32_t writeRawInit() { } static int32_t writeRawImpl(TAOS* taos, void* buf, uint32_t len, uint16_t type) { + if (taos == NULL || buf == NULL) { + uError("invalid parameter in %s", __func__); + return TSDB_CODE_INVALID_PARA; + } if (writeRawInit() != 0) { return TSDB_CODE_INTERNAL_ERROR; } @@ -2415,8 +2576,9 @@ int32_t tmq_write_raw(TAOS* taos, tmq_raw_data raw) { return writeRawImpl(taos, raw.raw, raw.raw_len, raw.raw_type); } -static int32_t tmqWriteBatchMetaDataImpl(TAOS* taos, void* meta, int32_t metaLen) { +static int32_t tmqWriteBatchMetaDataImpl(TAOS* taos, void* meta, uint32_t metaLen) { if (taos == NULL || meta == NULL) { + uError("invalid parameter in %s", __func__); return TSDB_CODE_INVALID_PARA; } SMqBatchMetaRsp rsp = {0}; diff --git a/source/client/src/clientStmt2.c b/source/client/src/clientStmt2.c index 89a0fd3788..acd118acc9 100644 --- a/source/client/src/clientStmt2.c +++ b/source/client/src/clientStmt2.c @@ -77,7 +77,7 @@ static int32_t stmtCreateRequest(STscStmt2* pStmt) { } if (pStmt->db != NULL) { taosMemoryFreeClear(pStmt->exec.pRequest->pDb); - pStmt->exec.pRequest->pDb = strdup(pStmt->db); + pStmt->exec.pRequest->pDb = 
taosStrdup(pStmt->db); } if (TSDB_CODE_SUCCESS == code) { pStmt->exec.pRequest->syncQuery = true; @@ -1012,10 +1012,10 @@ int stmtSetTbTags2(TAOS_STMT2* stmt, TAOS_STMT2_BIND* tags) { } SBoundColInfo* tags_info = (SBoundColInfo*)pStmt->bInfo.boundTags; - if (tags_info->numOfBound <= 0 || tags_info->numOfCols <= 0) { - tscWarn("no tags or cols bound in sql, will not bound tags"); - return TSDB_CODE_SUCCESS; - } + // if (tags_info->numOfBound <= 0 || tags_info->numOfCols <= 0) { + // tscWarn("no tags or cols bound in sql, will not bound tags"); + // return TSDB_CODE_SUCCESS; + // } STableDataCxt** pDataBlock = (STableDataCxt**)taosHashGet(pStmt->exec.pBlockHash, pStmt->bInfo.tbFName, strlen(pStmt->bInfo.tbFName)); diff --git a/source/client/src/clientTmq.c b/source/client/src/clientTmq.c index b254f6eb8b..0cbdfc13e0 100644 --- a/source/client/src/clientTmq.c +++ b/source/client/src/clientTmq.c @@ -554,6 +554,9 @@ char** tmq_list_to_c_array(const tmq_list_t* list) { } static int32_t tmqCommitDone(SMqCommitCbParamSet* pParamSet) { + if (pParamSet == NULL) { + return TSDB_CODE_INVALID_PARA; + } int64_t refId = pParamSet->refId; int32_t code = 0; tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, refId); @@ -575,6 +578,9 @@ static int32_t tmqCommitDone(SMqCommitCbParamSet* pParamSet) { } static int32_t commitRspCountDown(SMqCommitCbParamSet* pParamSet, int64_t consumerId, const char* pTopic, int32_t vgId) { + if (pParamSet == NULL) { + return TSDB_CODE_INVALID_PARA; + } int32_t waitingRspNum = atomic_sub_fetch_32(&pParamSet->waitingRspNum, 1); if (waitingRspNum == 0) { tqDebugC("consumer:0x%" PRIx64 " topic:%s vgId:%d all commit-rsp received, commit completed", consumerId, pTopic, @@ -603,6 +609,9 @@ static int32_t tmqCommitCb(void* param, SDataBuf* pBuf, int32_t code) { static int32_t doSendCommitMsg(tmq_t* tmq, int32_t vgId, SEpSet* epSet, STqOffsetVal* offset, const char* pTopicName, SMqCommitCbParamSet* pParamSet) { + if (tmq == NULL || epSet == NULL || offset == NULL || 
pTopicName == NULL || pParamSet == NULL) { + return TSDB_CODE_INVALID_PARA; + } SMqVgOffset pOffset = {0}; pOffset.consumerId = tmq->consumerId; @@ -673,6 +682,9 @@ static int32_t doSendCommitMsg(tmq_t* tmq, int32_t vgId, SEpSet* epSet, STqOffse } static int32_t getTopicByName(tmq_t* tmq, const char* pTopicName, SMqClientTopic** topic) { + if (tmq == NULL || pTopicName == NULL || topic == NULL) { + return TSDB_CODE_INVALID_PARA; + } int32_t numOfTopics = taosArrayGetSize(tmq->clientTopics); for (int32_t i = 0; i < numOfTopics; ++i) { SMqClientTopic* pTopic = taosArrayGet(tmq->clientTopics, i); @@ -689,6 +701,9 @@ static int32_t getTopicByName(tmq_t* tmq, const char* pTopicName, SMqClientTopic static int32_t prepareCommitCbParamSet(tmq_t* tmq, tmq_commit_cb* pCommitFp, void* userParam, int32_t rspNum, SMqCommitCbParamSet** ppParamSet) { + if (tmq == NULL || ppParamSet == NULL) { + return TSDB_CODE_INVALID_PARA; + } SMqCommitCbParamSet* pParamSet = taosMemoryCalloc(1, sizeof(SMqCommitCbParamSet)); if (pParamSet == NULL) { return terrno; @@ -704,6 +719,9 @@ static int32_t prepareCommitCbParamSet(tmq_t* tmq, tmq_commit_cb* pCommitFp, voi } static int32_t getClientVg(tmq_t* tmq, char* pTopicName, int32_t vgId, SMqClientVg** pVg) { + if (tmq == NULL || pTopicName == NULL || pVg == NULL) { + return TSDB_CODE_INVALID_PARA; + } SMqClientTopic* pTopic = NULL; int32_t code = getTopicByName(tmq, pTopicName, &pTopic); if (code != 0) { @@ -724,6 +742,9 @@ static int32_t getClientVg(tmq_t* tmq, char* pTopicName, int32_t vgId, SMqClient } static int32_t innerCommit(tmq_t* tmq, char* pTopicName, STqOffsetVal* offsetVal, SMqClientVg* pVg, SMqCommitCbParamSet* pParamSet){ + if (tmq == NULL || pTopicName == NULL || offsetVal == NULL || pVg == NULL || pParamSet == NULL) { + return TSDB_CODE_INVALID_PARA; + } int32_t code = 0; if (offsetVal->type <= 0) { code = TSDB_CODE_TMQ_INVALID_MSG; @@ -754,6 +775,9 @@ static int32_t innerCommit(tmq_t* tmq, char* pTopicName, STqOffsetVal* offsetVal 
static int32_t asyncCommitOffset(tmq_t* tmq, char* pTopicName, int32_t vgId, STqOffsetVal* offsetVal, tmq_commit_cb* pCommitFp, void* userParam) { + if (tmq == NULL || pTopicName == NULL || offsetVal == NULL) { + return TSDB_CODE_INVALID_PARA; + } tqInfoC("consumer:0x%" PRIx64 " do manual commit offset for %s, vgId:%d", tmq->consumerId, pTopicName, vgId); SMqCommitCbParamSet* pParamSet = NULL; int32_t code = prepareCommitCbParamSet(tmq, pCommitFp, userParam, 0, &pParamSet); @@ -807,6 +831,9 @@ end: } static int32_t innerCommitAll(tmq_t* tmq, SMqCommitCbParamSet* pParamSet){ + if (tmq == NULL || pParamSet == NULL) { + return TSDB_CODE_INVALID_PARA; + } int32_t code = 0; taosRLockLatch(&tmq->lock); int32_t numOfTopics = taosArrayGetSize(tmq->clientTopics); @@ -842,6 +869,9 @@ END: } static void asyncCommitAllOffsets(tmq_t* tmq, tmq_commit_cb* pCommitFp, void* userParam) { + if (tmq == NULL) { + return; + } int32_t code = 0; SMqCommitCbParamSet* pParamSet = NULL; // init waitingRspNum as DEFAULT_COMMIT_CNT to prevent concurrency issue @@ -1071,12 +1101,15 @@ END: } static void defaultCommitCbFn(tmq_t* pTmq, int32_t code, void* param) { - if (code != 0) { + if (code != 0 && pTmq != NULL) { tqErrorC("consumer:0x%" PRIx64 ", failed to commit offset, code:%s", pTmq->consumerId, tstrerror(code)); } } static void tmqFreeRspWrapper(SMqRspWrapper* rspWrapper) { + if (rspWrapper == NULL) { + return; + } if (rspWrapper->tmqRspType == TMQ_MSG_TYPE__EP_RSP) { tDeleteSMqAskEpRsp(&rspWrapper->epRsp); } else if (rspWrapper->tmqRspType == TMQ_MSG_TYPE__POLL_DATA_RSP) { @@ -1091,12 +1124,18 @@ static void tmqFreeRspWrapper(SMqRspWrapper* rspWrapper) { } static void freeClientVg(void* param) { + if (param == NULL) { + return; + } SMqClientVg* pVg = param; tOffsetDestroy(&pVg->offsetInfo.endOffset); tOffsetDestroy(&pVg->offsetInfo.beginOffset); tOffsetDestroy(&pVg->offsetInfo.committedOffset); } static void freeClientTopic(void* param) { + if (param == NULL) { + return; + } 
SMqClientTopic* pTopic = param; taosMemoryFreeClear(pTopic->schema.pSchema); taosArrayDestroyEx(pTopic->vgs, freeClientVg); @@ -1104,6 +1143,9 @@ static void freeClientTopic(void* param) { static void initClientTopicFromRsp(SMqClientTopic* pTopic, SMqSubTopicEp* pTopicEp, SHashObj* pVgOffsetHashMap, tmq_t* tmq) { + if (pTopic == NULL || pTopicEp == NULL || pVgOffsetHashMap == NULL || tmq == NULL) { + return; + } pTopic->schema = pTopicEp->schema; pTopicEp->schema.nCols = 0; pTopicEp->schema.pSchema = NULL; @@ -1167,6 +1209,9 @@ static void initClientTopicFromRsp(SMqClientTopic* pTopic, SMqSubTopicEp* pTopic } static void buildNewTopicList(tmq_t* tmq, SArray* newTopics, const SMqAskEpRsp* pRsp){ + if (tmq == NULL || newTopics == NULL || pRsp == NULL) { + return; + } SHashObj* pVgOffsetHashMap = taosHashInit(64, MurmurHash3_32, false, HASH_NO_LOCK); if (pVgOffsetHashMap == NULL) { tqErrorC("consumer:0x%" PRIx64 " taos hash init null, code:%d", tmq->consumerId, terrno); @@ -1221,6 +1266,9 @@ static void buildNewTopicList(tmq_t* tmq, SArray* newTopics, const SMqAskEpRsp* } static void doUpdateLocalEp(tmq_t* tmq, int32_t epoch, const SMqAskEpRsp* pRsp) { + if (tmq == NULL || pRsp == NULL) { + return; + } int32_t topicNumGet = taosArrayGetSize(pRsp->topics); // vnode transform (epoch == tmq->epoch && topicNumGet != 0) // ask ep rsp (epoch == tmq->epoch && topicNumGet == 0) @@ -1337,6 +1385,9 @@ static int32_t askEpCb(void* param, SDataBuf* pMsg, int32_t code) { } static int32_t askEp(tmq_t* pTmq, void* param, bool sync, bool updateEpSet) { + if (pTmq == NULL) { + return TSDB_CODE_INVALID_PARA; + } SMqAskEpReq req = {0}; req.consumerId = pTmq->consumerId; req.epoch = updateEpSet ? 
-1 : pTmq->epoch; @@ -1395,6 +1446,9 @@ static int32_t askEp(tmq_t* pTmq, void* param, bool sync, bool updateEpSet) { } void tmqHandleAllDelayedTask(tmq_t* pTmq) { + if (pTmq == NULL) { + return; + } STaosQall* qall = NULL; int32_t code = 0; @@ -1443,6 +1497,7 @@ void tmqHandleAllDelayedTask(tmq_t* pTmq) { } void tmqClearUnhandleMsg(tmq_t* tmq) { + if (tmq == NULL) return; SMqRspWrapper* rspWrapper = NULL; while (taosGetQitem(tmq->qall, (void**)&rspWrapper) != 0) { tmqFreeRspWrapper(rspWrapper); @@ -1507,6 +1562,7 @@ int32_t tmq_subscription(tmq_t* tmq, tmq_list_t** topics) { } void tmqFreeImpl(void* handle) { + if (handle == NULL) return; tmq_t* tmq = (tmq_t*)handle; int64_t id = tmq->consumerId; @@ -1715,6 +1771,7 @@ _failed: } static int32_t syncAskEp(tmq_t* pTmq) { + if (pTmq == NULL) return TSDB_CODE_INVALID_PARA; SAskEpInfo* pInfo = taosMemoryMalloc(sizeof(SAskEpInfo)); if (pInfo == NULL) return terrno; if (tsem2_init(&pInfo->sem, 0, 0) != 0) { @@ -1897,6 +1954,9 @@ void tmq_conf_set_auto_commit_cb(tmq_conf_t* conf, tmq_commit_cb* cb, void* para } static void getVgInfo(tmq_t* tmq, char* topicName, int32_t vgId, SMqClientVg** pVg) { + if (tmq == NULL || topicName == NULL || pVg == NULL) { + return; + } int32_t topicNumCur = taosArrayGetSize(tmq->clientTopics); for (int i = 0; i < topicNumCur; i++) { SMqClientTopic* pTopicCur = taosArrayGet(tmq->clientTopics, i); @@ -1914,6 +1974,9 @@ static void getVgInfo(tmq_t* tmq, char* topicName, int32_t vgId, SMqClientVg** p } static SMqClientTopic* getTopicInfo(tmq_t* tmq, char* topicName) { + if (tmq == NULL || topicName == NULL) { + return NULL; + } int32_t topicNumCur = taosArrayGetSize(tmq->clientTopics); for (int i = 0; i < topicNumCur; i++) { SMqClientTopic* pTopicCur = taosArrayGet(tmq->clientTopics, i); @@ -2026,6 +2089,9 @@ EXIT: } void tmqBuildConsumeReqImpl(SMqPollReq* pReq, tmq_t* tmq, int64_t timeout, SMqClientTopic* pTopic, SMqClientVg* pVg) { + if (pReq == NULL || tmq == NULL || pTopic == NULL || pVg == 
NULL) { + return; + } (void)snprintf(pReq->subKey, TSDB_SUBSCRIBE_KEY_LEN, "%s%s%s", tmq->groupId, TMQ_SEPARATOR, pTopic->topicName); pReq->withTbName = tmq->withTbName; pReq->consumerId = tmq->consumerId; @@ -2072,7 +2138,7 @@ void changeByteEndian(char* pData) { } static void tmqGetRawDataRowsPrecisionFromRes(void* pRetrieve, void** rawData, int64_t* rows, int32_t* precision) { - if (pRetrieve == NULL) { + if (pRetrieve == NULL || rawData == NULL || rows == NULL) { return; } if (*(int64_t*)pRetrieve == 0) { @@ -2092,6 +2158,9 @@ static void tmqGetRawDataRowsPrecisionFromRes(void* pRetrieve, void** rawData, i static void tmqBuildRspFromWrapperInner(SMqPollRspWrapper* pWrapper, SMqClientVg* pVg, int64_t* numOfRows, SMqRspObj* pRspObj) { + if (pWrapper == NULL || pVg == NULL || numOfRows == NULL || pRspObj == NULL) { + return; + } pRspObj->resIter = -1; pRspObj->resInfo.totalRows = 0; pRspObj->resInfo.precision = TSDB_TIME_PRECISION_MILLI; @@ -2130,6 +2199,9 @@ static void tmqBuildRspFromWrapperInner(SMqPollRspWrapper* pWrapper, SMqClientVg } static int32_t doTmqPollImpl(tmq_t* pTmq, SMqClientTopic* pTopic, SMqClientVg* pVg, int64_t timeout) { + if (pTmq == NULL || pTopic == NULL || pVg == NULL) { + return TSDB_CODE_INVALID_MSG; + } SMqPollReq req = {0}; char* msg = NULL; SMqPollCbParam* pParam = NULL; @@ -2199,6 +2271,9 @@ static int32_t doTmqPollImpl(tmq_t* pTmq, SMqClientTopic* pTopic, SMqClientVg* p // broadcast the poll request to all related vnodes static int32_t tmqPollImpl(tmq_t* tmq, int64_t timeout) { + if (tmq == NULL) { + return TSDB_CODE_INVALID_MSG; + } int32_t code = 0; taosWLockLatch(&tmq->lock); @@ -2258,6 +2333,9 @@ end: static void updateVgInfo(SMqClientVg* pVg, STqOffsetVal* reqOffset, STqOffsetVal* rspOffset, int64_t sver, int64_t ever, int64_t consumerId, bool hasData) { + if (pVg == NULL || reqOffset == NULL || rspOffset == NULL) { + return; + } if (!pVg->seekUpdated) { tqDebugC("consumer:0x%" PRIx64 " local offset is update, since seekupdate 
not set", consumerId); if (hasData) { @@ -2283,6 +2361,9 @@ static SMqRspObj* buildRsp(SMqPollRspWrapper* pollRspWrapper){ SMqBatchMetaRsp batchMetaRsp; } MEMSIZE; + if (pollRspWrapper == NULL) { + return NULL; + } SMqRspObj* pRspObj = taosMemoryCalloc(1, sizeof(SMqRspObj)); if (pRspObj == NULL) { tqErrorC("buildRsp:failed to allocate memory"); @@ -2297,6 +2378,9 @@ static SMqRspObj* buildRsp(SMqPollRspWrapper* pollRspWrapper){ } static void processMqRspError(tmq_t* tmq, SMqRspWrapper* pRspWrapper){ + if (tmq == NULL || pRspWrapper == NULL) { + return; + } SMqPollRspWrapper* pollRspWrapper = &pRspWrapper->pollRsp; if (pRspWrapper->code == TSDB_CODE_VND_INVALID_VGROUP_ID) { // for vnode transform @@ -2322,6 +2406,9 @@ static void processMqRspError(tmq_t* tmq, SMqRspWrapper* pRspWrapper){ taosWUnLockLatch(&tmq->lock); } static SMqRspObj* processMqRsp(tmq_t* tmq, SMqRspWrapper* pRspWrapper){ + if (tmq == NULL || pRspWrapper == NULL) { + return NULL; + } SMqRspObj* pRspObj = NULL; if (pRspWrapper->tmqRspType == TMQ_MSG_TYPE__EP_RSP) { @@ -2401,6 +2488,9 @@ static SMqRspObj* processMqRsp(tmq_t* tmq, SMqRspWrapper* pRspWrapper){ } static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout) { + if (tmq == NULL) { + return NULL; + } tqDebugC("consumer:0x%" PRIx64 " start to handle the rsp, total:%d", tmq->consumerId, taosQallItemSize(tmq->qall)); void* returnVal = NULL; @@ -2478,6 +2568,7 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t timeout) { } static void displayConsumeStatistics(tmq_t* pTmq) { + if (pTmq == NULL) return; taosRLockLatch(&pTmq->lock); int32_t numOfTopics = taosArrayGetSize(pTmq->clientTopics); tqInfoC("consumer:0x%" PRIx64 " closing poll:%" PRId64 " rows:%" PRId64 " topics:%d, final epoch:%d", @@ -2680,7 +2771,11 @@ void tmq_commit_async(tmq_t* tmq, const TAOS_RES* pRes, tmq_commit_cb* cb, void* } } -static void commitCallBackFn(tmq_t* UNUSED_PARAM(tmq), int32_t code, void* param) { +static void commitCallBackFn(tmq_t* tmq, int32_t code, void* param) 
{ + if (param == NULL) { + tqErrorC("invalid param in commit cb"); + return; + } SSyncCommitInfo* pInfo = (SSyncCommitInfo*)param; pInfo->code = code; if (tsem2_post(&pInfo->sem) != 0){ @@ -2732,6 +2827,10 @@ int32_t tmq_commit_sync(tmq_t* tmq, const TAOS_RES* pRes) { // wal range will be ok after calling tmq_get_topic_assignment or poll interface static int32_t checkWalRange(SVgOffsetInfo* offset, int64_t value) { + if (offset == NULL) { + tqErrorC("invalid offset, null"); + return TSDB_CODE_INVALID_PARA; + } if (offset->walVerBegin == -1 || offset->walVerEnd == -1) { tqErrorC("Assignment or poll interface need to be called first"); return TSDB_CODE_TMQ_NEED_INITIALIZED; @@ -2852,6 +2951,9 @@ end: int32_t tmqGetNextResInfo(TAOS_RES* res, bool convertUcs4, SReqResultInfo** pResInfo) { + if (res == NULL || pResInfo == NULL) { + return TSDB_CODE_INVALID_PARA; + } SMqRspObj* pRspObj = (SMqRspObj*)res; SMqDataRsp* data = &pRspObj->dataRsp; @@ -2889,7 +2991,7 @@ int32_t tmqGetNextResInfo(TAOS_RES* res, bool convertUcs4, SReqResultInfo** pRes } static int32_t tmqGetWalInfoCb(void* param, SDataBuf* pMsg, int32_t code) { - if (param == NULL) { + if (param == NULL || pMsg == NULL) { return code; } SMqVgWalInfoParam* pParam = param; @@ -2962,6 +3064,9 @@ static bool isInSnapshotMode(int8_t type, bool useSnapshot) { } static int32_t tmCommittedCb(void* param, SDataBuf* pMsg, int32_t code) { + if (param == NULL) { + return code; + } SMqCommittedParam* pParam = param; if (code != 0) { @@ -2992,6 +3097,9 @@ end: } int64_t getCommittedFromServer(tmq_t* tmq, char* tname, int32_t vgId, SEpSet* epSet) { + if (tmq == NULL || tname == NULL || epSet == NULL) { + return TSDB_CODE_INVALID_PARA; + } int32_t code = 0; SMqVgOffset pOffset = {0}; diff --git a/source/client/test/clientTests.cpp b/source/client/test/clientTests.cpp index 5a4393bfed..3e4667fbe7 100644 --- a/source/client/test/clientTests.cpp +++ b/source/client/test/clientTests.cpp @@ -788,6 +788,8 @@ TEST(clientCase, 
insert_test) { } TEST(clientCase, projection_query_tables) { + taos_options(TSDB_OPTION_CONFIGDIR, "/home/lisa/first/cfg"); + TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); ASSERT_NE(pConn, nullptr); @@ -796,6 +798,12 @@ TEST(clientCase, projection_query_tables) { pRes= taos_query(pConn, "use abc1"); taos_free_result(pRes); + pRes = taos_query(pConn, "select forecast(k,'algo=arima,wncheck=0') from t1 where ts<='2024-11-15 1:7:44'"); + if (taos_errno(pRes) != 0) { + (void)printf("failed to create table tu, reason:%s\n", taos_errstr(pRes)); + } + taos_free_result(pRes); + pRes = taos_query(pConn, "create table tu using st2 tags(2)"); if (taos_errno(pRes) != 0) { (void)printf("failed to create table tu, reason:%s\n", taos_errstr(pRes)); diff --git a/source/common/src/tdataformat.c b/source/common/src/tdataformat.c index c1ab7ccff0..f1aacfed15 100644 --- a/source/common/src/tdataformat.c +++ b/source/common/src/tdataformat.c @@ -449,9 +449,11 @@ static int32_t tBindInfoCompare(const void *p1, const void *p2, const void *para * `infoSorted` is whether the bind information is sorted by column id * `pTSchema` is the schema of the table * `rowArray` is the array to store the rows + * `pOrdered` is the pointer to store ordered + * `pDupTs` is the pointer to store duplicateTs */ int32_t tRowBuildFromBind(SBindInfo *infos, int32_t numOfInfos, bool infoSorted, const STSchema *pTSchema, - SArray *rowArray) { + SArray *rowArray, bool *pOrdered, bool *pDupTs) { if (infos == NULL || numOfInfos <= 0 || numOfInfos > pTSchema->numOfCols || pTSchema == NULL || rowArray == NULL) { return TSDB_CODE_INVALID_PARA; } @@ -469,6 +471,7 @@ int32_t tRowBuildFromBind(SBindInfo *infos, int32_t numOfInfos, bool infoSorted, return terrno; } + SRowKey rowKey, lastRowKey; for (int32_t iRow = 0; iRow < numOfRows; iRow++) { taosArrayClear(colValArray); @@ -507,6 +510,22 @@ int32_t tRowBuildFromBind(SBindInfo *infos, int32_t numOfInfos, bool infoSorted, code = terrno; goto _exit; 
} + + if (pOrdered && pDupTs) { + tRowGetKey(row, &rowKey); + if (iRow == 0) { + *pOrdered = true; + *pDupTs = false; + } else { + // no more compare if we already get disordered or duplicate rows + if (*pOrdered && !*pDupTs) { + int32_t code = tRowKeyCompare(&rowKey, &lastRowKey); + *pOrdered = (code >= 0); + *pDupTs = (code == 0); + } + } + lastRowKey = rowKey; + } } _exit: @@ -3235,9 +3254,11 @@ _exit: * `infoSorted` is whether the bind information is sorted by column id * `pTSchema` is the schema of the table * `rowArray` is the array to store the rows + * `pOrdered` is the pointer to store ordered + * `pDupTs` is the pointer to store duplicateTs */ int32_t tRowBuildFromBind2(SBindInfo2 *infos, int32_t numOfInfos, bool infoSorted, const STSchema *pTSchema, - SArray *rowArray) { + SArray *rowArray, bool *pOrdered, bool *pDupTs) { if (infos == NULL || numOfInfos <= 0 || numOfInfos > pTSchema->numOfCols || pTSchema == NULL || rowArray == NULL) { return TSDB_CODE_INVALID_PARA; } @@ -3266,6 +3287,7 @@ int32_t tRowBuildFromBind2(SBindInfo2 *infos, int32_t numOfInfos, bool infoSorte } } + SRowKey rowKey, lastRowKey; for (int32_t iRow = 0; iRow < numOfRows; iRow++) { taosArrayClear(colValArray); @@ -3317,6 +3339,22 @@ int32_t tRowBuildFromBind2(SBindInfo2 *infos, int32_t numOfInfos, bool infoSorte code = terrno; goto _exit; } + + if (pOrdered && pDupTs) { + tRowGetKey(row, &rowKey); + if (iRow == 0) { + *pOrdered = true; + *pDupTs = false; + } else { + // no more compare if we already get disordered or duplicate rows + if (*pOrdered && !*pDupTs) { + int32_t code = tRowKeyCompare(&rowKey, &lastRowKey); + *pOrdered = (code >= 0); + *pDupTs = (code == 0); + } + } + lastRowKey = rowKey; + } } _exit: diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 905dcb4fda..c58ad32a18 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -14,12 +14,12 @@ */ #define _DEFAULT_SOURCE +#include "tglobal.h" #include "cJSON.h" #include 
"defines.h" #include "os.h" #include "osString.h" #include "tconfig.h" -#include "tglobal.h" #include "tgrant.h" #include "tjson.h" #include "tlog.h" @@ -104,6 +104,7 @@ int32_t tsRetentionSpeedLimitMB = 0; // unlimited const char *tsAlterCompactTaskKeywords = "max_compact_tasks"; int32_t tsNumOfCompactThreads = 2; +int32_t tsNumOfRetentionThreads = 1; // sync raft int32_t tsElectInterval = 25 * 1000; @@ -328,6 +329,7 @@ int64_t tsStreamBufferSize = 128 * 1024 * 1024; bool tsFilterScalarMode = false; int tsResolveFQDNRetryTime = 100; // seconds int tsStreamAggCnt = 100000; +bool tsStreamCoverage = false; bool tsUpdateCacheBatch = true; @@ -390,6 +392,16 @@ int32_t taosSetTfsCfg(SConfig *pCfg) { int32_t taosSetTfsCfg(SConfig *pCfg); #endif +#ifndef _STORAGE +int32_t cfgUpdateTfsItemDisable(SConfig *pCfg, const char *value, void *pTfs) { return TSDB_CODE_INVALID_CFG; } +#else +int32_t cfgUpdateTfsItemDisable(SConfig *pCfg, const char *value, void *pTfs); +#endif + +int32_t taosUpdateTfsItemDisable(SConfig *pCfg, const char *value, void *pTfs) { + return cfgUpdateTfsItemDisable(pCfg, value, pTfs); +} + static int32_t taosSplitS3Cfg(SConfig *pCfg, const char *name, char gVarible[TSDB_MAX_EP_NUM][TSDB_FQDN_LEN], int8_t *pNum) { int32_t code = TSDB_CODE_SUCCESS; @@ -733,6 +745,9 @@ static int32_t taosAddClientCfg(SConfig *pCfg) { CFG_DYN_CLIENT, CFG_CATEGORY_LOCAL)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "tsmaDataDeleteMark", tsmaDataDeleteMark, 60 * 60 * 1000, INT64_MAX, CFG_SCOPE_CLIENT, CFG_DYN_CLIENT, CFG_CATEGORY_LOCAL)); + + TAOS_CHECK_RETURN(cfgAddBool(pCfg, "streamCoverage", tsStreamCoverage, CFG_DYN_CLIENT, CFG_DYN_CLIENT, CFG_CATEGORY_LOCAL)); + TAOS_RETURN(TSDB_CODE_SUCCESS); } @@ -1463,6 +1478,9 @@ static int32_t taosSetClientCfg(SConfig *pCfg) { TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "bypassFlag"); tsBypassFlag = pItem->i32; + TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "streamCoverage"); + tsStreamCoverage = pItem->bval; + TAOS_RETURN(TSDB_CODE_SUCCESS); } @@ -2381,6 
+2399,10 @@ static int32_t taosCfgDynamicOptionsForServer(SConfig *pCfg, const char *name) { code = TSDB_CODE_SUCCESS; goto _exit; } + if (strcasecmp(name, "dataDir") == 0) { + code = TSDB_CODE_SUCCESS; + goto _exit; + } { // 'bool/int32_t/int64_t/float/double' variables with general modification function static OptionNameAndVar debugOptions[] = { @@ -2735,7 +2757,8 @@ static int32_t taosCfgDynamicOptionsForClient(SConfig *pCfg, const char *name) { {"maxTsmaCalcDelay", &tsMaxTsmaCalcDelay}, {"tsmaDataDeleteMark", &tsmaDataDeleteMark}, {"numOfRpcSessions", &tsNumOfRpcSessions}, - {"bypassFlag", &tsBypassFlag}}; + {"bypassFlag", &tsBypassFlag}, + {"streamCoverage", &tsStreamCoverage}}; if ((code = taosCfgSetOption(debugOptions, tListLen(debugOptions), pItem, true)) != TSDB_CODE_SUCCESS) { code = taosCfgSetOption(options, tListLen(options), pItem, false); diff --git a/source/common/src/ttypes.c b/source/common/src/ttypes.c index 766e91f54e..1a0740b2b9 100644 --- a/source/common/src/ttypes.c +++ b/source/common/src/ttypes.c @@ -18,7 +18,7 @@ #include "tcompression.h" const int32_t TYPE_BYTES[21] = { - -1, // TSDB_DATA_TYPE_NULL + 2, // TSDB_DATA_TYPE_NULL CHAR_BYTES, // TSDB_DATA_TYPE_BOOL CHAR_BYTES, // TSDB_DATA_TYPE_TINYINT SHORT_BYTES, // TSDB_DATA_TYPE_SMALLINT @@ -42,7 +42,7 @@ const int32_t TYPE_BYTES[21] = { }; tDataTypeDescriptor tDataTypes[TSDB_DATA_TYPE_MAX] = { - {TSDB_DATA_TYPE_NULL, 6, 1, "NOTYPE", 0, 0, NULL, NULL}, + {TSDB_DATA_TYPE_NULL, 6, 2, "NOTYPE", 0, 0, NULL, NULL}, {TSDB_DATA_TYPE_BOOL, 4, CHAR_BYTES, "BOOL", false, true, tsCompressBool, tsDecompressBool}, {TSDB_DATA_TYPE_TINYINT, 7, CHAR_BYTES, "TINYINT", INT8_MIN, INT8_MAX, tsCompressTinyint, tsDecompressTinyint}, {TSDB_DATA_TYPE_SMALLINT, 8, SHORT_BYTES, "SMALLINT", INT16_MIN, INT16_MAX, tsCompressSmallint, diff --git a/source/dnode/mgmt/mgmt_dnode/inc/dmInt.h b/source/dnode/mgmt/mgmt_dnode/inc/dmInt.h index 2108a097ee..45a597ec90 100644 --- a/source/dnode/mgmt/mgmt_dnode/inc/dmInt.h +++ 
b/source/dnode/mgmt/mgmt_dnode/inc/dmInt.h @@ -25,6 +25,7 @@ extern "C" { typedef struct SDnodeMgmt { SDnodeData *pData; SMsgCb msgCb; + STfs *pTfs; const char *path; const char *name; TdThread statusThread; @@ -69,6 +70,7 @@ int32_t dmStartStatusThread(SDnodeMgmt *pMgmt); int32_t dmStartConfigThread(SDnodeMgmt *pMgmt); int32_t dmStartStatusInfoThread(SDnodeMgmt *pMgmt); void dmStopStatusThread(SDnodeMgmt *pMgmt); +void dmStopConfigThread(SDnodeMgmt *pMgmt); void dmStopStatusInfoThread(SDnodeMgmt *pMgmt); int32_t dmStartNotifyThread(SDnodeMgmt *pMgmt); void dmStopNotifyThread(SDnodeMgmt *pMgmt); diff --git a/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c b/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c index 065b0517ee..ece21e88f9 100644 --- a/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c +++ b/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c @@ -475,7 +475,7 @@ int32_t dmProcessGrantRsp(SDnodeMgmt *pMgmt, SRpcMsg *pMsg) { return 0; } -extern void tsdbAlterMaxCompactTasks(); +extern void tsdbAlterNumCompactThreads(); static int32_t dmAlterMaxCompactTask(const char *value) { int32_t max_compact_tasks; char *endptr = NULL; @@ -489,7 +489,7 @@ static int32_t dmAlterMaxCompactTask(const char *value) { dInfo("alter max compact tasks from %d to %d", tsNumOfCompactThreads, max_compact_tasks); tsNumOfCompactThreads = max_compact_tasks; #ifdef TD_ENTERPRISE - tsdbAlterMaxCompactTasks(); + (void)tsdbAlterNumCompactThreads(); #endif } @@ -499,9 +499,15 @@ static int32_t dmAlterMaxCompactTask(const char *value) { int32_t dmProcessConfigReq(SDnodeMgmt *pMgmt, SRpcMsg *pMsg) { int32_t code = 0; SDCfgDnodeReq cfgReq = {0}; + SConfig *pCfg = taosGetCfg(); + SConfigItem *pItem = NULL; + if (tDeserializeSDCfgDnodeReq(pMsg->pCont, pMsg->contLen, &cfgReq) != 0) { return TSDB_CODE_INVALID_MSG; } + if (strcasecmp(cfgReq.config, "dataDir") == 0) { + return taosUpdateTfsItemDisable(pCfg, cfgReq.value, pMgmt->pTfs); + } if (strncmp(cfgReq.config, tsAlterCompactTaskKeywords, 
strlen(tsAlterCompactTaskKeywords) + 1) == 0) { return dmAlterMaxCompactTask(cfgReq.value); @@ -509,9 +515,6 @@ int32_t dmProcessConfigReq(SDnodeMgmt *pMgmt, SRpcMsg *pMsg) { dInfo("start to config, option:%s, value:%s", cfgReq.config, cfgReq.value); - SConfig *pCfg = taosGetCfg(); - SConfigItem *pItem = NULL; - code = cfgGetAndSetItem(pCfg, &pItem, cfgReq.config, cfgReq.value, CFG_STYPE_ALTER_SERVER_CMD, true); if (code != 0) { if (strncasecmp(cfgReq.config, "resetlog", strlen("resetlog")) == 0) { diff --git a/source/dnode/mgmt/mgmt_dnode/src/dmInt.c b/source/dnode/mgmt/mgmt_dnode/src/dmInt.c index b58c1a216d..ed6aff1b13 100644 --- a/source/dnode/mgmt/mgmt_dnode/src/dmInt.c +++ b/source/dnode/mgmt/mgmt_dnode/src/dmInt.c @@ -52,6 +52,7 @@ static void dmStopMgmt(SDnodeMgmt *pMgmt) { dmStopMonitorThread(pMgmt); dmStopAuditThread(pMgmt); dmStopStatusThread(pMgmt); + dmStopConfigThread(pMgmt); dmStopStatusInfoThread(pMgmt); #if defined(TD_ENTERPRISE) dmStopNotifyThread(pMgmt); @@ -68,6 +69,7 @@ static int32_t dmOpenMgmt(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) { pMgmt->pData = pInput->pData; pMgmt->msgCb = pInput->msgCb; + pMgmt->pTfs = pInput->pTfs; pMgmt->path = pInput->path; pMgmt->name = pInput->name; pMgmt->processCreateNodeFp = pInput->processCreateNodeFp; diff --git a/source/dnode/mgmt/mgmt_dnode/src/dmWorker.c b/source/dnode/mgmt/mgmt_dnode/src/dmWorker.c index 8f890f6805..ef4e76031d 100644 --- a/source/dnode/mgmt/mgmt_dnode/src/dmWorker.c +++ b/source/dnode/mgmt/mgmt_dnode/src/dmWorker.c @@ -343,7 +343,7 @@ int32_t dmStartConfigThread(SDnodeMgmt *pMgmt) { int32_t code = 0; TdThreadAttr thAttr; (void)taosThreadAttrInit(&thAttr); - (void)taosThreadAttrSetDetachState(&thAttr, PTHREAD_CREATE_DETACHED); + (void)taosThreadAttrSetDetachState(&thAttr, PTHREAD_CREATE_JOINABLE); if (taosThreadCreate(&pMgmt->configThread, &thAttr, dmConfigThreadFp, pMgmt) != 0) { code = TAOS_SYSTEM_ERROR(errno); dError("failed to create config thread since %s", tstrerror(code)); 
@@ -378,6 +378,13 @@ void dmStopStatusThread(SDnodeMgmt *pMgmt) { } } +void dmStopConfigThread(SDnodeMgmt *pMgmt) { + if (taosCheckPthreadValid(pMgmt->configThread)) { + (void)taosThreadJoin(pMgmt->configThread, NULL); + taosThreadClear(&pMgmt->configThread); + } +} + void dmStopStatusInfoThread(SDnodeMgmt *pMgmt) { if (taosCheckPthreadValid(pMgmt->statusInfoThread)) { (void)taosThreadJoin(pMgmt->statusInfoThread, NULL); diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c index 83043b4393..0f0dbfb3f1 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c @@ -211,7 +211,7 @@ static void vmGenerateVnodeCfg(SCreateVnodeReq *pCreate, SVnodeCfg *pCfg) { #if defined(TD_ENTERPRISE) pCfg->tdbEncryptAlgorithm = pCreate->encryptAlgorithm; if (pCfg->tdbEncryptAlgorithm == DND_CA_SM4) { - tstrncpy(pCfg->tdbEncryptKey, tsEncryptKey, ENCRYPT_KEY_LEN); + tstrncpy(pCfg->tdbEncryptKey, tsEncryptKey, ENCRYPT_KEY_LEN + 1); } #else pCfg->tdbEncryptAlgorithm = 0; diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c index 537c1f6297..6d5e117181 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c @@ -896,7 +896,7 @@ static int32_t vmInit(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) { } tmsgReportStartup("vnode-sync", "initialized"); - if ((code = vnodeInit(tsNumOfCommitThreads, pInput->stopDnodeFp)) != 0) { + if ((code = vnodeInit(pInput->stopDnodeFp)) != 0) { dError("failed to init vnode since %s", tstrerror(code)); goto _OVER; } diff --git a/source/dnode/mnode/impl/inc/mndConfig.h b/source/dnode/mnode/impl/inc/mndConfig.h index bbfa3f4a65..b918383afb 100644 --- a/source/dnode/mnode/impl/inc/mndConfig.h +++ b/source/dnode/mnode/impl/inc/mndConfig.h @@ -29,7 +29,7 @@ static int32_t mndCfgActionInsert(SSdb *pSdb, SConfigObj *obj); static int32_t mndCfgActionDelete(SSdb *pSdb, SConfigObj *obj); 
static int32_t mndCfgActionUpdate(SSdb *pSdb, SConfigObj *oldItem, SConfigObj *newObj); static int32_t mndCfgActionDeploy(SMnode *pMnode); -static int32_t mndCfgActionPrepare(SMnode *pMnode); +static int32_t mndCfgActionAfterRestored(SMnode *pMnode); static int32_t mndProcessConfigReq(SRpcMsg *pReq); #ifdef __cplusplus diff --git a/source/dnode/mnode/impl/src/mndConfig.c b/source/dnode/mnode/impl/src/mndConfig.c index 0247a1c88c..74bb0561cd 100644 --- a/source/dnode/mnode/impl/src/mndConfig.c +++ b/source/dnode/mnode/impl/src/mndConfig.c @@ -18,6 +18,7 @@ #include "mndConfig.h" #include "mndDnode.h" #include "mndPrivilege.h" +#include "mndSync.h" #include "mndTrans.h" #include "mndUser.h" #include "tutil.h" @@ -52,7 +53,7 @@ int32_t mndInitConfig(SMnode *pMnode) { .updateFp = (SdbUpdateFp)mndCfgActionUpdate, .deleteFp = (SdbDeleteFp)mndCfgActionDelete, .deployFp = (SdbDeployFp)mndCfgActionDeploy, - .prepareFp = (SdbPrepareFp)mndCfgActionPrepare}; + .afterRestoredFp = (SdbAfterRestoredFp)mndCfgActionAfterRestored}; mndSetMsgHandle(pMnode, TDMT_MND_CONFIG, mndProcessConfigReq); mndSetMsgHandle(pMnode, TDMT_MND_CONFIG_DNODE, mndProcessConfigDnodeReq); @@ -213,7 +214,7 @@ static int32_t mndCfgActionUpdate(SSdb *pSdb, SConfigObj *pOld, SConfigObj *pNew static int32_t mndCfgActionDeploy(SMnode *pMnode) { return mndInitWriteCfg(pMnode); } -static int32_t mndCfgActionPrepare(SMnode *pMnode) { return mndTryRebuildCfg(pMnode); } +static int32_t mndCfgActionAfterRestored(SMnode *pMnode) { return mndTryRebuildCfg(pMnode); } static int32_t mndProcessConfigReq(SRpcMsg *pReq) { SMnode *pMnode = pReq->info.node; @@ -340,11 +341,15 @@ _OVER: } int32_t mndTryRebuildCfg(SMnode *pMnode) { + if (!mndIsLeader(pMnode)) { + return TSDB_CODE_SUCCESS; + } int32_t code = 0; int32_t sz = -1; STrans *pTrans = NULL; SAcctObj *vObj = NULL, *obj = NULL; SArray *addArray = NULL; + vObj = sdbAcquire(pMnode->pSdb, SDB_CFG, "tsmmConfigVersion"); if (vObj == NULL) { if ((code = 
mndInitWriteCfg(pMnode)) < 0) goto _exit; diff --git a/source/dnode/mnode/impl/src/mndConsumer.c b/source/dnode/mnode/impl/src/mndConsumer.c index fbce8f544a..6e9dc6ab17 100644 --- a/source/dnode/mnode/impl/src/mndConsumer.c +++ b/source/dnode/mnode/impl/src/mndConsumer.c @@ -52,6 +52,9 @@ int32_t mndInitConsumer(SMnode *pMnode) { .deleteFp = (SdbDeleteFp)mndConsumerActionDelete, }; + if (pMnode == NULL){ + return TSDB_CODE_INVALID_PARA; + } mndSetMsgHandle(pMnode, TDMT_MND_TMQ_SUBSCRIBE, mndProcessSubscribeReq); mndSetMsgHandle(pMnode, TDMT_MND_TMQ_HB, mndProcessMqHbReq); mndSetMsgHandle(pMnode, TDMT_MND_TMQ_ASK_EP, mndProcessAskEpReq); @@ -66,6 +69,9 @@ int32_t mndInitConsumer(SMnode *pMnode) { void mndCleanupConsumer(SMnode *pMnode) {} int32_t mndSendConsumerMsg(SMnode *pMnode, int64_t consumerId, uint16_t msgType, SRpcHandleInfo *info) { + if (pMnode == NULL || info == NULL) { + return TSDB_CODE_INVALID_PARA; + } int32_t code = 0; void *msg = rpcMallocCont(sizeof(int64_t)); MND_TMQ_NULL_CHECK(msg); @@ -88,6 +94,9 @@ END: } static int32_t validateTopics(STrans* pTrans, SCMSubscribeReq *subscribe, SMnode *pMnode, const char *pUser) { + if (pTrans == NULL || subscribe == NULL || pMnode == NULL || pUser == NULL) { + return TSDB_CODE_INVALID_PARA; + } SMqTopicObj *pTopic = NULL; int32_t code = 0; @@ -130,6 +139,9 @@ END: } static int32_t mndProcessConsumerClearMsg(SRpcMsg *pMsg) { + if (pMsg == NULL || pMsg->pCont == NULL) { + return TSDB_CODE_INVALID_PARA; + } int32_t code = 0; SMnode *pMnode = pMsg->info.node; SMqConsumerClearMsg *pClearMsg = pMsg->pCont; @@ -155,6 +167,9 @@ END: } static int32_t checkPrivilege(SMnode *pMnode, SMqConsumerObj *pConsumer, SMqHbRsp *rsp, char *user) { + if (pMnode == NULL || pConsumer == NULL || rsp == NULL || user == NULL) { + return TSDB_CODE_INVALID_PARA; + } int32_t code = 0; rsp->topicPrivileges = taosArrayInit(taosArrayGetSize(pConsumer->currentTopics), sizeof(STopicPrivilege)); MND_TMQ_NULL_CHECK(rsp->topicPrivileges); @@ 
-181,6 +196,9 @@ END: } static void storeOffsetRows(SMnode *pMnode, SMqHbReq *req, SMqConsumerObj *pConsumer){ + if (pMnode == NULL || req == NULL || pConsumer == NULL){ + return; + } for (int i = 0; i < taosArrayGetSize(req->topics); i++) { TopicOffsetRows *data = taosArrayGet(req->topics, i); if (data == NULL){ @@ -210,6 +228,9 @@ static void storeOffsetRows(SMnode *pMnode, SMqHbReq *req, SMqConsumerObj *pCons } static int32_t buildMqHbRsp(SRpcMsg *pMsg, SMqHbRsp *rsp){ + if (pMsg == NULL || rsp == NULL){ + return TSDB_CODE_INVALID_PARA; + } int32_t tlen = tSerializeSMqHbRsp(NULL, 0, rsp); if (tlen <= 0){ return TSDB_CODE_TMQ_INVALID_MSG; @@ -229,6 +250,9 @@ static int32_t buildMqHbRsp(SRpcMsg *pMsg, SMqHbRsp *rsp){ } static int32_t mndProcessMqHbReq(SRpcMsg *pMsg) { + if (pMsg == NULL) { + return TSDB_CODE_INVALID_PARA; + } int32_t code = 0; SMnode *pMnode = pMsg->info.node; SMqHbReq req = {0}; @@ -256,6 +280,9 @@ END: } static int32_t addEpSetInfo(SMnode *pMnode, SMqConsumerObj *pConsumer, int32_t epoch, SMqAskEpRsp *rsp){ + if (pMnode == NULL || pConsumer == NULL || rsp == NULL){ + return TSDB_CODE_INVALID_PARA; + } taosRLockLatch(&pConsumer->lock); int32_t numOfTopics = taosArrayGetSize(pConsumer->currentTopics); @@ -359,6 +386,9 @@ static int32_t addEpSetInfo(SMnode *pMnode, SMqConsumerObj *pConsumer, int32_t e } static int32_t buildAskEpRsp(SRpcMsg *pMsg, SMqAskEpRsp *rsp, int32_t serverEpoch, int64_t consumerId){ + if (pMsg == NULL || rsp == NULL) { + return TSDB_CODE_INVALID_PARA; + } int32_t code = 0; // encode rsp int32_t tlen = sizeof(SMqRspHead) + tEncodeSMqAskEpRsp(NULL, rsp); @@ -388,6 +418,9 @@ static int32_t buildAskEpRsp(SRpcMsg *pMsg, SMqAskEpRsp *rsp, int32_t serverEpoc } static int32_t mndProcessAskEpReq(SRpcMsg *pMsg) { + if (pMsg == NULL) { + return TSDB_CODE_INVALID_PARA; + } SMnode *pMnode = pMsg->info.node; SMqAskEpReq req = {0}; SMqAskEpRsp rsp = {0}; @@ -431,6 +464,9 @@ END: } int32_t mndSetConsumerDropLogs(STrans *pTrans, 
SMqConsumerObj *pConsumer) { + if (pConsumer == NULL || pTrans == NULL) { + return TSDB_CODE_INVALID_PARA; + } int32_t code = 0; SSdbRaw *pCommitRaw = mndConsumerActionEncode(pConsumer); MND_TMQ_NULL_CHECK(pCommitRaw); @@ -445,6 +481,9 @@ END: } int32_t mndSetConsumerCommitLogs(STrans *pTrans, SMqConsumerObj *pConsumer) { + if (pConsumer == NULL || pTrans == NULL) { + return TSDB_CODE_INVALID_PARA; + } int32_t code = 0; SSdbRaw *pCommitRaw = mndConsumerActionEncode(pConsumer); MND_TMQ_NULL_CHECK(pCommitRaw); @@ -459,6 +498,9 @@ END: } static void freeItem(void *param) { + if (param == NULL) { + return; + } void *pItem = *(void **)param; if (pItem != NULL) { taosMemoryFree(pItem); @@ -475,6 +517,9 @@ if (taosArrayPush(pConsumerNew->array, &newTopicCopy) == NULL){\ } static int32_t getTopicAddDelete(SMqConsumerObj *pExistedConsumer, SMqConsumerObj *pConsumerNew){ + if (pExistedConsumer == NULL || pConsumerNew == NULL) { + return TSDB_CODE_INVALID_PARA; + } int32_t code = 0; pConsumerNew->rebNewTopics = taosArrayInit(0, sizeof(void *)); MND_TMQ_NULL_CHECK(pConsumerNew->rebNewTopics); @@ -528,6 +573,9 @@ END: } static int32_t checkAndSortTopic(SMnode *pMnode, SArray *pTopicList){ + if (pTopicList == NULL || pMnode == NULL) { + return TSDB_CODE_INVALID_PARA; + } taosArraySort(pTopicList, taosArrayCompareString); taosArrayRemoveDuplicate(pTopicList, taosArrayCompareString, freeItem); @@ -542,6 +590,9 @@ static int32_t checkAndSortTopic(SMnode *pMnode, SArray *pTopicList){ } static int32_t buildSubConsumer(SMnode *pMnode, SCMSubscribeReq *subscribe, SMqConsumerObj** ppConsumer){ + if (pMnode == NULL || subscribe == NULL) { + return TSDB_CODE_INVALID_PARA; + } int64_t consumerId = subscribe->consumerId; char *cgroup = subscribe->cgroup; SMqConsumerObj *pConsumerNew = NULL; @@ -581,6 +632,9 @@ END: } int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) { + if (pMsg == NULL) { + return TSDB_CODE_INVALID_PARA; + } SMnode *pMnode = pMsg->info.node; char *msgStr = pMsg->pCont; int32_t 
code = 0; @@ -619,6 +673,9 @@ END: } SSdbRaw *mndConsumerActionEncode(SMqConsumerObj *pConsumer) { + if (pConsumer == NULL) { + return NULL; + } int32_t code = 0; int32_t lino = 0; terrno = TSDB_CODE_OUT_OF_MEMORY; @@ -659,6 +716,9 @@ CM_ENCODE_OVER: } SSdbRow *mndConsumerActionDecode(SSdbRaw *pRaw) { + if (pRaw == NULL) { + return NULL; + } int32_t code = 0; int32_t lino = 0; SSdbRow *pRow = NULL; @@ -717,6 +777,9 @@ CM_DECODE_OVER: } static int32_t mndConsumerActionInsert(SSdb *pSdb, SMqConsumerObj *pConsumer) { + if (pConsumer == NULL) { + return TSDB_CODE_INVALID_PARA; + } mInfo("consumer:0x%" PRIx64 " sub insert, cgroup:%s status:%d(%s) epoch:%d", pConsumer->consumerId, pConsumer->cgroup, pConsumer->status, mndConsumerStatusName(pConsumer->status), pConsumer->epoch); pConsumer->subscribeTime = pConsumer->createTime; @@ -724,6 +787,9 @@ static int32_t mndConsumerActionInsert(SSdb *pSdb, SMqConsumerObj *pConsumer) { } static int32_t mndConsumerActionDelete(SSdb *pSdb, SMqConsumerObj *pConsumer) { + if (pConsumer == NULL) { + return TSDB_CODE_INVALID_PARA; + } mInfo("consumer:0x%" PRIx64 " perform delete action, status:(%d)%s", pConsumer->consumerId, pConsumer->status, mndConsumerStatusName(pConsumer->status)); tClearSMqConsumerObj(pConsumer); @@ -744,6 +810,9 @@ static int32_t mndConsumerActionDelete(SSdb *pSdb, SMqConsumerObj *pConsumer) { // remove from topic list static void removeFromTopicList(SArray *topicList, const char *pTopic, int64_t consumerId, char *type) { + if (topicList == NULL || pTopic == NULL) { + return; + } int32_t size = taosArrayGetSize(topicList); for (int32_t i = 0; i < size; i++) { char *p = taosArrayGetP(topicList, i); @@ -759,6 +828,9 @@ static void removeFromTopicList(SArray *topicList, const char *pTopic, int64_t c } static bool existInCurrentTopicList(const SMqConsumerObj *pConsumer, const char *pTopic) { + if (pConsumer == NULL || pTopic == NULL) { + return false; + } bool existing = false; int32_t size = 
taosArrayGetSize(pConsumer->currentTopics); for (int32_t i = 0; i < size; i++) { @@ -773,6 +845,9 @@ static bool existInCurrentTopicList(const SMqConsumerObj *pConsumer, const char } static int32_t mndConsumerActionUpdate(SSdb *pSdb, SMqConsumerObj *pOldConsumer, SMqConsumerObj *pNewConsumer) { + if (pOldConsumer == NULL || pNewConsumer == NULL) { + return TSDB_CODE_INVALID_PARA; + } mInfo("consumer:0x%" PRIx64 " perform update action, update type:%d, subscribe-time:%" PRId64 ", createTime:%" PRId64, pOldConsumer->consumerId, pNewConsumer->updateType, pOldConsumer->subscribeTime, pOldConsumer->createTime); @@ -857,6 +932,9 @@ static int32_t mndConsumerActionUpdate(SSdb *pSdb, SMqConsumerObj *pOldConsumer, } int32_t mndAcquireConsumer(SMnode *pMnode, int64_t consumerId, SMqConsumerObj** pConsumer) { + if (pMnode == NULL || pConsumer == NULL) { + return TSDB_CODE_INVALID_PARA; + } SSdb *pSdb = pMnode->pSdb; *pConsumer = sdbAcquire(pSdb, SDB_CONSUMER, &consumerId); if (*pConsumer == NULL) { @@ -866,11 +944,17 @@ int32_t mndAcquireConsumer(SMnode *pMnode, int64_t consumerId, SMqConsumerObj** } void mndReleaseConsumer(SMnode *pMnode, SMqConsumerObj *pConsumer) { + if (pMnode == NULL || pConsumer == NULL) { + return; + } SSdb *pSdb = pMnode->pSdb; sdbRelease(pSdb, pConsumer); } static int32_t mndRetrieveConsumer(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rowsCapacity) { + if (pReq == NULL || pShow == NULL || pBlock == NULL) { + return TSDB_CODE_INVALID_PARA; + } SMnode *pMnode = pReq->info.node; SSdb *pSdb = pMnode->pSdb; int32_t numOfRows = 0; @@ -1021,6 +1105,7 @@ END: } static void mndCancelGetNextConsumer(SMnode *pMnode, void *pIter) { + if (pMnode == NULL || pIter == NULL) return; SSdb *pSdb = pMnode->pSdb; sdbCancelFetchByType(pSdb, pIter, SDB_CONSUMER); } diff --git a/source/dnode/mnode/impl/src/mndIndex.c b/source/dnode/mnode/impl/src/mndIndex.c index 718c34e85a..f5dac9df65 100644 --- a/source/dnode/mnode/impl/src/mndIndex.c +++ 
b/source/dnode/mnode/impl/src/mndIndex.c @@ -673,8 +673,6 @@ static int32_t mndSetUpdateIdxStbCommitLogs(SMnode *pMnode, STrans *pTrans, SStb code = TSDB_CODE_MND_TAG_NOT_EXIST; TAOS_RETURN(code); } - col_id_t colId = pOld->pTags[tag].colId; - TAOS_CHECK_RETURN(mndCheckColAndTagModifiable(pMnode, pOld->name, pOld->uid, colId)); TAOS_CHECK_RETURN(mndAllocStbSchemas(pOld, pNew)); SSchema *pTag = pNew->pTags + tag; @@ -806,16 +804,7 @@ static int32_t mndAddIndex(SMnode *pMnode, SRpcMsg *pReq, SCreateTagIndexReq *re TAOS_RETURN(code); } - col_id_t colId = pStb->pTags[tag].colId; - TAOS_CHECK_RETURN(mndCheckColAndTagModifiable(pMnode, pStb->name, pStb->uid, colId)); - - // SSchema *pTag = pStb->pTags + tag; - // if (IS_IDX_ON(pTag)) { - // terrno = TSDB_CODE_MND_TAG_INDEX_ALREADY_EXIST; - // return -1; - // } code = mndAddIndexImpl(pMnode, pReq, pDb, pStb, &idxObj); - TAOS_RETURN(code); } diff --git a/source/dnode/mnode/impl/src/mndMain.c b/source/dnode/mnode/impl/src/mndMain.c index aa3ffc58e8..b241af5adb 100644 --- a/source/dnode/mnode/impl/src/mndMain.c +++ b/source/dnode/mnode/impl/src/mndMain.c @@ -797,14 +797,7 @@ int32_t mndStart(SMnode *pMnode) { return -1; } mndSetRestored(pMnode, true); - } else { - if (sdbPrepare(pMnode->pSdb) != 0) { - mError("failed to prepare sdb while start mnode"); - return -1; - } - mndSetRestored(pMnode, true); } - grantReset(pMnode, TSDB_GRANT_ALL, 0); return mndInitTimer(pMnode); diff --git a/source/dnode/mnode/impl/src/mndStb.c b/source/dnode/mnode/impl/src/mndStb.c index 2db76f6312..d46968a22d 100644 --- a/source/dnode/mnode/impl/src/mndStb.c +++ b/source/dnode/mnode/impl/src/mndStb.c @@ -1805,7 +1805,6 @@ static int32_t mndUpdateSuperTableColumnCompress(SMnode *pMnode, const SStbObj * } SSchema *pTarget = &pOld->pColumns[idx]; col_id_t colId = pTarget->colId; - TAOS_CHECK_RETURN(mndCheckColAndTagModifiable(pMnode, pOld->name, pOld->uid, colId)); TAOS_CHECK_RETURN(mndAllocStbSchemas(pOld, pNew)); code = 
validColCmprByType(pTarget->type, p->bytes); @@ -3702,10 +3701,6 @@ static int32_t mndAddIndex(SMnode *pMnode, SRpcMsg *pReq, SCreateTagIndexReq *ta terrno = TSDB_CODE_MND_TAG_NOT_EXIST; return -1; } - col_id_t colId = pOld->pTags[tag].colId; - if (mndCheckColAndTagModifiable(pMnode, pOld->name, pOld->uid, colId) != 0) { - return -1; - } if (mndAllocStbSchemas(pOld, pNew) != 0) { return -1; } diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index 3bee82e3e7..9e6188e9d9 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -1685,11 +1685,6 @@ static int32_t mndProcessPauseStreamReq(SRpcMsg *pReq) { mInfo("stream:%s,%" PRId64 " start to pause stream", pauseReq.name, pStream->uid); - if (pStream->status == STREAM_STATUS__PAUSE) { - sdbRelease(pMnode->pSdb, pStream); - return 0; - } - if ((code = mndCheckDbPrivilegeByName(pMnode, pReq->info.conn.user, MND_OPER_WRITE_DB, pStream->targetDb)) != 0) { sdbRelease(pMnode->pSdb, pStream); return code; @@ -1778,7 +1773,6 @@ static int32_t mndProcessPauseStreamReq(SRpcMsg *pReq) { // pause stream taosWLockLatch(&pStream->lock); - pStream->status = STREAM_STATUS__PAUSE; code = mndPersistTransLog(pStream, pTrans, SDB_STATUS_READY); if (code) { taosWUnLockLatch(&pStream->lock); @@ -1829,11 +1823,6 @@ static int32_t mndProcessResumeStreamReq(SRpcMsg *pReq) { } } - if (pStream->status != STREAM_STATUS__PAUSE) { - sdbRelease(pMnode->pSdb, pStream); - return 0; - } - mInfo("stream:%s,%" PRId64 " start to resume stream from pause", resumeReq.name, pStream->uid); if (mndCheckDbPrivilegeByName(pMnode, pReq->info.conn.user, MND_OPER_WRITE_DB, pStream->targetDb) != 0) { sdbRelease(pMnode->pSdb, pStream); diff --git a/source/dnode/mnode/impl/src/mndStreamUtil.c b/source/dnode/mnode/impl/src/mndStreamUtil.c index a625ca81c9..7a38e68744 100644 --- a/source/dnode/mnode/impl/src/mndStreamUtil.c +++ b/source/dnode/mnode/impl/src/mndStreamUtil.c @@ 
-914,8 +914,7 @@ int32_t mndResetChkptReportInfo(SHashObj *pHash, int64_t streamId) { return TSDB_CODE_MND_STREAM_NOT_EXIST; } -static void mndShowStreamStatus(char *dst, SStreamObj *pStream) { - int8_t status = atomic_load_8(&pStream->status); +static void mndShowStreamStatus(char *dst, int8_t status) { if (status == STREAM_STATUS__NORMAL) { tstrncpy(dst, "ready", MND_STREAM_TRIGGER_NAME_SIZE); } else if (status == STREAM_STATUS__STOP) { @@ -951,6 +950,41 @@ static void int64ToHexStr(int64_t id, char *pBuf, int32_t bufLen) { varDataSetLen(pBuf, len + 2); } +static int32_t isAllTaskPaused(SStreamObj *pStream, bool *pRes) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + SStreamTaskIter *pIter = NULL; + bool isPaused = true; + + taosRLockLatch(&pStream->lock); + code = createStreamTaskIter(pStream, &pIter); + TSDB_CHECK_CODE(code, lino, _end); + + while (streamTaskIterNextTask(pIter)) { + SStreamTask *pTask = NULL; + code = streamTaskIterGetCurrent(pIter, &pTask); + TSDB_CHECK_CODE(code, lino, _end); + + STaskId id = {.streamId = pTask->id.streamId, .taskId = pTask->id.taskId}; + STaskStatusEntry *pe = taosHashGet(execInfo.pTaskMap, &id, sizeof(id)); + if (pe == NULL) { + continue; + } + if (pe->status != TASK_STATUS__PAUSE) { + isPaused = false; + } + } + (*pRes) = isPaused; + +_end: + destroyStreamTaskIter(pIter); + taosRUnLockLatch(&pStream->lock); + if (code != TSDB_CODE_SUCCESS) { + mError("error happens when get stream status, lino:%d, code:%s", lino, tstrerror(code)); + } + return code; +} + int32_t setStreamAttrInResBlock(SStreamObj *pStream, SSDataBlock *pBlock, int32_t numOfRows) { int32_t code = 0; int32_t cols = 0; @@ -999,7 +1033,15 @@ int32_t setStreamAttrInResBlock(SStreamObj *pStream, SSDataBlock *pBlock, int32_ char status[20 + VARSTR_HEADER_SIZE] = {0}; char status2[MND_STREAM_TRIGGER_NAME_SIZE] = {0}; - mndShowStreamStatus(status2, pStream); + bool isPaused = false; + code = isAllTaskPaused(pStream, &isPaused); + TSDB_CHECK_CODE(code, 
lino, _end); + + int8_t streamStatus = atomic_load_8(&pStream->status); + if (isPaused) { + streamStatus = STREAM_STATUS__PAUSE; + } + mndShowStreamStatus(status2, streamStatus); STR_WITH_MAXSIZE_TO_VARSTR(status, status2, sizeof(status)); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); TSDB_CHECK_NULL(pColInfo, code, lino, _end, terrno); @@ -1430,7 +1472,7 @@ int32_t mndFindChangedNodeInfo(SMnode *pMnode, const SArray *pPrevNodeList, cons return TSDB_CODE_INVALID_PARA; } - pInfo->pUpdateNodeList = taosArrayInit(4, sizeof(SNodeUpdateInfo)), + pInfo->pUpdateNodeList = taosArrayInit(4, sizeof(SNodeUpdateInfo)); pInfo->pDBMap = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_VARCHAR), true, HASH_NO_LOCK); if (pInfo->pUpdateNodeList == NULL || pInfo->pDBMap == NULL) { diff --git a/source/dnode/mnode/impl/src/mndSubscribe.c b/source/dnode/mnode/impl/src/mndSubscribe.c index bcca01a230..e5ab02996a 100644 --- a/source/dnode/mnode/impl/src/mndSubscribe.c +++ b/source/dnode/mnode/impl/src/mndSubscribe.c @@ -43,6 +43,9 @@ static void mndCancelGetNextSubscribe(SMnode *pMnode, void *pIter); static int32_t mndCheckConsumer(SRpcMsg *pMsg, SHashObj *hash); static int32_t mndSetSubCommitLogs(STrans *pTrans, SMqSubscribeObj *pSub) { + if (pTrans == NULL || pSub == NULL) { + return TSDB_CODE_INVALID_PARA; + } int32_t code = 0; SSdbRaw *pCommitRaw = mndSubActionEncode(pSub); MND_TMQ_NULL_CHECK(pCommitRaw); @@ -68,6 +71,9 @@ int32_t mndInitSubscribe(SMnode *pMnode) { .deleteFp = (SdbDeleteFp)mndSubActionDelete, }; + if (pMnode == NULL) { + return TSDB_CODE_INVALID_PARA; + } mndSetMsgHandle(pMnode, TDMT_VND_TMQ_SUBSCRIBE_RSP, mndTransProcessRsp); mndSetMsgHandle(pMnode, TDMT_VND_TMQ_DELETE_SUB_RSP, mndTransProcessRsp); mndSetMsgHandle(pMnode, TDMT_MND_TMQ_TIMER, mndProcessRebalanceReq); @@ -81,6 +87,9 @@ int32_t mndInitSubscribe(SMnode *pMnode) { } static int32_t mndCreateSubscription(SMnode *pMnode, const SMqTopicObj *pTopic, const char *subKey, SMqSubscribeObj** pSub) 
{ + if(pMnode == NULL || pTopic == NULL || subKey == NULL || pSub == NULL) { + return TSDB_CODE_INVALID_PARA; + } int32_t code = 0; MND_TMQ_RETURN_CHECK(tNewSubscribeObj(subKey, pSub)); (*pSub)->dbUid = pTopic->dbUid; @@ -99,6 +108,9 @@ END: static int32_t mndBuildSubChangeReq(void **pBuf, int32_t *pLen, SMqSubscribeObj *pSub, const SMqRebOutputVg *pRebVg, SSubplan *pPlan) { + if (pSub == NULL || pRebVg == NULL || pBuf == NULL || pLen == NULL) { + return TSDB_CODE_INVALID_PARA; + } SMqRebVgReq req = {0}; int32_t code = 0; SEncoder encoder = {0}; @@ -146,6 +158,9 @@ END: static int32_t mndPersistSubChangeVgReq(SMnode *pMnode, STrans *pTrans, SMqSubscribeObj *pSub, const SMqRebOutputVg *pRebVg, SSubplan *pPlan) { + if (pMnode == NULL || pTrans == NULL || pSub == NULL || pRebVg == NULL) { + return TSDB_CODE_INVALID_PARA; + } int32_t code = 0; void *buf = NULL; @@ -180,6 +195,9 @@ END: } static void mndSplitSubscribeKey(const char *key, char *topic, char *cgroup, bool fullName) { + if (key == NULL || topic == NULL || cgroup == NULL) { + return; + } int32_t i = 0; while (key[i] != TMQ_SEPARATOR_CHAR) { i++; @@ -197,6 +215,9 @@ static void mndSplitSubscribeKey(const char *key, char *topic, char *cgroup, boo } static int32_t mndGetOrCreateRebSub(SHashObj *pHash, const char *key, SMqRebInfo **pReb) { + if (pHash == NULL || key == NULL) { + return TSDB_CODE_INVALID_PARA; + } int32_t code = 0; SMqRebInfo* pRebInfo = taosHashGet(pHash, key, strlen(key) + 1); if (pRebInfo == NULL) { @@ -222,6 +243,9 @@ END: } static int32_t pushVgDataToHash(SArray *vgs, SHashObj *pHash, int64_t consumerId, char *key) { + if (vgs == NULL || pHash == NULL || key == NULL) { + return TSDB_CODE_INVALID_PARA; + } int32_t code = 0; SMqVgEp **pVgEp = (SMqVgEp **)taosArrayPop(vgs); MND_TMQ_NULL_CHECK(pVgEp); @@ -233,6 +257,9 @@ END: } static int32_t processRemovedConsumers(SMqRebOutputObj *pOutput, SHashObj *pHash, const SMqRebInputObj *pInput) { + if (pHash == NULL || pOutput == NULL || pInput == 
NULL) { + return TSDB_CODE_INVALID_PARA; + } int32_t code = 0; int32_t numOfRemoved = taosArrayGetSize(pInput->pRebInfo->removedConsumers); int32_t actualRemoved = 0; @@ -266,6 +293,9 @@ END: } static int32_t processNewConsumers(SMqRebOutputObj *pOutput, const SMqRebInputObj *pInput) { + if (pOutput == NULL || pInput == NULL) { + return TSDB_CODE_INVALID_PARA; + } int32_t code = 0; int32_t numOfNewConsumers = taosArrayGetSize(pInput->pRebInfo->newConsumers); @@ -285,6 +315,9 @@ END: } static int32_t processUnassignedVgroups(SMqRebOutputObj *pOutput, SHashObj *pHash) { + if (pOutput == NULL || pHash == NULL) { + return TSDB_CODE_INVALID_PARA; + } int32_t code = 0; int32_t numOfVgroups = taosArrayGetSize(pOutput->pSub->unassignedVgs); for (int32_t i = 0; i < numOfVgroups; i++) { @@ -296,6 +329,9 @@ END: static int32_t processModifiedConsumers(SMqRebOutputObj *pOutput, SHashObj *pHash, int32_t minVgCnt, int32_t remainderVgCnt) { + if (pOutput == NULL || pHash == NULL) { + return TSDB_CODE_INVALID_PARA; + } int32_t code = 0; int32_t cnt = 0; void *pIter = NULL; @@ -328,6 +364,9 @@ END: } static int32_t processRemoveAddVgs(SMnode *pMnode, SMqRebOutputObj *pOutput) { + if (pMnode == NULL || pOutput == NULL) { + return TSDB_CODE_INVALID_PARA; + } int32_t code = 0; int32_t totalVgNum = 0; SVgObj *pVgroup = NULL; @@ -403,6 +442,9 @@ END: } static int32_t processSubOffsetRows(SMnode *pMnode, const SMqRebInputObj *pInput, SMqRebOutputObj *pOutput) { + if (pMnode == NULL || pInput == NULL || pOutput == NULL) { + return TSDB_CODE_INVALID_PARA; + } SMqSubscribeObj *pSub = NULL; int32_t code = mndAcquireSubscribeByKey(pMnode, pInput->pRebInfo->key, &pSub); // put all offset rows if( code != 0){ @@ -465,6 +507,7 @@ END: } static void printRebalanceLog(SMqRebOutputObj *pOutput) { + if (pOutput == NULL) return; mInfo("sub:%s mq rebalance calculation completed, re-balanced vg", pOutput->pSub->key); for (int32_t i = 0; i < taosArrayGetSize(pOutput->rebVgs); i++) { SMqRebOutputVg 
*pOutputRebVg = taosArrayGet(pOutput->rebVgs, i); @@ -492,6 +535,9 @@ static void printRebalanceLog(SMqRebOutputObj *pOutput) { static void calcVgroupsCnt(const SMqRebInputObj *pInput, int32_t totalVgNum, const char *pSubKey, int32_t *minVgCnt, int32_t *remainderVgCnt) { + if (pInput == NULL || pSubKey == NULL || minVgCnt == NULL || remainderVgCnt == NULL) { + return; + } int32_t numOfRemoved = taosArrayGetSize(pInput->pRebInfo->removedConsumers); int32_t numOfAdded = taosArrayGetSize(pInput->pRebInfo->newConsumers); int32_t numOfFinal = pInput->oldConsumerNum + numOfAdded - numOfRemoved; @@ -509,6 +555,9 @@ static void calcVgroupsCnt(const SMqRebInputObj *pInput, int32_t totalVgNum, con } static int32_t assignVgroups(SMqRebOutputObj *pOutput, SHashObj *pHash, int32_t minVgCnt) { + if (pOutput == NULL || pHash == NULL) { + return TSDB_CODE_INVALID_PARA; + } SMqRebOutputVg *pRebVg = NULL; void *pAssignIter = NULL; void *pIter = NULL; @@ -580,6 +629,9 @@ END: } static int32_t mndDoRebalance(SMnode *pMnode, const SMqRebInputObj *pInput, SMqRebOutputObj *pOutput) { + if (pMnode == NULL || pInput == NULL || pOutput == NULL) { + return TSDB_CODE_INVALID_PARA; + } int32_t totalVgNum = processRemoveAddVgs(pMnode, pOutput); if (totalVgNum < 0){ return totalVgNum; @@ -605,6 +657,9 @@ END: } static int32_t presistConsumerByType(STrans *pTrans, SArray *consumers, int8_t type, char *cgroup, char *topic) { + if (pTrans == NULL || consumers == NULL || cgroup == NULL) { + return TSDB_CODE_INVALID_PARA; + } int32_t code = 0; SMqConsumerObj *pConsumerNew = NULL; int32_t consumerNum = taosArrayGetSize(consumers); @@ -623,6 +678,9 @@ END: } static int32_t mndPresistConsumer(STrans *pTrans, const SMqRebOutputObj *pOutput, char *cgroup, char *topic) { + if (pTrans == NULL || pOutput == NULL || cgroup == NULL || topic == NULL) { + return TSDB_CODE_INVALID_PARA; + } int32_t code = 0; MND_TMQ_RETURN_CHECK(presistConsumerByType(pTrans, pOutput->modifyConsumers, CONSUMER_UPDATE_REB, cgroup, 
NULL)); MND_TMQ_RETURN_CHECK(presistConsumerByType(pTrans, pOutput->newConsumers, CONSUMER_ADD_REB, cgroup, topic)); @@ -632,6 +690,9 @@ END: } static int32_t mndPersistRebResult(SMnode *pMnode, SRpcMsg *pMsg, const SMqRebOutputObj *pOutput) { + if (pMnode == NULL || pMsg == NULL || pOutput == NULL) { + return TSDB_CODE_INVALID_PARA; + } struct SSubplan *pPlan = NULL; int32_t code = 0; STrans *pTrans = NULL; @@ -682,6 +743,7 @@ END: } static void freeRebalanceItem(void *param) { + if (param == NULL) return; SMqRebInfo *pInfo = param; taosArrayDestroy(pInfo->newConsumers); taosArrayDestroy(pInfo->removedConsumers); @@ -689,6 +751,9 @@ static void freeRebalanceItem(void *param) { // type = 0 remove type = 1 add static int32_t buildRebInfo(SHashObj *rebSubHash, SArray *topicList, int8_t type, char *group, int64_t consumerId) { + if (rebSubHash == NULL || topicList == NULL || group == NULL) { + return TSDB_CODE_INVALID_PARA; + } int32_t code = 0; int32_t topicNum = taosArrayGetSize(topicList); for (int32_t i = 0; i < topicNum; i++) { @@ -709,6 +774,9 @@ END: } static void checkForVgroupSplit(SMnode *pMnode, SMqConsumerObj *pConsumer, SHashObj *rebSubHash) { + if (pMnode == NULL || pConsumer == NULL || rebSubHash == NULL) { + return; + } int32_t newTopicNum = taosArrayGetSize(pConsumer->currentTopics); for (int32_t i = 0; i < newTopicNum; i++) { char *topic = taosArrayGetP(pConsumer->currentTopics, i); @@ -754,6 +822,9 @@ static void checkForVgroupSplit(SMnode *pMnode, SMqConsumerObj *pConsumer, SHash } static int32_t mndCheckConsumer(SRpcMsg *pMsg, SHashObj *rebSubHash) { + if (pMsg == NULL || rebSubHash == NULL) { + return TSDB_CODE_INVALID_PARA; + } SMnode *pMnode = pMsg->info.node; SSdb *pSdb = pMnode->pSdb; SMqConsumerObj *pConsumer = NULL; @@ -818,6 +889,9 @@ void mndRebCntDec() { } static void clearRebOutput(SMqRebOutputObj *rebOutput) { + if (rebOutput == NULL) { + return; + } taosArrayDestroy(rebOutput->newConsumers); 
taosArrayDestroy(rebOutput->modifyConsumers); taosArrayDestroy(rebOutput->removedConsumers); @@ -827,6 +901,9 @@ static void clearRebOutput(SMqRebOutputObj *rebOutput) { } static int32_t initRebOutput(SMqRebOutputObj *rebOutput) { + if (rebOutput == NULL) { + return TSDB_CODE_INVALID_PARA; + } int32_t code = 0; rebOutput->newConsumers = taosArrayInit(0, sizeof(int64_t)); MND_TMQ_NULL_CHECK(rebOutput->newConsumers); @@ -845,6 +922,9 @@ END: // This function only works when there are dirty consumers static int32_t checkConsumer(SMnode *pMnode, SMqSubscribeObj *pSub) { + if (pMnode == NULL || pSub == NULL) { + return TSDB_CODE_INVALID_PARA; + } int32_t code = 0; void *pIter = NULL; while (1) { @@ -871,6 +951,9 @@ END: } static int32_t buildRebOutput(SMnode *pMnode, SMqRebInputObj *rebInput, SMqRebOutputObj *rebOutput) { + if (pMnode == NULL || rebInput == NULL || rebOutput == NULL) { + return TSDB_CODE_INVALID_PARA; + } const char *key = rebInput->pRebInfo->key; SMqSubscribeObj *pSub = NULL; int32_t code = mndAcquireSubscribeByKey(pMnode, key, &pSub); @@ -922,6 +1005,9 @@ END: } static int32_t mndProcessRebalanceReq(SRpcMsg *pMsg) { + if (pMsg == NULL) { + return TSDB_CODE_INVALID_PARA; + } int code = 0; void *pIter = NULL; SMnode *pMnode = pMsg->info.node; @@ -986,6 +1072,9 @@ END: } static int32_t sendDeleteSubToVnode(SMnode *pMnode, SMqSubscribeObj *pSub, STrans *pTrans) { + if (pMnode == NULL || pSub == NULL || pTrans == NULL) { + return TSDB_CODE_INVALID_PARA; + } void *pIter = NULL; SVgObj *pVgObj = NULL; int32_t code = 0; @@ -1024,6 +1113,9 @@ END: } static int32_t mndCheckConsumerByGroup(SMnode *pMnode, STrans *pTrans, char *cgroup, char *topic) { + if (pMnode == NULL || pTrans == NULL || cgroup == NULL || topic == NULL) { + return TSDB_CODE_INVALID_PARA; + } void *pIter = NULL; SMqConsumerObj *pConsumer = NULL; int code = 0; @@ -1056,6 +1148,9 @@ END: } static int32_t mndProcessDropCgroupReq(SRpcMsg *pMsg) { + if (pMsg == NULL) { + return 
TSDB_CODE_INVALID_PARA; + } SMnode *pMnode = pMsg->info.node; SMDropCgroupReq dropReq = {0}; STrans *pTrans = NULL; @@ -1109,6 +1204,9 @@ END: void mndCleanupSubscribe(SMnode *pMnode) {} static SSdbRaw *mndSubActionEncode(SMqSubscribeObj *pSub) { + if (pSub == NULL) { + return NULL; + } int32_t code = 0; int32_t lino = 0; terrno = TSDB_CODE_OUT_OF_MEMORY; @@ -1149,6 +1247,9 @@ SUB_ENCODE_OVER: } static SSdbRow *mndSubActionDecode(SSdbRaw *pRaw) { + if (pRaw == NULL) { + return NULL; + } int32_t code = 0; int32_t lino = 0; terrno = TSDB_CODE_OUT_OF_MEMORY; @@ -1218,17 +1319,18 @@ SUB_DECODE_OVER: } static int32_t mndSubActionInsert(SSdb *pSdb, SMqSubscribeObj *pSub) { - mTrace("subscribe:%s, perform insert action", pSub->key); + mTrace("subscribe:%s, perform insert action", pSub != NULL ? pSub->key : "null"); return 0; } static int32_t mndSubActionDelete(SSdb *pSdb, SMqSubscribeObj *pSub) { - mTrace("subscribe:%s, perform delete action", pSub->key); + mTrace("subscribe:%s, perform delete action", pSub != NULL ? 
pSub->key : "null"); tDeleteSubscribeObj(pSub); return 0; } static int32_t mndSubActionUpdate(SSdb *pSdb, SMqSubscribeObj *pOldSub, SMqSubscribeObj *pNewSub) { + if (pOldSub == NULL || pNewSub == NULL) return -1; mTrace("subscribe:%s, perform update action", pOldSub->key); taosWLockLatch(&pOldSub->lock); @@ -1249,6 +1351,9 @@ static int32_t mndSubActionUpdate(SSdb *pSdb, SMqSubscribeObj *pOldSub, SMqSubsc } int32_t mndAcquireSubscribeByKey(SMnode *pMnode, const char *key, SMqSubscribeObj** pSub) { + if (pMnode == NULL || key == NULL || pSub == NULL){ + return TSDB_CODE_INVALID_PARA; + } SSdb *pSdb = pMnode->pSdb; *pSub = sdbAcquire(pSdb, SDB_SUBSCRIBE, key); if (*pSub == NULL) { @@ -1258,6 +1363,7 @@ int32_t mndAcquireSubscribeByKey(SMnode *pMnode, const char *key, SMqSubscribeOb } int32_t mndGetGroupNumByTopic(SMnode *pMnode, const char *topicName) { + if (pMnode == NULL || topicName == NULL) return 0; int32_t num = 0; SSdb *pSdb = pMnode->pSdb; @@ -1283,11 +1389,13 @@ int32_t mndGetGroupNumByTopic(SMnode *pMnode, const char *topicName) { } void mndReleaseSubscribe(SMnode *pMnode, SMqSubscribeObj *pSub) { + if (pMnode == NULL || pSub == NULL) return; SSdb *pSdb = pMnode->pSdb; sdbRelease(pSdb, pSub); } int32_t mndSetDropSubCommitLogs(SMnode *pMnode, STrans *pTrans, SMqSubscribeObj *pSub) { + if (pMnode == NULL || pTrans == NULL || pSub == NULL) return TSDB_CODE_INVALID_PARA; int32_t code = 0; SSdbRaw *pCommitRaw = mndSubActionEncode(pSub); MND_TMQ_NULL_CHECK(pCommitRaw); @@ -1302,6 +1410,7 @@ END: } int32_t mndDropSubByTopic(SMnode *pMnode, STrans *pTrans, const char *topicName) { + if (pMnode == NULL || pTrans == NULL || topicName == NULL) return TSDB_CODE_INVALID_PARA; SSdb *pSdb = pMnode->pSdb; int32_t code = 0; void *pIter = NULL; @@ -1337,6 +1446,9 @@ END: static int32_t buildResult(SSDataBlock *pBlock, int32_t *numOfRows, int64_t consumerId, const char* user, const char* fqdn, const char *topic, const char *cgroup, SArray *vgs, SArray *offsetRows) { + if 
(pBlock == NULL || numOfRows == NULL || topic == NULL || cgroup == NULL){ + return TSDB_CODE_INVALID_PARA; + } int32_t code = 0; int32_t sz = taosArrayGetSize(vgs); for (int32_t j = 0; j < sz; j++) { @@ -1424,6 +1536,9 @@ END: } int32_t mndRetrieveSubscribe(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rowsCapacity) { + if (pReq == NULL || pShow == NULL || pBlock == NULL){ + return TSDB_CODE_INVALID_PARA; + } SMnode *pMnode = pReq->info.node; SSdb *pSdb = pMnode->pSdb; int32_t numOfRows = 0; @@ -1485,10 +1600,16 @@ int32_t mndRetrieveSubscribe(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock return numOfRows; END: + taosRUnLockLatch(&pSub->lock); + sdbRelease(pSdb, pSub); + return code; } void mndCancelGetNextSubscribe(SMnode *pMnode, void *pIter) { + if (pMnode == NULL) { + return; + } SSdb *pSdb = pMnode->pSdb; sdbCancelFetchByType(pSdb, pIter, SDB_SUBSCRIBE); } diff --git a/source/dnode/mnode/impl/src/mndSync.c b/source/dnode/mnode/impl/src/mndSync.c index b5a74e865f..7ed970be62 100644 --- a/source/dnode/mnode/impl/src/mndSync.c +++ b/source/dnode/mnode/impl/src/mndSync.c @@ -14,11 +14,11 @@ */ #define _DEFAULT_SOURCE -#include "mndSync.h" #include "mndCluster.h" +#include "mndStream.h" +#include "mndSync.h" #include "mndTrans.h" #include "mndUser.h" -#include "mndStream.h" static int32_t mndSyncEqCtrlMsg(const SMsgCb *msgcb, SRpcMsg *pMsg) { if (pMsg == NULL || pMsg->pCont == NULL) { @@ -309,6 +309,9 @@ void mndRestoreFinish(const SSyncFSM *pFsm, const SyncIndex commitIdx) { } else { mInfo("vgId:1, sync restore finished, repeat call"); } + if (sdbAfterRestored(pMnode->pSdb) != 0) { + mError("failed to prepare sdb while start mnode"); + } } else { mInfo("vgId:1, sync restore finished"); } @@ -507,7 +510,7 @@ int32_t mndInitSync(SMnode *pMnode) { mError("failed to open sync, tsem_init, since %s", tstrerror(code)); TAOS_RETURN(code); } - pMgmt->sync = syncOpen(&syncInfo, 1); // always check + pMgmt->sync = syncOpen(&syncInfo, 1); // always check 
if (pMgmt->sync <= 0) { if (terrno != 0) code = terrno; mError("failed to open sync since %s", tstrerror(code)); @@ -546,7 +549,7 @@ void mndSyncCheckTimeout(SMnode *pMnode) { // pMgmt->transSeq = 0; // terrno = TSDB_CODE_SYN_TIMEOUT; // pMgmt->errCode = TSDB_CODE_SYN_TIMEOUT; - //if (tsem_post(&pMgmt->syncSem) < 0) { + // if (tsem_post(&pMgmt->syncSem) < 0) { // mError("failed to post sem"); //} } else { diff --git a/source/dnode/mnode/impl/src/mndTopic.c b/source/dnode/mnode/impl/src/mndTopic.c index 643bab568f..5c199eddbd 100644 --- a/source/dnode/mnode/impl/src/mndTopic.c +++ b/source/dnode/mnode/impl/src/mndTopic.c @@ -55,6 +55,9 @@ int32_t mndInitTopic(SMnode *pMnode) { .deleteFp = (SdbDeleteFp)mndTopicActionDelete, }; + if (pMnode == NULL) { + return TSDB_CODE_INVALID_PARA; + } mndSetMsgHandle(pMnode, TDMT_MND_TMQ_CREATE_TOPIC, mndProcessCreateTopicReq); mndSetMsgHandle(pMnode, TDMT_MND_TMQ_DROP_TOPIC, mndProcessDropTopicReq); mndSetMsgHandle(pMnode, TDMT_VND_TMQ_ADD_CHECKINFO_RSP, mndTransProcessRsp); @@ -81,6 +84,9 @@ void mndTopicGetShowName(const char* fullTopic, char* topic) { } SSdbRaw *mndTopicActionEncode(SMqTopicObj *pTopic) { + if (pTopic == NULL) { + return NULL; + } int32_t code = 0; int32_t lino = 0; terrno = TSDB_CODE_OUT_OF_MEMORY; @@ -172,6 +178,7 @@ TOPIC_ENCODE_OVER: } SSdbRow *mndTopicActionDecode(SSdbRaw *pRaw) { + if (pRaw == NULL) return NULL; int32_t code = 0; int32_t lino = 0; terrno = TSDB_CODE_OUT_OF_MEMORY; @@ -193,7 +200,7 @@ SSdbRow *mndTopicActionDecode(SSdbRaw *pRaw) { pTopic = sdbGetRowObj(pRow); if (pTopic == NULL) goto TOPIC_DECODE_OVER; - int32_t len; + int32_t len = 0; int32_t dataPos = 0; SDB_GET_BINARY(pRaw, dataPos, pTopic->name, TSDB_TOPIC_FNAME_LEN, TOPIC_DECODE_OVER); SDB_GET_BINARY(pRaw, dataPos, pTopic->db, TSDB_DB_FNAME_LEN, TOPIC_DECODE_OVER); @@ -292,11 +299,12 @@ TOPIC_DECODE_OVER: } static int32_t mndTopicActionInsert(SSdb *pSdb, SMqTopicObj *pTopic) { - mTrace("topic:%s perform insert action", pTopic->name); 
+ mTrace("topic:%s perform insert action", pTopic != NULL ? pTopic->name : "null"); return 0; } static int32_t mndTopicActionDelete(SSdb *pSdb, SMqTopicObj *pTopic) { + if (pTopic == NULL) return 0; mTrace("topic:%s perform delete action", pTopic->name); taosMemoryFreeClear(pTopic->sql); taosMemoryFreeClear(pTopic->ast); @@ -307,6 +315,7 @@ static int32_t mndTopicActionDelete(SSdb *pSdb, SMqTopicObj *pTopic) { } static int32_t mndTopicActionUpdate(SSdb *pSdb, SMqTopicObj *pOldTopic, SMqTopicObj *pNewTopic) { + if (pOldTopic == NULL || pNewTopic == NULL) return 0; mTrace("topic:%s perform update action", pOldTopic->name); (void)atomic_exchange_64(&pOldTopic->updateTime, pNewTopic->updateTime); (void)atomic_exchange_32(&pOldTopic->version, pNewTopic->version); @@ -315,6 +324,9 @@ static int32_t mndTopicActionUpdate(SSdb *pSdb, SMqTopicObj *pOldTopic, SMqTopic } int32_t mndAcquireTopic(SMnode *pMnode, const char *topicName, SMqTopicObj **pTopic) { + if (pMnode == NULL || topicName == NULL || pTopic == NULL){ + return TSDB_CODE_INVALID_PARA; + } SSdb *pSdb = pMnode->pSdb; *pTopic = sdbAcquire(pSdb, SDB_TOPIC, topicName); if (*pTopic == NULL) { @@ -324,11 +336,13 @@ int32_t mndAcquireTopic(SMnode *pMnode, const char *topicName, SMqTopicObj **pTo } void mndReleaseTopic(SMnode *pMnode, SMqTopicObj *pTopic) { + if (pMnode == NULL) return; SSdb *pSdb = pMnode->pSdb; sdbRelease(pSdb, pTopic); } static int32_t mndCheckCreateTopicReq(SCMCreateTopicReq *pCreate) { + if (pCreate == NULL) return TSDB_CODE_INVALID_PARA; if (pCreate->sql == NULL) return TSDB_CODE_MND_INVALID_TOPIC; if (pCreate->subType == TOPIC_SUB_TYPE__COLUMN) { @@ -343,6 +357,7 @@ static int32_t mndCheckCreateTopicReq(SCMCreateTopicReq *pCreate) { } static int32_t extractTopicTbInfo(SNode *pAst, SMqTopicObj *pTopic) { + if (pAst == NULL || pTopic == NULL) return TSDB_CODE_INVALID_PARA; SNodeList *pNodeList = NULL; int32_t code = 0; MND_TMQ_RETURN_CHECK(nodesCollectColumns((SSelectStmt *)pAst, SQL_CLAUSE_FROM, 
NULL, COLLECT_COL_TYPE_ALL, &pNodeList)); @@ -367,6 +382,7 @@ END: } static int32_t sendCheckInfoToVnode(STrans *pTrans, SMnode *pMnode, SMqTopicObj *topicObj){ + if (pTrans == NULL || pMnode == NULL || topicObj == NULL) return TSDB_CODE_INVALID_PARA; STqCheckInfo info = {0}; (void)memcpy(info.topic, topicObj->name, TSDB_TOPIC_FNAME_LEN); info.ntbUid = topicObj->ntbUid; @@ -388,7 +404,7 @@ static int32_t sendCheckInfoToVnode(STrans *pTrans, SMnode *pMnode, SMqTopicObj } // encoder check alter info - int32_t len; + int32_t len = 0; tEncodeSize(tEncodeSTqCheckInfo, &info, len, code); if (code != 0) { code = TSDB_CODE_OUT_OF_MEMORY; @@ -426,6 +442,7 @@ END: static int32_t mndCreateTopic(SMnode *pMnode, SRpcMsg *pReq, SCMCreateTopicReq *pCreate, SDbObj *pDb, const char *userName) { + if (pMnode == NULL || pReq == NULL || pCreate == NULL || pDb == NULL || userName == NULL) return TSDB_CODE_INVALID_PARA; mInfo("start to create topic:%s", pCreate->name); STrans *pTrans = NULL; int32_t code = 0; @@ -519,6 +536,9 @@ END: } static int32_t mndProcessCreateTopicReq(SRpcMsg *pReq) { + if (pReq == NULL || pReq->contLen <= 0) { + return TSDB_CODE_INVALID_MSG; + } SMnode *pMnode = pReq->info.node; int32_t code = TDB_CODE_SUCCESS; SMqTopicObj *pTopic = NULL; @@ -596,6 +616,9 @@ END: } static int32_t mndDropTopic(SMnode *pMnode, STrans *pTrans, SRpcMsg *pReq, SMqTopicObj *pTopic) { + if (pMnode == NULL || pTrans == NULL || pReq == NULL || pTopic == NULL) { + return TSDB_CODE_INVALID_MSG; + } int32_t code = 0; SSdbRaw *pCommitRaw = NULL; MND_TMQ_RETURN_CHECK(mndUserRemoveTopic(pMnode, pTrans, pTopic->name)); @@ -614,6 +637,9 @@ END: } bool checkTopic(SArray *topics, char *topicName){ + if (topics == NULL || topicName == NULL) { + return false; + } int32_t sz = taosArrayGetSize(topics); for (int32_t i = 0; i < sz; i++) { char *name = taosArrayGetP(topics, i); @@ -625,6 +651,9 @@ bool checkTopic(SArray *topics, char *topicName){ } static int32_t mndCheckConsumerByTopic(SMnode *pMnode, 
STrans *pTrans, char *topicName){ + if (pMnode == NULL || pTrans == NULL || topicName == NULL) { + return TSDB_CODE_INVALID_MSG; + } int32_t code = 0; SSdb *pSdb = pMnode->pSdb; void *pIter = NULL; @@ -653,6 +682,9 @@ END: } static int32_t mndDropCheckInfoByTopic(SMnode *pMnode, STrans *pTrans, SMqTopicObj *pTopic){ + if (pMnode == NULL || pTrans == NULL || pTopic == NULL) { + return TSDB_CODE_INVALID_MSG; + } // broadcast to all vnode void *pIter = NULL; SVgObj *pVgroup = NULL; @@ -693,6 +725,9 @@ END: } static int32_t mndProcessDropTopicReq(SRpcMsg *pReq) { + if (pReq == NULL) { + return TSDB_CODE_INVALID_MSG; + } SMnode *pMnode = pReq->info.node; SMDropTopicReq dropReq = {0}; int32_t code = 0; @@ -756,6 +791,9 @@ END: } int32_t mndGetNumOfTopics(SMnode *pMnode, char *dbName, int32_t *pNumOfTopics) { + if (pMnode == NULL || dbName == NULL || pNumOfTopics == NULL) { + return TSDB_CODE_INVALID_MSG; + } *pNumOfTopics = 0; SSdb *pSdb = pMnode->pSdb; @@ -786,6 +824,9 @@ int32_t mndGetNumOfTopics(SMnode *pMnode, char *dbName, int32_t *pNumOfTopics) { } static void schemaToJson(SSchema *schema, int32_t nCols, char *schemaJson){ + if (schema == NULL || schemaJson == NULL) { + return; + } char* string = NULL; int32_t code = 0; cJSON* columns = cJSON_CreateArray(); @@ -838,6 +879,9 @@ END: } static int32_t mndRetrieveTopic(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rowsCapacity) { + if (pReq == NULL || pShow == NULL || pBlock == NULL) { + return TSDB_CODE_INVALID_MSG; + } SMnode *pMnode = pReq->info.node; SSdb *pSdb = pMnode->pSdb; int32_t numOfRows = 0; @@ -945,11 +989,15 @@ END: } static void mndCancelGetNextTopic(SMnode *pMnode, void *pIter) { + if (pMnode == NULL) return; SSdb *pSdb = pMnode->pSdb; sdbCancelFetchByType(pSdb, pIter, SDB_TOPIC); } bool mndTopicExistsForDb(SMnode *pMnode, SDbObj *pDb) { + if (pMnode == NULL || pDb == NULL) { + return false; + } SSdb *pSdb = pMnode->pSdb; void *pIter = NULL; SMqTopicObj *pTopic = NULL; diff --git 
a/source/dnode/mnode/impl/src/mndUser.c b/source/dnode/mnode/impl/src/mndUser.c index e1518d3752..5b2a5fa8aa 100644 --- a/source/dnode/mnode/impl/src/mndUser.c +++ b/source/dnode/mnode/impl/src/mndUser.c @@ -1805,12 +1805,21 @@ _OVER: TAOS_RETURN(code); } -static int32_t mndCheckPasswordFmt(const char *pwd) { - int32_t len = strlen(pwd); - if (len < TSDB_PASSWORD_MIN_LEN || len > TSDB_PASSWORD_MAX_LEN) { +static int32_t mndCheckPasswordMinLen(const char *pwd, int32_t len) { + if (len < TSDB_PASSWORD_MIN_LEN) { return -1; } + return 0; +} +static int32_t mndCheckPasswordMaxLen(const char *pwd, int32_t len) { + if (len > TSDB_PASSWORD_MAX_LEN) { + return -1; + } + return 0; +} + +static int32_t mndCheckPasswordFmt(const char *pwd, int32_t len) { if (strcmp(pwd, "taosdata") == 0) { return 0; } @@ -1875,14 +1884,17 @@ static int32_t mndProcessCreateUserReq(SRpcMsg *pReq) { TAOS_CHECK_GOTO(TSDB_CODE_MND_INVALID_USER_FORMAT, &lino, _OVER); } - if (mndCheckPasswordFmt(createReq.pass) != 0) { - TAOS_CHECK_GOTO(TSDB_CODE_MND_INVALID_PASS_FORMAT, &lino, _OVER); - } - + int32_t len = strlen(createReq.pass); if (createReq.isImport != 1) { - if (strlen(createReq.pass) >= TSDB_PASSWORD_LEN) { + if (mndCheckPasswordMinLen(createReq.pass, len) != 0) { + TAOS_CHECK_GOTO(TSDB_CODE_PAR_PASSWD_TOO_SHORT_OR_EMPTY, &lino, _OVER); + } + if (mndCheckPasswordMaxLen(createReq.pass, len) != 0) { TAOS_CHECK_GOTO(TSDB_CODE_PAR_NAME_OR_PASSWD_TOO_LONG, &lino, _OVER); } + if (mndCheckPasswordFmt(createReq.pass, len) != 0) { + TAOS_CHECK_GOTO(TSDB_CODE_MND_INVALID_PASS_FORMAT, &lino, _OVER); + } } code = mndAcquireUser(pMnode, createReq.user, &pUser); @@ -2364,8 +2376,17 @@ static int32_t mndProcessAlterUserReq(SRpcMsg *pReq) { TAOS_CHECK_GOTO(TSDB_CODE_MND_INVALID_USER_FORMAT, &lino, _OVER); } - if (TSDB_ALTER_USER_PASSWD == alterReq.alterType && mndCheckPasswordFmt(alterReq.pass) != 0) { - TAOS_CHECK_GOTO(TSDB_CODE_MND_INVALID_PASS_FORMAT, &lino, _OVER); + if (TSDB_ALTER_USER_PASSWD == 
alterReq.alterType) { + int32_t len = strlen(alterReq.pass); + if (mndCheckPasswordMinLen(alterReq.pass, len) != 0) { + TAOS_CHECK_GOTO(TSDB_CODE_PAR_PASSWD_TOO_SHORT_OR_EMPTY, &lino, _OVER); + } + if (mndCheckPasswordMaxLen(alterReq.pass, len) != 0) { + TAOS_CHECK_GOTO(TSDB_CODE_PAR_NAME_OR_PASSWD_TOO_LONG, &lino, _OVER); + } + if (mndCheckPasswordFmt(alterReq.pass, len) != 0) { + TAOS_CHECK_GOTO(TSDB_CODE_MND_INVALID_PASS_FORMAT, &lino, _OVER); + } } TAOS_CHECK_GOTO(mndAcquireUser(pMnode, alterReq.user, &pUser), &lino, _OVER); diff --git a/source/dnode/mnode/sdb/inc/sdb.h b/source/dnode/mnode/sdb/inc/sdb.h index eff26bf412..114a5ca59b 100644 --- a/source/dnode/mnode/sdb/inc/sdb.h +++ b/source/dnode/mnode/sdb/inc/sdb.h @@ -117,7 +117,7 @@ typedef int32_t (*SdbInsertFp)(SSdb *pSdb, void *pObj); typedef int32_t (*SdbUpdateFp)(SSdb *pSdb, void *pSrcObj, void *pDstObj); typedef int32_t (*SdbDeleteFp)(SSdb *pSdb, void *pObj, bool callFunc); typedef int32_t (*SdbDeployFp)(SMnode *pMnode); -typedef int32_t (*SdbPrepareFp)(SMnode *pMnode); +typedef int32_t (*SdbAfterRestoredFp)(SMnode *pMnode); typedef int32_t (*SdbValidateFp)(SMnode *pMnode, void *pTrans, SSdbRaw *pRaw); typedef SSdbRow *(*SdbDecodeFp)(SSdbRaw *pRaw); typedef SSdbRaw *(*SdbEncodeFp)(void *pObj); @@ -188,31 +188,31 @@ typedef struct SSdbRow { } SSdbRow; typedef struct SSdb { - SMnode *pMnode; - SWal *pWal; - int64_t sync; - char *currDir; - char *tmpDir; - int64_t commitIndex; - int64_t commitTerm; - int64_t commitConfig; - int64_t applyIndex; - int64_t applyTerm; - int64_t applyConfig; - int64_t tableVer[SDB_MAX]; - int64_t maxId[SDB_MAX]; - EKeyType keyTypes[SDB_MAX]; - SHashObj *hashObjs[SDB_MAX]; - TdThreadRwlock locks[SDB_MAX]; - SdbInsertFp insertFps[SDB_MAX]; - SdbUpdateFp updateFps[SDB_MAX]; - SdbDeleteFp deleteFps[SDB_MAX]; - SdbDeployFp deployFps[SDB_MAX]; - SdbPrepareFp prepareFps[SDB_MAX]; - SdbEncodeFp encodeFps[SDB_MAX]; - SdbDecodeFp decodeFps[SDB_MAX]; - SdbValidateFp validateFps[SDB_MAX]; 
- TdThreadMutex filelock; + SMnode *pMnode; + SWal *pWal; + int64_t sync; + char *currDir; + char *tmpDir; + int64_t commitIndex; + int64_t commitTerm; + int64_t commitConfig; + int64_t applyIndex; + int64_t applyTerm; + int64_t applyConfig; + int64_t tableVer[SDB_MAX]; + int64_t maxId[SDB_MAX]; + EKeyType keyTypes[SDB_MAX]; + SHashObj *hashObjs[SDB_MAX]; + TdThreadRwlock locks[SDB_MAX]; + SdbInsertFp insertFps[SDB_MAX]; + SdbUpdateFp updateFps[SDB_MAX]; + SdbDeleteFp deleteFps[SDB_MAX]; + SdbDeployFp deployFps[SDB_MAX]; + SdbAfterRestoredFp afterRestoredFps[SDB_MAX]; + SdbEncodeFp encodeFps[SDB_MAX]; + SdbDecodeFp decodeFps[SDB_MAX]; + SdbValidateFp validateFps[SDB_MAX]; + TdThreadMutex filelock; } SSdb; typedef struct SSdbIter { @@ -222,16 +222,16 @@ typedef struct SSdbIter { } SSdbIter; typedef struct { - ESdbType sdbType; - EKeyType keyType; - SdbDeployFp deployFp; - SdbPrepareFp prepareFp; - SdbEncodeFp encodeFp; - SdbDecodeFp decodeFp; - SdbInsertFp insertFp; - SdbUpdateFp updateFp; - SdbDeleteFp deleteFp; - SdbValidateFp validateFp; + ESdbType sdbType; + EKeyType keyType; + SdbDeployFp deployFp; + SdbAfterRestoredFp afterRestoredFp; + SdbEncodeFp encodeFp; + SdbDecodeFp decodeFp; + SdbInsertFp insertFp; + SdbUpdateFp updateFp; + SdbDeleteFp deleteFp; + SdbValidateFp validateFp; } SSdbTable; typedef struct SSdbOpt { @@ -279,7 +279,7 @@ int32_t sdbDeploy(SSdb *pSdb); * @param pSdb The sdb object. * @return int32_t 0 for success, -1 for failure. */ -int32_t sdbPrepare(SSdb *pSdb); +int32_t sdbAfterRestored(SSdb *pSdb); /** * @brief Load sdb from file. 
diff --git a/source/dnode/mnode/sdb/src/sdb.c b/source/dnode/mnode/sdb/src/sdb.c index 6a273f9ab3..ff3d87819b 100644 --- a/source/dnode/mnode/sdb/src/sdb.c +++ b/source/dnode/mnode/sdb/src/sdb.c @@ -127,7 +127,7 @@ int32_t sdbSetTable(SSdb *pSdb, SSdbTable table) { pSdb->deployFps[sdbType] = table.deployFp; pSdb->encodeFps[sdbType] = table.encodeFp; pSdb->decodeFps[sdbType] = table.decodeFp; - pSdb->prepareFps[sdbType] = table.prepareFp; + pSdb->afterRestoredFps[sdbType] = table.afterRestoredFp; pSdb->validateFps[sdbType] = table.validateFp; int32_t hashType = 0; diff --git a/source/dnode/mnode/sdb/src/sdbFile.c b/source/dnode/mnode/sdb/src/sdbFile.c index 2d752a2aff..4b1404e41d 100644 --- a/source/dnode/mnode/sdb/src/sdbFile.c +++ b/source/dnode/mnode/sdb/src/sdbFile.c @@ -48,12 +48,12 @@ static int32_t sdbDeployData(SSdb *pSdb) { return 0; } -static int32_t sdbPrepareData(SSdb *pSdb) { +static int32_t sdbAfterRestoredData(SSdb *pSdb) { int32_t code = 0; mInfo("start to prepare sdb"); for (int32_t i = SDB_MAX - 1; i >= 0; --i) { - SdbPrepareFp fp = pSdb->prepareFps[i]; + SdbAfterRestoredFp fp = pSdb->afterRestoredFps[i]; if (fp == NULL) continue; mInfo("start to prepare sdb:%s", sdbTableName(i)); @@ -666,9 +666,9 @@ int32_t sdbDeploy(SSdb *pSdb) { return 0; } -int32_t sdbPrepare(SSdb *pSdb) { +int32_t sdbAfterRestored(SSdb *pSdb) { int32_t code = 0; - code = sdbPrepareData(pSdb); + code = sdbAfterRestoredData(pSdb); if (code != 0) { TAOS_RETURN(code); } diff --git a/source/dnode/vnode/inc/vnode.h b/source/dnode/vnode/inc/vnode.h index 5d4ffc604a..b33bdb0976 100644 --- a/source/dnode/vnode/inc/vnode.h +++ b/source/dnode/vnode/inc/vnode.h @@ -51,7 +51,7 @@ extern const SVnodeCfg vnodeCfgDefault; typedef void (*StopDnodeFp)(); -int32_t vnodeInit(int32_t nthreads, StopDnodeFp stopDnodeFp); +int32_t vnodeInit(StopDnodeFp stopDnodeFp); void vnodeCleanup(); int32_t vnodeCreate(const char *path, SVnodeCfg *pCfg, int32_t diskPrimary, STfs *pTfs); bool 
vnodeShouldRemoveWal(SVnode *pVnode); @@ -327,7 +327,7 @@ struct SVnodeCfg { int16_t hashSuffix; int32_t tsdbPageSize; int32_t tdbEncryptAlgorithm; - char tdbEncryptKey[ENCRYPT_KEY_LEN]; + char tdbEncryptKey[ENCRYPT_KEY_LEN + 1]; int32_t s3ChunkSize; int32_t s3KeepLocal; int8_t s3Compact; diff --git a/source/dnode/vnode/src/inc/tq.h b/source/dnode/vnode/src/inc/tq.h index 3c40100f9d..28a0d11757 100644 --- a/source/dnode/vnode/src/inc/tq.h +++ b/source/dnode/vnode/src/inc/tq.h @@ -127,11 +127,11 @@ void tqPushEmptyDataRsp(STqHandle* pHandle, int32_t vgId); int32_t tqMetaOpen(STQ* pTq); void tqMetaClose(STQ* pTq); int32_t tqMetaSaveHandle(STQ* pTq, const char* key, const STqHandle* pHandle); -int32_t tqMetaSaveInfo(STQ* pTq, TTB* ttb, const void* key, int32_t kLen, const void* value, int32_t vLen); -int32_t tqMetaDeleteInfo(STQ* pTq, TTB* ttb, const void* key, int32_t kLen); +int32_t tqMetaSaveInfo(STQ* pTq, TTB* ttb, const void* key, uint32_t kLen, const void* value, uint32_t vLen); +int32_t tqMetaDeleteInfo(STQ* pTq, TTB* ttb, const void* key, uint32_t kLen); int32_t tqMetaCreateHandle(STQ* pTq, SMqRebVgReq* req, STqHandle* handle); -int32_t tqMetaDecodeCheckInfo(STqCheckInfo *info, void *pVal, int32_t vLen); -int32_t tqMetaDecodeOffsetInfo(STqOffset *info, void *pVal, int32_t vLen); +int32_t tqMetaDecodeCheckInfo(STqCheckInfo *info, void *pVal, uint32_t vLen); +int32_t tqMetaDecodeOffsetInfo(STqOffset *info, void *pVal, uint32_t vLen); int32_t tqMetaSaveOffset(STQ* pTq, STqOffset* pOffset); int32_t tqMetaGetHandle(STQ* pTq, const char* key, STqHandle** pHandle); int32_t tqMetaGetOffset(STQ* pTq, const char* subkey, STqOffset** pOffset); diff --git a/source/dnode/vnode/src/inc/tsdb.h b/source/dnode/vnode/src/inc/tsdb.h index 29248e360a..47890e9b4b 100644 --- a/source/dnode/vnode/src/inc/tsdb.h +++ b/source/dnode/vnode/src/inc/tsdb.h @@ -1083,9 +1083,6 @@ void tsdbRemoveFile(const char *path); } \ } while (0) -int32_t tsdbInit(); -void tsdbCleanUp(); - #ifdef 
__cplusplus } #endif diff --git a/source/dnode/vnode/src/inc/vnd.h b/source/dnode/vnode/src/inc/vnd.h index ff622d2dab..b1a5ca4709 100644 --- a/source/dnode/vnode/src/inc/vnd.h +++ b/source/dnode/vnode/src/inc/vnd.h @@ -55,16 +55,32 @@ typedef enum { EVA_PRIORITY_LOW, } EVAPriority; -int32_t vnodeAsyncOpen(int32_t numOfThreads); +typedef enum { + EVA_TASK_COMMIT = 1, + EVA_TASK_MERGE, + EVA_TASK_COMPACT, + EVA_TASK_RETENTION, +} EVATaskT; + +#define COMMIT_TASK_ASYNC 1 +#define MERGE_TASK_ASYNC 2 +#define COMPACT_TASK_ASYNC 3 +#define RETENTION_TASK_ASYNC 4 + +int32_t vnodeAsyncOpen(); void vnodeAsyncClose(); int32_t vnodeAChannelInit(int64_t async, SVAChannelID* channelID); int32_t vnodeAChannelDestroy(SVAChannelID* channelID, bool waitRunning); -int32_t vnodeAsync(SVAChannelID* channelID, EVAPriority priority, int32_t (*execute)(void*), void (*complete)(void*), - void* arg, SVATaskID* taskID); +int32_t vnodeAsync(int64_t async, EVAPriority priority, int32_t (*execute)(void*), void (*complete)(void*), void* arg, + SVATaskID* taskID); +int32_t vnodeAsyncC(SVAChannelID* channelID, EVAPriority priority, int32_t (*execute)(void*), void (*complete)(void*), + void* arg, SVATaskID* taskID); void vnodeAWait(SVATaskID* taskID); int32_t vnodeACancel(SVATaskID* taskID); int32_t vnodeAsyncSetWorkers(int64_t async, int32_t numWorkers); +const char* vnodeGetATaskName(EVATaskT task); + // vnodeBufPool.c typedef struct SVBufPoolNode SVBufPoolNode; struct SVBufPoolNode { diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h index ef4b233f94..4f1ecf81dd 100644 --- a/source/dnode/vnode/src/inc/vnodeInt.h +++ b/source/dnode/vnode/src/inc/vnodeInt.h @@ -479,8 +479,7 @@ struct SVnode { SVBufPool* onRecycle; // commit variables - SVAChannelID commitChannel; - SVATaskID commitTask; + SVATaskID commitTask; SMeta* pMeta; SSma* pSma; diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index bb50499ea8..03037e529b 100644 --- 
a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -25,11 +25,12 @@ // 2: wait to be inited or cleanup static int32_t tqInitialize(STQ* pTq); -static FORCE_INLINE bool tqIsHandleExec(STqHandle* pHandle) { return TMQ_HANDLE_STATUS_EXEC == pHandle->status; } -static FORCE_INLINE void tqSetHandleExec(STqHandle* pHandle) { pHandle->status = TMQ_HANDLE_STATUS_EXEC; } -static FORCE_INLINE void tqSetHandleIdle(STqHandle* pHandle) { pHandle->status = TMQ_HANDLE_STATUS_IDLE; } +static FORCE_INLINE bool tqIsHandleExec(STqHandle* pHandle) { return pHandle != NULL ? TMQ_HANDLE_STATUS_EXEC == pHandle->status : true; } +static FORCE_INLINE void tqSetHandleExec(STqHandle* pHandle) { if (pHandle != NULL) pHandle->status = TMQ_HANDLE_STATUS_EXEC; } +static FORCE_INLINE void tqSetHandleIdle(STqHandle* pHandle) { if (pHandle != NULL) pHandle->status = TMQ_HANDLE_STATUS_IDLE; } void tqDestroyTqHandle(void* data) { + if (data == NULL) return; STqHandle* pData = (STqHandle*)data; qDestroyTask(pData->execHandle.task); @@ -59,11 +60,17 @@ void tqDestroyTqHandle(void* data) { } static bool tqOffsetEqual(const STqOffset* pLeft, const STqOffset* pRight) { + if (pLeft == NULL || pRight == NULL) { + return false; + } return pLeft->val.type == TMQ_OFFSET__LOG && pRight->val.type == TMQ_OFFSET__LOG && pLeft->val.version == pRight->val.version; } int32_t tqOpen(const char* path, SVnode* pVnode) { + if (path == NULL || pVnode == NULL) { + return TSDB_CODE_INVALID_PARA; + } STQ* pTq = taosMemoryCalloc(1, sizeof(STQ)); if (pTq == NULL) { return terrno; @@ -104,6 +111,9 @@ int32_t tqOpen(const char* path, SVnode* pVnode) { } int32_t tqInitialize(STQ* pTq) { + if (pTq == NULL) { + return TSDB_CODE_INVALID_PARA; + } int32_t vgId = TD_VID(pTq->pVnode); int32_t code = streamMetaOpen(pTq->path, pTq, tqBuildStreamTask, tqExpandStreamTask, vgId, -1, tqStartTaskCompleteCallback, &pTq->pStreamMeta); @@ -157,6 +167,9 @@ void tqNotifyClose(STQ* pTq) { } void tqPushEmptyDataRsp(STqHandle* 
pHandle, int32_t vgId) { + if (pHandle == NULL) { + return; + } int32_t code = 0; SMqPollReq req = {0}; code = tDeserializeSMqPollReq(pHandle->msg->pCont, pHandle->msg->contLen, &req); @@ -186,6 +199,9 @@ void tqPushEmptyDataRsp(STqHandle* pHandle, int32_t vgId) { int32_t tqSendDataRsp(STqHandle* pHandle, const SRpcMsg* pMsg, const SMqPollReq* pReq, const SMqDataRsp* pRsp, int32_t type, int32_t vgId) { + if (pHandle == NULL || pMsg == NULL || pReq == NULL || pRsp == NULL) { + return TSDB_CODE_INVALID_PARA; + } int64_t sver = 0, ever = 0; walReaderValidVersionRange(pHandle->execHandle.pTqReader->pWalReader, &sver, &ever); @@ -201,6 +217,9 @@ int32_t tqSendDataRsp(STqHandle* pHandle, const SRpcMsg* pMsg, const SMqPollReq* } int32_t tqProcessOffsetCommitReq(STQ* pTq, int64_t sversion, char* msg, int32_t msgLen) { + if (pTq == NULL) { + return TSDB_CODE_INVALID_PARA; + } SMqVgOffset vgOffset = {0}; int32_t vgId = TD_VID(pTq->pVnode); @@ -243,7 +262,7 @@ int32_t tqProcessOffsetCommitReq(STQ* pTq, int64_t sversion, char* msg, int32_t } if (tqMetaSaveInfo(pTq, pTq->pOffsetStore, pOffset->subKey, strlen(pOffset->subKey), msg, - msgLen - sizeof(vgOffset.consumerId)) < 0) { + msgLen >= sizeof(vgOffset.consumerId) ? 
msgLen - sizeof(vgOffset.consumerId) : 0) < 0) { terrno = TSDB_CODE_OUT_OF_MEMORY; return -1; } @@ -255,6 +274,9 @@ end: } int32_t tqProcessSeekReq(STQ* pTq, SRpcMsg* pMsg) { + if (pTq == NULL || pMsg == NULL) { + return TSDB_CODE_INVALID_PARA; + } SMqSeekReq req = {0}; int32_t vgId = TD_VID(pTq->pVnode); SRpcMsg rsp = {.info = pMsg->info}; @@ -297,6 +319,9 @@ end: } int32_t tqCheckColModifiable(STQ* pTq, int64_t tbUid, int32_t colId) { + if (pTq == NULL) { + return TSDB_CODE_INVALID_PARA; + } void* pIter = NULL; while (1) { @@ -327,6 +352,9 @@ int32_t tqCheckColModifiable(STQ* pTq, int64_t tbUid, int32_t colId) { } int32_t tqProcessPollPush(STQ* pTq, SRpcMsg* pMsg) { + if (pTq == NULL) { + return TSDB_CODE_INVALID_PARA; + } int32_t vgId = TD_VID(pTq->pVnode); taosWLockLatch(&pTq->lock); if (taosHashGetSize(pTq->pPushMgr) > 0) { @@ -362,6 +390,9 @@ int32_t tqProcessPollPush(STQ* pTq, SRpcMsg* pMsg) { } int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) { + if (pTq == NULL || pMsg == NULL) { + return TSDB_CODE_INVALID_PARA; + } SMqPollReq req = {0}; int code = tDeserializeSMqPollReq(pMsg->pCont, pMsg->contLen, &req); if (code < 0) { @@ -439,6 +470,9 @@ END: } int32_t tqProcessVgCommittedInfoReq(STQ* pTq, SRpcMsg* pMsg) { + if (pTq == NULL || pMsg == NULL) { + return TSDB_CODE_INVALID_PARA; + } void* data = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)); int32_t len = pMsg->contLen - sizeof(SMsgHead); @@ -485,6 +519,9 @@ int32_t tqProcessVgCommittedInfoReq(STQ* pTq, SRpcMsg* pMsg) { } int32_t tqProcessVgWalInfoReq(STQ* pTq, SRpcMsg* pMsg) { + if (pTq == NULL || pMsg == NULL) { + return TSDB_CODE_INVALID_PARA; + } int32_t code = 0; SMqPollReq req = {0}; if (tDeserializeSMqPollReq(pMsg->pCont, pMsg->contLen, &req) < 0) { @@ -570,6 +607,9 @@ END: } int32_t tqProcessDeleteSubReq(STQ* pTq, int64_t sversion, char* msg, int32_t msgLen) { + if (pTq == NULL || msg == NULL) { + return TSDB_CODE_INVALID_PARA; + } SMqVDeleteReq* pReq = (SMqVDeleteReq*)msg; int32_t vgId = 
TD_VID(pTq->pVnode); @@ -616,8 +656,11 @@ int32_t tqProcessDeleteSubReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg } int32_t tqProcessAddCheckInfoReq(STQ* pTq, int64_t sversion, char* msg, int32_t msgLen) { + if (pTq == NULL || msg == NULL) { + return TSDB_CODE_INVALID_PARA; + } STqCheckInfo info = {0}; - int32_t code = tqMetaDecodeCheckInfo(&info, msg, msgLen); + int32_t code = tqMetaDecodeCheckInfo(&info, msg, msgLen >= 0 ? msgLen : 0); if (code != 0) { return code; } @@ -628,10 +671,13 @@ int32_t tqProcessAddCheckInfoReq(STQ* pTq, int64_t sversion, char* msg, int32_t return code; } - return tqMetaSaveInfo(pTq, pTq->pCheckStore, info.topic, strlen(info.topic), msg, msgLen); + return tqMetaSaveInfo(pTq, pTq->pCheckStore, info.topic, strlen(info.topic), msg, msgLen >= 0 ? msgLen : 0); } int32_t tqProcessDelCheckInfoReq(STQ* pTq, int64_t sversion, char* msg, int32_t msgLen) { + if (pTq == NULL || msg == NULL) { + return TSDB_CODE_INVALID_PARA; + } if (taosHashRemove(pTq->pCheckInfo, msg, strlen(msg)) < 0) { return TSDB_CODE_TSC_INTERNAL_ERROR; } @@ -639,6 +685,9 @@ int32_t tqProcessDelCheckInfoReq(STQ* pTq, int64_t sversion, char* msg, int32_t } int32_t tqProcessSubscribeReq(STQ* pTq, int64_t sversion, char* msg, int32_t msgLen) { + if (pTq == NULL || msg == NULL) { + return TSDB_CODE_INVALID_PARA; + } int ret = 0; SMqRebVgReq req = {0}; SDecoder dc = {0}; diff --git a/source/dnode/vnode/src/tq/tqMeta.c b/source/dnode/vnode/src/tq/tqMeta.c index 89350e761f..580828b089 100644 --- a/source/dnode/vnode/src/tq/tqMeta.c +++ b/source/dnode/vnode/src/tq/tqMeta.c @@ -16,6 +16,9 @@ #include "tq.h" int32_t tEncodeSTqHandle(SEncoder* pEncoder, const STqHandle* pHandle) { + if (pEncoder == NULL || pHandle == NULL) { + return TSDB_CODE_INVALID_PARA; + } int32_t code = 0; int32_t lino; @@ -54,6 +57,9 @@ _exit: } int32_t tDecodeSTqHandle(SDecoder* pDecoder, STqHandle* pHandle) { + if (pDecoder == NULL || pHandle == NULL) { + return TSDB_CODE_INVALID_PARA; + } int32_t code 
= 0; int32_t lino; @@ -91,7 +97,10 @@ _exit: return code; } -int32_t tqMetaDecodeCheckInfo(STqCheckInfo* info, void* pVal, int32_t vLen) { +int32_t tqMetaDecodeCheckInfo(STqCheckInfo* info, void* pVal, uint32_t vLen) { + if (info == NULL || pVal == NULL) { + return TSDB_CODE_INVALID_PARA; + } SDecoder decoder = {0}; tDecoderInit(&decoder, (uint8_t*)pVal, vLen); int32_t code = tDecodeSTqCheckInfo(&decoder, info); @@ -104,7 +113,10 @@ int32_t tqMetaDecodeCheckInfo(STqCheckInfo* info, void* pVal, int32_t vLen) { return code; } -int32_t tqMetaDecodeOffsetInfo(STqOffset* info, void* pVal, int32_t vLen) { +int32_t tqMetaDecodeOffsetInfo(STqOffset* info, void* pVal, uint32_t vLen) { + if (info == NULL || pVal == NULL) { + return TSDB_CODE_INVALID_PARA; + } SDecoder decoder = {0}; tDecoderInit(&decoder, (uint8_t*)pVal, vLen); int32_t code = tDecodeSTqOffset(&decoder, info); @@ -118,9 +130,12 @@ int32_t tqMetaDecodeOffsetInfo(STqOffset* info, void* pVal, int32_t vLen) { } int32_t tqMetaSaveOffset(STQ* pTq, STqOffset* pOffset) { + if (pTq == NULL || pOffset == NULL) { + return TSDB_CODE_INVALID_PARA; + } void* buf = NULL; int32_t code = TDB_CODE_SUCCESS; - int32_t vlen; + uint32_t vlen; SEncoder encoder = {0}; tEncodeSize(tEncodeSTqOffset, pOffset, vlen, code); if (code < 0) { @@ -147,7 +162,10 @@ END: return code; } -int32_t tqMetaSaveInfo(STQ* pTq, TTB* ttb, const void* key, int32_t kLen, const void* value, int32_t vLen) { +int32_t tqMetaSaveInfo(STQ* pTq, TTB* ttb, const void* key, uint32_t kLen, const void* value, uint32_t vLen) { + if (pTq == NULL || ttb == NULL || key == NULL || value == NULL) { + return TSDB_CODE_INVALID_PARA; + } int32_t code = TDB_CODE_SUCCESS; TXN* txn = NULL; @@ -164,7 +182,10 @@ END: return code; } -int32_t tqMetaDeleteInfo(STQ* pTq, TTB* ttb, const void* key, int32_t kLen) { +int32_t tqMetaDeleteInfo(STQ* pTq, TTB* ttb, const void* key, uint32_t kLen) { + if (pTq == NULL || ttb == NULL || key == NULL) { + return TSDB_CODE_INVALID_PARA; + } 
int32_t code = TDB_CODE_SUCCESS; TXN* txn = NULL; @@ -182,6 +203,9 @@ END: } int32_t tqMetaGetOffset(STQ* pTq, const char* subkey, STqOffset** pOffset) { + if (pTq == NULL || subkey == NULL || pOffset == NULL) { + return TSDB_CODE_INVALID_PARA; + } void* data = taosHashGet(pTq->pOffset, subkey, strlen(subkey)); if (data == NULL) { int vLen = 0; @@ -191,7 +215,7 @@ int32_t tqMetaGetOffset(STQ* pTq, const char* subkey, STqOffset** pOffset) { } STqOffset offset = {0}; - if (tqMetaDecodeOffsetInfo(&offset, data, vLen) != TDB_CODE_SUCCESS) { + if (tqMetaDecodeOffsetInfo(&offset, data, vLen >= 0 ? vLen : 0) != TDB_CODE_SUCCESS) { tdbFree(data); return TSDB_CODE_OUT_OF_MEMORY; } @@ -214,8 +238,11 @@ int32_t tqMetaGetOffset(STQ* pTq, const char* subkey, STqOffset** pOffset) { } int32_t tqMetaSaveHandle(STQ* pTq, const char* key, const STqHandle* pHandle) { + if (pTq == NULL || key == NULL || pHandle == NULL) { + return TSDB_CODE_INVALID_PARA; + } int32_t code = TDB_CODE_SUCCESS; - int32_t vlen; + uint32_t vlen; void* buf = NULL; SEncoder encoder = {0}; tEncodeSize(tEncodeSTqHandle, pHandle, vlen, code); @@ -238,7 +265,7 @@ int32_t tqMetaSaveHandle(STQ* pTq, const char* key, const STqHandle* pHandle) { goto END; } - TQ_ERR_GO_TO_END(tqMetaSaveInfo(pTq, pTq->pExecStore, key, (int)strlen(key), buf, vlen)); + TQ_ERR_GO_TO_END(tqMetaSaveInfo(pTq, pTq->pExecStore, key, strlen(key), buf, vlen)); END: tEncoderClear(&encoder); @@ -247,6 +274,9 @@ END: } static int tqMetaInitHandle(STQ* pTq, STqHandle* handle) { + if (pTq == NULL || handle == NULL) { + return TSDB_CODE_INVALID_PARA; + } int32_t code = TDB_CODE_SUCCESS; SVnode* pVnode = pTq->pVnode; @@ -318,7 +348,10 @@ END: return code; } -static int32_t tqMetaRestoreHandle(STQ* pTq, void* pVal, int vLen, STqHandle* handle) { +static int32_t tqMetaRestoreHandle(STQ* pTq, void* pVal, uint32_t vLen, STqHandle* handle) { + if (pTq == NULL || pVal == NULL || handle == NULL) { + return TSDB_CODE_INVALID_PARA; + } int32_t vgId = 
TD_VID(pTq->pVnode); SDecoder decoder = {0}; int32_t code = TDB_CODE_SUCCESS; @@ -335,6 +368,9 @@ END: } int32_t tqMetaCreateHandle(STQ* pTq, SMqRebVgReq* req, STqHandle* handle) { + if (pTq == NULL || req == NULL || handle == NULL) { + return TSDB_CODE_INVALID_PARA; + } int32_t vgId = TD_VID(pTq->pVnode); (void)memcpy(handle->subKey, req->subKey, TSDB_SUBSCRIBE_KEY_LEN); @@ -375,6 +411,9 @@ int32_t tqMetaCreateHandle(STQ* pTq, SMqRebVgReq* req, STqHandle* handle) { } static int32_t tqMetaTransformInfo(TDB* pMetaDB, TTB* pOld, TTB* pNew) { + if (pMetaDB == NULL || pOld == NULL || pNew == NULL) { + return TSDB_CODE_INVALID_PARA; + } TBC* pCur = NULL; void* pKey = NULL; int kLen = 0; @@ -404,6 +443,9 @@ END: } int32_t tqMetaGetHandle(STQ* pTq, const char* key, STqHandle** pHandle) { + if (pTq == NULL || key == NULL || pHandle == NULL) { + return TSDB_CODE_INVALID_PARA; + } void* data = taosHashGet(pTq->pHandle, key, strlen(key)); if (data == NULL) { int vLen = 0; @@ -412,7 +454,7 @@ int32_t tqMetaGetHandle(STQ* pTq, const char* key, STqHandle** pHandle) { return TSDB_CODE_MND_SUBSCRIBE_NOT_EXIST; } STqHandle handle = {0}; - if (tqMetaRestoreHandle(pTq, data, vLen, &handle) != 0) { + if (tqMetaRestoreHandle(pTq, data, vLen >= 0 ? 
vLen : 0, &handle) != 0) { tdbFree(data); tqDestroyTqHandle(&handle); return TSDB_CODE_OUT_OF_MEMORY; @@ -429,6 +471,9 @@ int32_t tqMetaGetHandle(STQ* pTq, const char* key, STqHandle** pHandle) { } int32_t tqMetaOpenTdb(STQ* pTq) { + if (pTq == NULL) { + return TSDB_CODE_INVALID_PARA; + } int32_t code = TDB_CODE_SUCCESS; TQ_ERR_GO_TO_END(tdbOpen(pTq->path, 16 * 1024, 1, &pTq->pMetaDB, 0, 0, NULL)); TQ_ERR_GO_TO_END(tdbTbOpen("tq.db", -1, -1, NULL, pTq->pMetaDB, &pTq->pExecStore, 0)); @@ -440,6 +485,9 @@ END: } static int32_t replaceTqPath(char** path) { + if (path == NULL || *path == NULL) { + return TSDB_CODE_INVALID_PARA; + } char* tpath = NULL; int32_t code = tqBuildFName(&tpath, *path, TQ_SUBSCRIBE_NAME); if (code != 0) { @@ -451,6 +499,9 @@ static int32_t replaceTqPath(char** path) { } static int32_t tqMetaRestoreCheckInfo(STQ* pTq) { + if (pTq == NULL) { + return TSDB_CODE_INVALID_PARA; + } TBC* pCur = NULL; void* pKey = NULL; int kLen = 0; @@ -463,7 +514,7 @@ static int32_t tqMetaRestoreCheckInfo(STQ* pTq) { TQ_ERR_GO_TO_END(tdbTbcMoveToFirst(pCur)); while (tdbTbcNext(pCur, &pKey, &kLen, &pVal, &vLen) == 0) { - TQ_ERR_GO_TO_END(tqMetaDecodeCheckInfo(&info, pVal, vLen)); + TQ_ERR_GO_TO_END(tqMetaDecodeCheckInfo(&info, pVal, vLen >= 0 ? 
vLen : 0)); TQ_ERR_GO_TO_END(taosHashPut(pTq->pCheckInfo, info.topic, strlen(info.topic), &info, sizeof(STqCheckInfo))); } info.colIdList = NULL; @@ -477,6 +528,9 @@ END: } int32_t tqMetaOpen(STQ* pTq) { + if (pTq == NULL) { + return TSDB_CODE_INVALID_PARA; + } char* maindb = NULL; char* offsetNew = NULL; int32_t code = TDB_CODE_SUCCESS; @@ -504,6 +558,9 @@ END: } int32_t tqMetaTransform(STQ* pTq) { + if (pTq == NULL) { + return TSDB_CODE_INVALID_PARA; + } int32_t code = TDB_CODE_SUCCESS; TDB* pMetaDB = NULL; TTB* pExecStore = NULL; @@ -543,6 +600,9 @@ END: } void tqMetaClose(STQ* pTq) { + if (pTq == NULL) { + return; + } int32_t ret = 0; if (pTq->pExecStore) { tdbTbClose(pTq->pExecStore); diff --git a/source/dnode/vnode/src/tq/tqOffset.c b/source/dnode/vnode/src/tq/tqOffset.c index f392269b9f..c42959971b 100644 --- a/source/dnode/vnode/src/tq/tqOffset.c +++ b/source/dnode/vnode/src/tq/tqOffset.c @@ -17,6 +17,9 @@ #include "tq.h" int32_t tqBuildFName(char** data, const char* path, char* name) { + if (data == NULL || path == NULL || name == NULL) { + return TSDB_CODE_INVALID_MSG; + } int32_t len = strlen(path) + strlen(name) + 2; char* fname = taosMemoryCalloc(1, len); if(fname == NULL) { @@ -33,6 +36,9 @@ int32_t tqBuildFName(char** data, const char* path, char* name) { } int32_t tqOffsetRestoreFromFile(STQ* pTq, char* name) { + if (pTq == NULL || name == NULL) { + return TSDB_CODE_INVALID_MSG; + } int32_t code = TDB_CODE_SUCCESS; void* pMemBuf = NULL; @@ -54,6 +60,10 @@ int32_t tqOffsetRestoreFromFile(STQ* pTq, char* name) { } total += INT_BYTES; size = htonl(size); + if (size <= 0) { + code = TSDB_CODE_INVALID_MSG; + goto END; + } pMemBuf = taosMemoryCalloc(1, size); if (pMemBuf == NULL) { code = terrno; diff --git a/source/dnode/vnode/src/tq/tqPush.c b/source/dnode/vnode/src/tq/tqPush.c index 386b61cce3..2b2667773a 100644 --- a/source/dnode/vnode/src/tq/tqPush.c +++ b/source/dnode/vnode/src/tq/tqPush.c @@ -17,6 +17,9 @@ #include "vnd.h" int32_t 
tqProcessSubmitReqForSubscribe(STQ* pTq) { + if (pTq == NULL) { + return TSDB_CODE_INVALID_MSG; + } if (taosHashGetSize(pTq->pPushMgr) <= 0) { return 0; } @@ -64,6 +67,9 @@ int32_t tqPushMsg(STQ* pTq, tmsg_t msgType) { } int32_t tqRegisterPushHandle(STQ* pTq, void* handle, SRpcMsg* pMsg) { + if (pTq == NULL || handle == NULL || pMsg == NULL) { + return TSDB_CODE_INVALID_MSG; + } int32_t vgId = TD_VID(pTq->pVnode); STqHandle* pHandle = (STqHandle*)handle; @@ -101,6 +107,9 @@ int32_t tqRegisterPushHandle(STQ* pTq, void* handle, SRpcMsg* pMsg) { } void tqUnregisterPushHandle(STQ* pTq, void *handle) { + if (pTq == NULL || handle == NULL) { + return; + } STqHandle *pHandle = (STqHandle*)handle; int32_t vgId = TD_VID(pTq->pVnode); diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c index d924e97ae3..c0c4c4a5a3 100644 --- a/source/dnode/vnode/src/tq/tqRead.c +++ b/source/dnode/vnode/src/tq/tqRead.c @@ -17,6 +17,9 @@ #include "tq.h" bool isValValidForTable(STqHandle* pHandle, SWalCont* pHead) { + if (pHandle == NULL || pHead == NULL) { + return false; + } if (pHandle->execHandle.subType != TOPIC_SUB_TYPE__TABLE) { return true; } @@ -198,6 +201,9 @@ end: } int32_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, uint64_t reqId) { + if (pTq == NULL || pHandle == NULL || fetchOffset == NULL) { + return -1; + } int32_t code = -1; int32_t vgId = TD_VID(pTq->pVnode); int64_t id = pHandle->pWalReader->readerId; @@ -259,9 +265,17 @@ END: return code; } -bool tqGetTablePrimaryKey(STqReader* pReader) { return pReader->hasPrimaryKey; } +bool tqGetTablePrimaryKey(STqReader* pReader) { + if (pReader == NULL) { + return false; + } + return pReader->hasPrimaryKey; +} void tqSetTablePrimaryKey(STqReader* pReader, int64_t uid) { + if (pReader == NULL) { + return; + } bool ret = false; SSchemaWrapper* schema = metaGetTableSchema(pReader->pVnodeMeta, uid, -1, 1, NULL); if (schema && schema->nCols >= 2 && schema->pSchema[1].flags & COL_IS_KEY) { 
@@ -272,6 +286,9 @@ void tqSetTablePrimaryKey(STqReader* pReader, int64_t uid) { } STqReader* tqReaderOpen(SVnode* pVnode) { + if (pVnode == NULL) { + return NULL; + } STqReader* pReader = taosMemoryCalloc(1, sizeof(STqReader)); if (pReader == NULL) { return NULL; @@ -323,6 +340,9 @@ void tqReaderClose(STqReader* pReader) { } int32_t tqReaderSeek(STqReader* pReader, int64_t ver, const char* id) { + if (pReader == NULL) { + return TSDB_CODE_INVALID_PARA; + } if (walReaderSeekVer(pReader->pWalReader, ver) < 0) { return -1; } @@ -406,6 +426,9 @@ int32_t extractMsgFromWal(SWalReader* pReader, void** pItem, int64_t maxVer, con } bool tqNextBlockInWal(STqReader* pReader, const char* id, int sourceExcluded) { + if (pReader == NULL) { + return false; + } SWalReader* pWalReader = pReader->pWalReader; int64_t st = taosGetTimestampMs(); @@ -462,6 +485,9 @@ bool tqNextBlockInWal(STqReader* pReader, const char* id, int sourceExcluded) { } int32_t tqReaderSetSubmitMsg(STqReader* pReader, void* msgStr, int32_t msgLen, int64_t ver) { +if (pReader == NULL) { + return TSDB_CODE_INVALID_PARA; + } pReader->msg.msgStr = msgStr; pReader->msg.msgLen = msgLen; pReader->msg.ver = ver; @@ -481,14 +507,29 @@ int32_t tqReaderSetSubmitMsg(STqReader* pReader, void* msgStr, int32_t msgLen, i return 0; } -SWalReader* tqGetWalReader(STqReader* pReader) { return pReader->pWalReader; } +SWalReader* tqGetWalReader(STqReader* pReader) { + if (pReader == NULL) { + return NULL; + } + return pReader->pWalReader; +} -SSDataBlock* tqGetResultBlock(STqReader* pReader) { return pReader->pResBlock; } +SSDataBlock* tqGetResultBlock(STqReader* pReader) { + if (pReader == NULL) { + return NULL; + } + return pReader->pResBlock; +} -int64_t tqGetResultBlockTime(STqReader* pReader) { return pReader->lastTs; } +int64_t tqGetResultBlockTime(STqReader* pReader) { + if (pReader == NULL) { + return 0; + } + return pReader->lastTs; +} bool tqNextBlockImpl(STqReader* pReader, const char* idstr) { - if (pReader->msg.msgStr 
== NULL) { + if (pReader == NULL || pReader->msg.msgStr == NULL) { return false; } @@ -525,7 +566,7 @@ bool tqNextBlockImpl(STqReader* pReader, const char* idstr) { } bool tqNextDataBlockFilterOut(STqReader* pReader, SHashObj* filterOutUids) { - if (pReader->msg.msgStr == NULL) return false; + if (pReader == NULL || pReader->msg.msgStr == NULL) return false; int32_t blockSz = taosArrayGetSize(pReader->submit.aSubmitTbData); while (pReader->nextBlk < blockSz) { @@ -548,6 +589,9 @@ bool tqNextDataBlockFilterOut(STqReader* pReader, SHashObj* filterOutUids) { } int32_t tqMaskBlock(SSchemaWrapper* pDst, SSDataBlock* pBlock, const SSchemaWrapper* pSrc, char* mask) { + if (pDst == NULL || pBlock == NULL || pSrc == NULL || mask == NULL) { + return TSDB_CODE_INVALID_PARA; + } int32_t code = 0; int32_t cnt = 0; @@ -577,6 +621,9 @@ int32_t tqMaskBlock(SSchemaWrapper* pDst, SSDataBlock* pBlock, const SSchemaWrap } static int32_t buildResSDataBlock(STqReader* pReader, SSchemaWrapper* pSchema, const SArray* pColIdList) { + if (pReader == NULL || pSchema == NULL || pColIdList == NULL) { + return TSDB_CODE_INVALID_PARA; + } SSDataBlock* pBlock = pReader->pResBlock; if (blockDataGetNumOfCols(pBlock) > 0) { blockDataDestroy(pBlock); @@ -659,6 +706,9 @@ static int32_t doSetVal(SColumnInfoData* pColumnInfoData, int32_t rowIndex, SCol } int32_t tqRetrieveDataBlock(STqReader* pReader, SSDataBlock** pRes, const char* id) { + if (pReader == NULL || pRes == NULL) { + return TSDB_CODE_INVALID_PARA; + } tqTrace("tq reader retrieve data block %p, index:%d", pReader->msg.msgStr, pReader->nextBlk); int32_t code = 0; int32_t line = 0; @@ -825,6 +875,10 @@ END: static int32_t processBuildNew(STqReader* pReader, SSubmitTbData* pSubmitTbData, SArray* blocks, SArray* schemas, SSchemaWrapper* pSchemaWrapper, char* assigned, int32_t numOfRows, int32_t curRow, int32_t* lastRow) { + if (pReader == NULL || pSubmitTbData == NULL || blocks == NULL || schemas == NULL || pSchemaWrapper == NULL || + assigned 
== NULL || lastRow == NULL) { + return TSDB_CODE_INVALID_PARA; + } int32_t code = 0; SSchemaWrapper* pSW = NULL; SSDataBlock* block = NULL; @@ -860,6 +914,9 @@ END: return code; } static int32_t tqProcessColData(STqReader* pReader, SSubmitTbData* pSubmitTbData, SArray* blocks, SArray* schemas) { + if (pReader == NULL || pSubmitTbData == NULL || blocks == NULL || schemas == NULL) { + return TSDB_CODE_INVALID_PARA; + } int32_t code = 0; int32_t curRow = 0; int32_t lastRow = 0; @@ -919,6 +976,9 @@ END: } int32_t tqProcessRowData(STqReader* pReader, SSubmitTbData* pSubmitTbData, SArray* blocks, SArray* schemas) { + if (pReader == NULL || pSubmitTbData == NULL || blocks == NULL || schemas == NULL) { + return TSDB_CODE_INVALID_PARA; + } int32_t code = 0; STSchema* pTSchema = NULL; @@ -976,6 +1036,9 @@ END: } int32_t tqRetrieveTaosxBlock(STqReader* pReader, SArray* blocks, SArray* schemas, SSubmitTbData** pSubmitTbDataRet, int64_t *createTime) { + if (pReader == NULL || blocks == NULL || schemas == NULL) { + return TSDB_CODE_INVALID_PARA; + } tqTrace("tq reader retrieve data block %p, %d", pReader->msg.msgStr, pReader->nextBlk); SSubmitTbData* pSubmitTbData = taosArrayGet(pReader->submit.aSubmitTbData, pReader->nextBlk); if (pSubmitTbData == NULL) { @@ -1007,9 +1070,17 @@ int32_t tqRetrieveTaosxBlock(STqReader* pReader, SArray* blocks, SArray* schemas } } -void tqReaderSetColIdList(STqReader* pReader, SArray* pColIdList) { pReader->pColIdList = pColIdList; } +void tqReaderSetColIdList(STqReader* pReader, SArray* pColIdList) { + if (pReader == NULL){ + return; + } + pReader->pColIdList = pColIdList; +} void tqReaderSetTbUidList(STqReader* pReader, const SArray* tbUidList, const char* id) { + if (pReader == NULL || tbUidList == NULL) { + return; + } if (pReader->tbIdHash) { taosHashClear(pReader->tbIdHash); } else { @@ -1032,6 +1103,9 @@ void tqReaderSetTbUidList(STqReader* pReader, const SArray* tbUidList, const cha } void tqReaderAddTbUidList(STqReader* pReader, const 
SArray* pTableUidList) { + if (pReader == NULL || pTableUidList == NULL) { + return; + } if (pReader->tbIdHash == NULL) { pReader->tbIdHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_ENTRY_LOCK); if (pReader->tbIdHash == NULL) { @@ -1051,12 +1125,23 @@ void tqReaderAddTbUidList(STqReader* pReader, const SArray* pTableUidList) { } bool tqReaderIsQueriedTable(STqReader* pReader, uint64_t uid) { - return taosHashGet(pReader->tbIdHash, &uid, sizeof(uint64_t)); + if (pReader == NULL) { + return false; + } + return taosHashGet(pReader->tbIdHash, &uid, sizeof(uint64_t)) != NULL; } -bool tqCurrentBlockConsumed(const STqReader* pReader) { return pReader->msg.msgStr == NULL; } +bool tqCurrentBlockConsumed(const STqReader* pReader) { + if (pReader == NULL) { + return false; + } + return pReader->msg.msgStr == NULL; +} void tqReaderRemoveTbUidList(STqReader* pReader, const SArray* tbUidList) { + if (pReader == NULL || tbUidList == NULL) { + return; + } for (int32_t i = 0; i < taosArrayGetSize(tbUidList); i++) { int64_t* pKey = (int64_t*)taosArrayGet(tbUidList, i); if (pKey && taosHashRemove(pReader->tbIdHash, pKey, sizeof(int64_t)) != 0) { @@ -1066,6 +1151,9 @@ void tqReaderRemoveTbUidList(STqReader* pReader, const SArray* tbUidList) { } int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd) { + if (pTq == NULL || tbUidList == NULL) { + return TSDB_CODE_INVALID_PARA; + } void* pIter = NULL; int32_t vgId = TD_VID(pTq->pVnode); diff --git a/source/dnode/vnode/src/tq/tqScan.c b/source/dnode/vnode/src/tq/tqScan.c index 39eaac39b3..3419cd0020 100644 --- a/source/dnode/vnode/src/tq/tqScan.c +++ b/source/dnode/vnode/src/tq/tqScan.c @@ -16,6 +16,9 @@ #include "tq.h" int32_t tqAddBlockDataToRsp(const SSDataBlock* pBlock, SMqDataRsp* pRsp, int32_t numOfCols, int8_t precision) { + if (pBlock == NULL || pRsp == NULL) { + return TSDB_CODE_INVALID_PARA; + } size_t dataEncodeBufSize = blockGetEncodeSize(pBlock); int32_t dataStrLen = 
sizeof(SRetrieveTableRspForTmq) + dataEncodeBufSize; void* buf = taosMemoryCalloc(1, dataStrLen); @@ -47,18 +50,10 @@ int32_t tqAddBlockDataToRsp(const SSDataBlock* pBlock, SMqDataRsp* pRsp, int32_t return TSDB_CODE_SUCCESS; } -static int32_t tqAddBlockSchemaToRsp(const STqExecHandle* pExec, SMqDataRsp* pRsp) { - SSchemaWrapper* pSW = tCloneSSchemaWrapper(pExec->pTqReader->pSchemaWrapper); - if (pSW == NULL) { - return terrno; - } - if (taosArrayPush(pRsp->blockSchema, &pSW) == NULL) { - return terrno; - } - return 0; -} - static int32_t tqAddTbNameToRsp(const STQ* pTq, int64_t uid, SMqDataRsp* pRsp, int32_t n) { + if (pRsp == NULL || pTq == NULL) { + return TSDB_CODE_INVALID_PARA; + } SMetaReader mr = {0}; metaReaderDoInit(&mr, pTq->pVnode->pMeta, META_READER_LOCK); @@ -84,6 +79,9 @@ static int32_t tqAddTbNameToRsp(const STQ* pTq, int64_t uid, SMqDataRsp* pRsp, i } int32_t getDataBlock(qTaskInfo_t task, const STqHandle* pHandle, int32_t vgId, SSDataBlock** res) { + if (task == NULL || pHandle == NULL || res == NULL) { + return TSDB_CODE_INVALID_PARA; + } uint64_t ts = 0; qStreamSetOpen(task); @@ -99,6 +97,9 @@ int32_t getDataBlock(qTaskInfo_t task, const STqHandle* pHandle, int32_t vgId, S } int32_t tqScanData(STQ* pTq, STqHandle* pHandle, SMqDataRsp* pRsp, STqOffsetVal* pOffset, const SMqPollReq* pRequest) { + if (pTq == NULL || pHandle == NULL || pRsp == NULL || pOffset == NULL || pRequest == NULL){ + return TSDB_CODE_INVALID_PARA; + } int32_t vgId = TD_VID(pTq->pVnode); int32_t code = 0; int32_t line = 0; @@ -189,6 +190,9 @@ END: } int32_t tqScanTaosx(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, SMqBatchMetaRsp* pBatchMetaRsp, STqOffsetVal* pOffset) { + if (pTq == NULL || pHandle == NULL || pRsp == NULL || pBatchMetaRsp == NULL || pOffset == NULL) { + return TSDB_CODE_INVALID_PARA; + } const STqExecHandle* pExec = &pHandle->execHandle; qTaskInfo_t task = pExec->task; int code = qStreamPrepareScan(task, pOffset, pHandle->execHandle.subType); @@ -280,6 
+284,9 @@ int32_t tqScanTaosx(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, SMqBat } static int32_t buildCreateTbInfo(SMqDataRsp* pRsp, SVCreateTbReq* pCreateTbReq){ + if (pRsp == NULL || pCreateTbReq == NULL) { + return TSDB_CODE_INVALID_PARA; + } int32_t code = 0; void* createReq = NULL; if (pRsp->createTableNum == 0) { @@ -329,6 +336,9 @@ END: } static void tqProcessSubData(STQ* pTq, STqHandle* pHandle, SMqDataRsp* pRsp, int32_t* totalRows, int8_t sourceExcluded){ + if (pTq == NULL || pHandle == NULL || pRsp == NULL || totalRows == NULL) { + return; + } int32_t code = 0; STqExecHandle* pExec = &pHandle->execHandle; STqReader* pReader = pExec->pTqReader; @@ -407,6 +417,9 @@ END: int32_t tqTaosxScanLog(STQ* pTq, STqHandle* pHandle, SPackedData submit, SMqDataRsp* pRsp, int32_t* totalRows, int8_t sourceExcluded) { + if (pTq == NULL || pHandle == NULL || pRsp == NULL || totalRows == NULL) { + return TSDB_CODE_INVALID_PARA; + } STqExecHandle* pExec = &pHandle->execHandle; int32_t code = 0; STqReader* pReader = pExec->pTqReader; diff --git a/source/dnode/vnode/src/tq/tqSnapshot.c b/source/dnode/vnode/src/tq/tqSnapshot.c index cfa97def74..219ea4b6b4 100644 --- a/source/dnode/vnode/src/tq/tqSnapshot.c +++ b/source/dnode/vnode/src/tq/tqSnapshot.c @@ -27,6 +27,9 @@ struct STqSnapReader { }; int32_t tqSnapReaderOpen(STQ* pTq, int64_t sver, int64_t ever, int8_t type, STqSnapReader** ppReader) { + if (pTq == NULL || ppReader == NULL) { + return TSDB_CODE_INVALID_MSG; + } int32_t code = 0; STqSnapReader* pReader = NULL; @@ -77,12 +80,18 @@ _err: } void tqSnapReaderClose(STqSnapReader** ppReader) { + if (ppReader == NULL || *ppReader == NULL) { + return; + } tdbTbcClose((*ppReader)->pCur); taosMemoryFree(*ppReader); *ppReader = NULL; } int32_t tqSnapRead(STqSnapReader* pReader, uint8_t** ppData) { + if (pReader == NULL || ppData == NULL) { + return TSDB_CODE_INVALID_MSG; + } int32_t code = 0; void* pKey = NULL; void* pVal = NULL; @@ -126,6 +135,9 @@ struct 
STqSnapWriter { }; int32_t tqSnapWriterOpen(STQ* pTq, int64_t sver, int64_t ever, STqSnapWriter** ppWriter) { + if (pTq == NULL || ppWriter == NULL) { + return TSDB_CODE_INVALID_MSG; + } int32_t code = 0; STqSnapWriter* pWriter = NULL; @@ -156,6 +168,9 @@ _err: } int32_t tqSnapWriterClose(STqSnapWriter** ppWriter, int8_t rollback) { + if (ppWriter == NULL || *ppWriter == NULL) { + return TSDB_CODE_INVALID_MSG; + } int32_t code = 0; STqSnapWriter* pWriter = *ppWriter; STQ* pTq = pWriter->pTq; @@ -180,6 +195,9 @@ _err: } int32_t tqSnapHandleWrite(STqSnapWriter* pWriter, uint8_t* pData, uint32_t nData) { + if (pWriter == NULL || pData == NULL || nData < sizeof(SSnapDataHdr)) { + return TSDB_CODE_INVALID_MSG; + } int32_t code = 0; STQ* pTq = pWriter->pTq; SDecoder decoder = {0}; @@ -190,7 +208,7 @@ int32_t tqSnapHandleWrite(STqSnapWriter* pWriter, uint8_t* pData, uint32_t nData code = tDecodeSTqHandle(pDecoder, &handle); if (code) goto end; taosWLockLatch(&pTq->lock); - code = tqMetaSaveInfo(pTq, pTq->pExecStore, handle.subKey, (int)strlen(handle.subKey), pData + sizeof(SSnapDataHdr), + code = tqMetaSaveInfo(pTq, pTq->pExecStore, handle.subKey, strlen(handle.subKey), pData + sizeof(SSnapDataHdr), nData - sizeof(SSnapDataHdr)); taosWUnLockLatch(&pTq->lock); @@ -202,6 +220,9 @@ end: } int32_t tqSnapCheckInfoWrite(STqSnapWriter* pWriter, uint8_t* pData, uint32_t nData) { + if (pWriter == NULL || pData == NULL || nData < sizeof(SSnapDataHdr)) { + return TSDB_CODE_INVALID_MSG; + } int32_t code = 0; STQ* pTq = pWriter->pTq; STqCheckInfo info = {0}; @@ -223,6 +244,9 @@ _err: } int32_t tqSnapOffsetWrite(STqSnapWriter* pWriter, uint8_t* pData, uint32_t nData) { + if (pWriter == NULL || pData == NULL || nData < sizeof(SSnapDataHdr)) { + return TSDB_CODE_INVALID_MSG; + } int32_t code = 0; STQ* pTq = pWriter->pTq; diff --git a/source/dnode/vnode/src/tq/tqUtil.c b/source/dnode/vnode/src/tq/tqUtil.c index a92049e5f3..f6a8563c70 100644 --- a/source/dnode/vnode/src/tq/tqUtil.c +++ 
b/source/dnode/vnode/src/tq/tqUtil.c @@ -21,6 +21,9 @@ static int32_t tqSendBatchMetaPollRsp(STqHandle* pHandle, const SRpcMsg* pMsg, c const SMqBatchMetaRsp* pRsp, int32_t vgId); int32_t tqInitDataRsp(SMqDataRsp* pRsp, STqOffsetVal pOffset) { + if (pRsp == NULL) { + return TSDB_CODE_INVALID_PARA; + } pRsp->blockData = taosArrayInit(0, sizeof(void*)); pRsp->blockDataLen = taosArrayInit(0, sizeof(int32_t)); @@ -41,6 +44,9 @@ void tqUpdateNodeStage(STQ* pTq, bool isLeader) { } static int32_t tqInitTaosxRsp(SMqDataRsp* pRsp, STqOffsetVal pOffset) { + if (pRsp == NULL) { + return TSDB_CODE_INVALID_PARA; + } tOffsetCopy(&pRsp->reqOffset, &pOffset); tOffsetCopy(&pRsp->rspOffset, &pOffset); @@ -81,6 +87,9 @@ static int32_t tqInitTaosxRsp(SMqDataRsp* pRsp, STqOffsetVal pOffset) { static int32_t extractResetOffsetVal(STqOffsetVal* pOffsetVal, STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequest, SRpcMsg* pMsg, bool* pBlockReturned) { + if (pOffsetVal == NULL || pTq == NULL || pHandle == NULL || pRequest == NULL || pMsg == NULL || pBlockReturned == NULL) { + return TSDB_CODE_INVALID_PARA; + } uint64_t consumerId = pRequest->consumerId; STqOffset* pOffset = NULL; int32_t code = tqMetaGetOffset(pTq, pRequest->subKey, &pOffset); @@ -142,6 +151,9 @@ static int32_t extractResetOffsetVal(STqOffsetVal* pOffsetVal, STQ* pTq, STqHand static int32_t extractDataAndRspForNormalSubscribe(STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequest, SRpcMsg* pMsg, STqOffsetVal* pOffset) { + if (pTq == NULL || pHandle == NULL || pRequest == NULL || pMsg == NULL || pOffset == NULL) { + return TSDB_CODE_INVALID_PARA; + } uint64_t consumerId = pRequest->consumerId; int32_t vgId = TD_VID(pTq->pVnode); terrno = 0; @@ -212,6 +224,9 @@ static void tDeleteCommon(void* parm) {} static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequest, SRpcMsg* pMsg, STqOffsetVal* offset) { + if (pTq == NULL || pHandle == NULL || pRequest == NULL || pMsg == NULL || 
offset == NULL) { + return TSDB_CODE_INVALID_PARA; + } int32_t vgId = TD_VID(pTq->pVnode); SMqDataRsp taosxRsp = {0}; SMqBatchMetaRsp btMetaRsp = {0}; @@ -410,6 +425,9 @@ END: } int32_t tqExtractDataForMq(STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequest, SRpcMsg* pMsg) { + if (pTq == NULL || pHandle == NULL || pRequest == NULL || pMsg == NULL) { + return TSDB_CODE_TMQ_INVALID_MSG; + } int32_t code = 0; STqOffsetVal reqOffset = {0}; tOffsetCopy(&reqOffset, &pRequest->reqOffset); @@ -445,6 +463,9 @@ END: static void initMqRspHead(SMqRspHead* pMsgHead, int32_t type, int32_t epoch, int64_t consumerId, int64_t sver, int64_t ever) { + if (pMsgHead == NULL) { + return; + } pMsgHead->consumerId = consumerId; pMsgHead->epoch = epoch; pMsgHead->mqMsgType = type; @@ -454,6 +475,9 @@ static void initMqRspHead(SMqRspHead* pMsgHead, int32_t type, int32_t epoch, int int32_t tqSendBatchMetaPollRsp(STqHandle* pHandle, const SRpcMsg* pMsg, const SMqPollReq* pReq, const SMqBatchMetaRsp* pRsp, int32_t vgId) { + if (pHandle == NULL || pMsg == NULL || pReq == NULL || pRsp == NULL) { + return TSDB_CODE_TMQ_INVALID_MSG; + } int32_t len = 0; int32_t code = 0; tEncodeSize(tEncodeMqBatchMetaRsp, pRsp, len, code); @@ -491,6 +515,9 @@ int32_t tqSendBatchMetaPollRsp(STqHandle* pHandle, const SRpcMsg* pMsg, const SM int32_t tqSendMetaPollRsp(STqHandle* pHandle, const SRpcMsg* pMsg, const SMqPollReq* pReq, const SMqMetaRsp* pRsp, int32_t vgId) { + if (pHandle == NULL || pMsg == NULL || pReq == NULL || pRsp == NULL) { + return TSDB_CODE_TMQ_INVALID_MSG; + } int32_t len = 0; int32_t code = 0; tEncodeSize(tEncodeMqMetaRsp, pRsp, len, code); @@ -529,6 +556,9 @@ int32_t tqSendMetaPollRsp(STqHandle* pHandle, const SRpcMsg* pMsg, const SMqPoll int32_t tqDoSendDataRsp(const SRpcHandleInfo* pRpcHandleInfo, const SMqDataRsp* pRsp, int32_t epoch, int64_t consumerId, int32_t type, int64_t sver, int64_t ever) { + if (pRpcHandleInfo == NULL || pRsp == NULL) { + return TSDB_CODE_TMQ_INVALID_MSG; + } 
int32_t len = 0; int32_t code = 0; diff --git a/source/dnode/vnode/src/tsdb/tsdbCache.c b/source/dnode/vnode/src/tsdb/tsdbCache.c index 8fd0d47969..2047b68101 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCache.c +++ b/source/dnode/vnode/src/tsdb/tsdbCache.c @@ -2635,23 +2635,15 @@ _exit: } int32_t tsdbCacheDel(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKEY eKey) { - int32_t code = 0, lino = 0; - // fetch schema + int32_t code = 0, lino = 0; STSchema *pTSchema = NULL; int sver = -1; + int numKeys = 0; + SArray *remainCols = NULL; TAOS_CHECK_RETURN(metaGetTbTSchemaEx(pTsdb->pVnode->pMeta, suid, uid, sver, &pTSchema)); - // build keys & multi get from rocks - int numCols = pTSchema->numOfCols; - int numKeys = 0; - SArray *remainCols = NULL; - - code = tsdbCacheCommit(pTsdb); - if (code != TSDB_CODE_SUCCESS) { - tsdbTrace("vgId:%d, %s commit failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, - tstrerror(code)); - } + int numCols = pTSchema->numOfCols; (void)taosThreadMutexLock(&pTsdb->lruMutex); diff --git a/source/dnode/vnode/src/tsdb/tsdbCommit2.c b/source/dnode/vnode/src/tsdb/tsdbCommit2.c index e3c75760c8..5822463f9e 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCommit2.c +++ b/source/dnode/vnode/src/tsdb/tsdbCommit2.c @@ -574,7 +574,7 @@ static int32_t tsdbCommitInfoBuild(STsdb *tsdb) { // begin tasks on file set for (int i = 0; i < taosArrayGetSize(tsdb->commitInfo->arr); i++) { SFileSetCommitInfo *info = *(SFileSetCommitInfo **)taosArrayGet(tsdb->commitInfo->arr, i); - tsdbBeginTaskOnFileSet(tsdb, info->fid, &fset); + tsdbBeginTaskOnFileSet(tsdb, info->fid, EVA_TASK_COMMIT, &fset); if (fset) { code = tsdbTFileSetInitCopy(tsdb, fset, &info->fset); if (code) { @@ -712,7 +712,7 @@ int32_t tsdbCommitCommit(STsdb *tsdb) { for (int32_t i = 0; i < taosArrayGetSize(tsdb->commitInfo->arr); i++) { SFileSetCommitInfo *info = *(SFileSetCommitInfo **)taosArrayGet(tsdb->commitInfo->arr, i); if (info->fset) { - tsdbFinishTaskOnFileSet(tsdb, 
info->fid); + tsdbFinishTaskOnFileSet(tsdb, info->fid, EVA_TASK_COMMIT); } } @@ -743,7 +743,7 @@ int32_t tsdbCommitAbort(STsdb *pTsdb) { for (int32_t i = 0; i < taosArrayGetSize(pTsdb->commitInfo->arr); i++) { SFileSetCommitInfo *info = *(SFileSetCommitInfo **)taosArrayGet(pTsdb->commitInfo->arr, i); if (info->fset) { - tsdbFinishTaskOnFileSet(pTsdb, info->fid); + tsdbFinishTaskOnFileSet(pTsdb, info->fid, EVA_TASK_COMMIT); } } (void)taosThreadMutexUnlock(&pTsdb->mutex); diff --git a/source/dnode/vnode/src/tsdb/tsdbFS2.c b/source/dnode/vnode/src/tsdb/tsdbFS2.c index 9a7cdca8f7..82dd49b0e2 100644 --- a/source/dnode/vnode/src/tsdb/tsdbFS2.c +++ b/source/dnode/vnode/src/tsdb/tsdbFS2.c @@ -770,8 +770,8 @@ extern void tsdbStopAllCompTask(STsdb *tsdb); int32_t tsdbDisableAndCancelAllBgTask(STsdb *pTsdb) { STFileSystem *fs = pTsdb->pFS; - SArray *channelArray = taosArrayInit(0, sizeof(SVAChannelID)); - if (channelArray == NULL) { + SArray *asyncTasks = taosArrayInit(0, sizeof(SVATaskID)); + if (asyncTasks == NULL) { return terrno; } @@ -783,30 +783,31 @@ int32_t tsdbDisableAndCancelAllBgTask(STsdb *pTsdb) { // collect channel STFileSet *fset; TARRAY2_FOREACH(fs->fSetArr, fset) { - if (fset->channelOpened) { - if (taosArrayPush(channelArray, &fset->channel) == NULL) { - taosArrayDestroy(channelArray); - (void)taosThreadMutexUnlock(&pTsdb->mutex); - return terrno; - } - fset->channel = (SVAChannelID){0}; - fset->mergeScheduled = false; - tsdbFSSetBlockCommit(fset, false); - fset->channelOpened = false; + if (taosArrayPush(asyncTasks, &fset->mergeTask) == NULL // + || taosArrayPush(asyncTasks, &fset->compactTask) == NULL // + || taosArrayPush(asyncTasks, &fset->retentionTask) == NULL) { + taosArrayDestroy(asyncTasks); + (void)taosThreadMutexUnlock(&pTsdb->mutex); + return terrno; } + fset->mergeScheduled = false; + tsdbFSSetBlockCommit(fset, false); } (void)taosThreadMutexUnlock(&pTsdb->mutex); // destroy all channels - for (int32_t i = 0; i < taosArrayGetSize(channelArray); 
i++) { - SVAChannelID *channel = taosArrayGet(channelArray, i); - int32_t code = vnodeAChannelDestroy(channel, true); - if (code) { - tsdbError("vgId:%d %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, tstrerror(code)); + for (int32_t k = 0; k < 2; k++) { + for (int32_t i = 0; i < taosArrayGetSize(asyncTasks); i++) { + SVATaskID *task = taosArrayGet(asyncTasks, i); + if (k == 0) { + (void)vnodeACancel(task); + } else { + vnodeAWait(task); + } } } - taosArrayDestroy(channelArray); + taosArrayDestroy(asyncTasks); #ifdef TD_ENTERPRISE tsdbStopAllCompTask(pTsdb); @@ -934,9 +935,6 @@ int32_t tsdbFSEditCommit(STFileSystem *fs) { // bool skipMerge = false; int32_t numFile = TARRAY2_SIZE(lvl->fobjArr); if (numFile >= sttTrigger && (!fset->mergeScheduled)) { - code = tsdbTFileSetOpenChannel(fset); - TSDB_CHECK_CODE(code, lino, _exit); - SMergeArg *arg = taosMemoryMalloc(sizeof(*arg)); if (arg == NULL) { code = terrno; @@ -946,7 +944,7 @@ int32_t tsdbFSEditCommit(STFileSystem *fs) { arg->tsdb = fs->tsdb; arg->fid = fset->fid; - code = vnodeAsync(&fset->channel, EVA_PRIORITY_HIGH, tsdbMerge, taosAutoMemoryFree, arg, NULL); + code = vnodeAsync(MERGE_TASK_ASYNC, EVA_PRIORITY_HIGH, tsdbMerge, taosAutoMemoryFree, arg, &fset->mergeTask); TSDB_CHECK_CODE(code, lino, _exit); fset->mergeScheduled = true; } @@ -1202,42 +1200,61 @@ _out: void tsdbFSDestroyRefRangedSnapshot(TFileSetRangeArray **fsrArr) { tsdbTFileSetRangeArrayDestroy(fsrArr); } -void tsdbBeginTaskOnFileSet(STsdb *tsdb, int32_t fid, STFileSet **fset) { +void tsdbBeginTaskOnFileSet(STsdb *tsdb, int32_t fid, EVATaskT task, STFileSet **fset) { + // Here, sttTrigger is protected by tsdb->mutex, so it is safe to read it without lock int16_t sttTrigger = tsdb->pVnode->config.sttTrigger; tsdbFSGetFSet(tsdb->pFS, fid, fset); - if (sttTrigger == 1 && (*fset)) { - for (;;) { - if ((*fset)->taskRunning) { - (*fset)->numWaitTask++; - - (void)taosThreadCondWait(&(*fset)->beginTask, &tsdb->mutex); - - 
tsdbFSGetFSet(tsdb->pFS, fid, fset); - - (*fset)->numWaitTask--; - } else { - (*fset)->taskRunning = true; - break; - } - } - tsdbInfo("vgId:%d begin task on file set:%d", TD_VID(tsdb->pVnode), fid); + if (*fset == NULL) { + return; } + + struct STFileSetCond *cond = NULL; + if (sttTrigger == 1 || task == EVA_TASK_COMMIT) { + cond = &(*fset)->conds[0]; + } else { + cond = &(*fset)->conds[1]; + } + + while (1) { + if (cond->running) { + cond->numWait++; + (void)taosThreadCondWait(&cond->cond, &tsdb->mutex); + cond->numWait--; + } else { + cond->running = true; + break; + } + } + + tsdbInfo("vgId:%d begin %s task on file set:%d", TD_VID(tsdb->pVnode), vnodeGetATaskName(task), fid); + return; } -void tsdbFinishTaskOnFileSet(STsdb *tsdb, int32_t fid) { +void tsdbFinishTaskOnFileSet(STsdb *tsdb, int32_t fid, EVATaskT task) { + // Here, sttTrigger is protected by tsdb->mutex, so it is safe to read it without lock int16_t sttTrigger = tsdb->pVnode->config.sttTrigger; - if (sttTrigger == 1) { - STFileSet *fset = NULL; - tsdbFSGetFSet(tsdb->pFS, fid, &fset); - if (fset != NULL && fset->taskRunning) { - fset->taskRunning = false; - if (fset->numWaitTask > 0) { - (void)taosThreadCondSignal(&fset->beginTask); - } - tsdbInfo("vgId:%d finish task on file set:%d", TD_VID(tsdb->pVnode), fid); - } + + STFileSet *fset = NULL; + tsdbFSGetFSet(tsdb->pFS, fid, &fset); + if (fset == NULL) { + return; } + + struct STFileSetCond *cond = NULL; + if (sttTrigger == 1 || task == EVA_TASK_COMMIT) { + cond = &fset->conds[0]; + } else { + cond = &fset->conds[1]; + } + + cond->running = false; + if (cond->numWait > 0) { + (void)taosThreadCondSignal(&cond->cond); + } + + tsdbInfo("vgId:%d finish %s task on file set:%d", TD_VID(tsdb->pVnode), vnodeGetATaskName(task), fid); + return; } struct SFileSetReader { diff --git a/source/dnode/vnode/src/tsdb/tsdbFS2.h b/source/dnode/vnode/src/tsdb/tsdbFS2.h index 119015636b..9694edcdd9 100644 --- a/source/dnode/vnode/src/tsdb/tsdbFS2.h +++ 
b/source/dnode/vnode/src/tsdb/tsdbFS2.h @@ -14,6 +14,7 @@ */ #include "tsdbFSet2.h" +#include "vnd.h" #ifndef _TSDB_FILE_SYSTEM_H #define _TSDB_FILE_SYSTEM_H @@ -61,8 +62,8 @@ int32_t tsdbFSEditAbort(STFileSystem *fs); // other void tsdbFSGetFSet(STFileSystem *fs, int32_t fid, STFileSet **fset); void tsdbFSCheckCommit(STsdb *tsdb, int32_t fid); -void tsdbBeginTaskOnFileSet(STsdb *tsdb, int32_t fid, STFileSet **fset); -void tsdbFinishTaskOnFileSet(STsdb *tsdb, int32_t fid); +void tsdbBeginTaskOnFileSet(STsdb *tsdb, int32_t fid, EVATaskT task, STFileSet **fset); +void tsdbFinishTaskOnFileSet(STsdb *tsdb, int32_t fid, EVATaskT task); // utils int32_t save_fs(const TFileSetArray *arr, const char *fname); void current_fname(STsdb *pTsdb, char *fname, EFCurrentT ftype); diff --git a/source/dnode/vnode/src/tsdb/tsdbFSet2.c b/source/dnode/vnode/src/tsdb/tsdbFSet2.c index a0ae58ac96..68914300e4 100644 --- a/source/dnode/vnode/src/tsdb/tsdbFSet2.c +++ b/source/dnode/vnode/src/tsdb/tsdbFSet2.c @@ -480,16 +480,18 @@ int32_t tsdbTFileSetInit(int32_t fid, STFileSet **fset) { fset[0]->maxVerValid = VERSION_MAX; TARRAY2_INIT(fset[0]->lvlArr); - // background task queue - (void)taosThreadCondInit(&(*fset)->beginTask, NULL); - (*fset)->taskRunning = false; - (*fset)->numWaitTask = 0; - // block commit variables (void)taosThreadCondInit(&fset[0]->canCommit, NULL); (*fset)->numWaitCommit = 0; (*fset)->blockCommit = false; + for (int32_t i = 0; i < sizeof((*fset)->conds) / sizeof((*fset)->conds[0]); ++i) { + struct STFileSetCond *cond = &(*fset)->conds[i]; + cond->running = false; + cond->numWait = 0; + (void)taosThreadCondInit(&cond->cond, NULL); + } + return 0; } @@ -648,8 +650,10 @@ void tsdbTFileSetClear(STFileSet **fset) { TARRAY2_DESTROY((*fset)->lvlArr, tsdbSttLvlClear); - (void)taosThreadCondDestroy(&(*fset)->beginTask); (void)taosThreadCondDestroy(&(*fset)->canCommit); + for (int32_t i = 0; i < sizeof((*fset)->conds) / sizeof((*fset)->conds[0]); ++i) { + 
(void)taosThreadCondDestroy(&(*fset)->conds[i].cond); + } taosMemoryFreeClear(*fset); } } @@ -703,14 +707,3 @@ bool tsdbTFileSetIsEmpty(const STFileSet *fset) { } return TARRAY2_SIZE(fset->lvlArr) == 0; } - -int32_t tsdbTFileSetOpenChannel(STFileSet *fset) { - int32_t code; - if (!fset->channelOpened) { - if ((code = vnodeAChannelInit(2, &fset->channel))) { - return code; - } - fset->channelOpened = true; - } - return 0; -} diff --git a/source/dnode/vnode/src/tsdb/tsdbFSet2.h b/source/dnode/vnode/src/tsdb/tsdbFSet2.h index 24ae59e300..83ef32e5e5 100644 --- a/source/dnode/vnode/src/tsdb/tsdbFSet2.h +++ b/source/dnode/vnode/src/tsdb/tsdbFSet2.h @@ -68,8 +68,6 @@ bool tsdbTFileSetIsEmpty(const STFileSet *fset); // stt int32_t tsdbSttLvlInit(int32_t level, SSttLvl **lvl); void tsdbSttLvlClear(SSttLvl **lvl); -// open channel -int32_t tsdbTFileSetOpenChannel(STFileSet *fset); struct STFileOp { tsdb_fop_t optype; @@ -83,26 +81,30 @@ struct SSttLvl { TFileObjArray fobjArr[1]; }; +struct STFileSetCond { + bool running; + int32_t numWait; + TdThreadCond cond; +}; + struct STFileSet { int32_t fid; int64_t maxVerValid; STFileObj *farr[TSDB_FTYPE_MAX]; // file array TSttLvlArray lvlArr[1]; // level array - // background task - bool channelOpened; - SVAChannelID channel; - bool mergeScheduled; - - // sttTrigger = 1 - TdThreadCond beginTask; - bool taskRunning; - int32_t numWaitTask; + bool mergeScheduled; + SVATaskID mergeTask; + SVATaskID compactTask; + SVATaskID retentionTask; // block commit variables TdThreadCond canCommit; int32_t numWaitCommit; bool blockCommit; + + // conditions + struct STFileSetCond conds[2]; }; struct STFileSetRange { diff --git a/source/dnode/vnode/src/tsdb/tsdbMerge.c b/source/dnode/vnode/src/tsdb/tsdbMerge.c index 61a82d828e..39d8a57692 100644 --- a/source/dnode/vnode/src/tsdb/tsdbMerge.c +++ b/source/dnode/vnode/src/tsdb/tsdbMerge.c @@ -462,21 +462,29 @@ _exit: static int32_t tsdbMergeGetFSet(SMerger *merger) { STFileSet *fset; + int32_t code; + 
STsdb *tsdb = merger->tsdb; (void)taosThreadMutexLock(&merger->tsdb->mutex); - tsdbFSGetFSet(merger->tsdb->pFS, merger->fid, &fset); - if (fset == NULL) { + + if (tsdb->bgTaskDisabled) { (void)taosThreadMutexUnlock(&merger->tsdb->mutex); return 0; } - fset->mergeScheduled = false; + tsdbBeginTaskOnFileSet(tsdb, merger->fid, EVA_TASK_MERGE, &fset); + if (NULL == fset) { + (void)taosThreadMutexUnlock(&merger->tsdb->mutex); + return 0; + } - int32_t code = tsdbTFileSetInitCopy(merger->tsdb, fset, &merger->fset); + code = tsdbTFileSetInitCopy(merger->tsdb, fset, &merger->fset); if (code) { (void)taosThreadMutexUnlock(&merger->tsdb->mutex); return code; } + + fset->mergeScheduled = false; (void)taosThreadMutexUnlock(&merger->tsdb->mutex); return 0; } @@ -493,10 +501,13 @@ int32_t tsdbMerge(void *arg) { .sttTrigger = tsdb->pVnode->config.sttTrigger, }}; - if (merger->sttTrigger <= 1) return 0; + if (merger->sttTrigger <= 1) { + return 0; + } // copy snapshot - TAOS_CHECK_GOTO(tsdbMergeGetFSet(merger), &lino, _exit); + code = tsdbMergeGetFSet(merger); + TSDB_CHECK_CODE(code, lino, _exit); if (merger->fset == NULL) { return 0; @@ -509,12 +520,19 @@ int32_t tsdbMerge(void *arg) { TSDB_CHECK_CODE(code, lino, _exit); _exit: + if (merger->fset) { + (void)taosThreadMutexLock(&tsdb->mutex); + tsdbFinishTaskOnFileSet(tsdb, mergeArg->fid, EVA_TASK_MERGE); + (void)taosThreadMutexUnlock(&tsdb->mutex); + } + if (code) { tsdbError("vgId:%d %s failed at %s:%d since %s", TD_VID(tsdb->pVnode), __func__, __FILE__, lino, tstrerror(code)); tsdbFatal("vgId:%d, failed to merge stt files since %s. 
code:%d", TD_VID(tsdb->pVnode), terrstr(), code); taosMsleep(100); exit(EXIT_FAILURE); } + tsdbTFileSetClear(&merger->fset); taosMemoryFree(arg); return code; diff --git a/source/dnode/vnode/src/tsdb/tsdbOpen.c b/source/dnode/vnode/src/tsdb/tsdbOpen.c index b2e4621878..c1f8f45d7e 100644 --- a/source/dnode/vnode/src/tsdb/tsdbOpen.c +++ b/source/dnode/vnode/src/tsdb/tsdbOpen.c @@ -18,22 +18,6 @@ extern int32_t tsdbOpenCompMonitor(STsdb *tsdb); extern void tsdbCloseCompMonitor(STsdb *tsdb); -extern int32_t tsdbInitCompact(); -extern void tsdbCleanupCompact(); - -int32_t tsdbInit() { -#ifdef TD_ENTERPRISE - return tsdbInitCompact(); -#endif - return 0; -} - -void tsdbCleanUp() { -#ifdef TD_ENTERPRISE - tsdbCleanupCompact(); -#endif - return; -} void tsdbSetKeepCfg(STsdb *pTsdb, STsdbCfg *pCfg) { STsdbKeepCfg *pKeepCfg = &pTsdb->keepCfg; diff --git a/source/dnode/vnode/src/tsdb/tsdbRead2.c b/source/dnode/vnode/src/tsdb/tsdbRead2.c index 05ae4be74b..a9f3893b96 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead2.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead2.c @@ -5791,7 +5791,6 @@ int32_t tsdbReaderSuspend2(STsdbReader* pReader) { // make sure only release once void* p = pReader->pReadSnap; - TSDB_CHECK_NULL(p, code, lino, _end, TSDB_CODE_INVALID_PARA); if ((p == atomic_val_compare_exchange_ptr((void**)&pReader->pReadSnap, p, NULL)) && (p != NULL)) { tsdbUntakeReadSnap2(pReader, p, false); pReader->pReadSnap = NULL; diff --git a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c index bf79b2482d..3d50c81a5d 100644 --- a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c +++ b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c @@ -104,7 +104,7 @@ int32_t tsdbOpenFile(const char *path, STsdb *pTsdb, int32_t flag, STsdbFD **ppF } pFD->path = (char *)&pFD[1]; - tstrncpy(pFD->path, path, strlen(path) + 1); + memcpy(pFD->path, path, strlen(path) + 1); pFD->szPage = szPage; pFD->flag = flag; pFD->szPage = szPage; diff --git 
a/source/dnode/vnode/src/tsdb/tsdbRetention.c b/source/dnode/vnode/src/tsdb/tsdbRetention.c index 7859ee4c66..fcce36b121 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRetention.c +++ b/source/dnode/vnode/src/tsdb/tsdbRetention.c @@ -325,11 +325,21 @@ static int32_t tsdbRetention(void *arg) { // begin task (void)taosThreadMutexLock(&pTsdb->mutex); - tsdbBeginTaskOnFileSet(pTsdb, rtnArg->fid, &fset); + + // check if background task is disabled + if (pTsdb->bgTaskDisabled) { + tsdbInfo("vgId:%d, background task is disabled, skip retention", TD_VID(pTsdb->pVnode)); + (void)taosThreadMutexUnlock(&pTsdb->mutex); + return 0; + } + + // set flag and copy + tsdbBeginTaskOnFileSet(pTsdb, rtnArg->fid, EVA_TASK_RETENTION, &fset); if (fset && (code = tsdbTFileSetInitCopy(pTsdb, fset, &rtner.fset))) { (void)taosThreadMutexUnlock(&pTsdb->mutex); TSDB_CHECK_CODE(code, lino, _exit); } + (void)taosThreadMutexUnlock(&pTsdb->mutex); // do retention @@ -346,7 +356,7 @@ static int32_t tsdbRetention(void *arg) { _exit: if (rtner.fset) { (void)taosThreadMutexLock(&pTsdb->mutex); - tsdbFinishTaskOnFileSet(pTsdb, rtnArg->fid); + tsdbFinishTaskOnFileSet(pTsdb, rtnArg->fid, EVA_TASK_RETENTION); (void)taosThreadMutexUnlock(&pTsdb->mutex); } @@ -364,26 +374,29 @@ static int32_t tsdbAsyncRetentionImpl(STsdb *tsdb, int64_t now, bool s3Migrate) int32_t code = 0; int32_t lino = 0; + // check if background task is disabled + if (tsdb->bgTaskDisabled) { + tsdbInfo("vgId:%d, background task is disabled, skip retention", TD_VID(tsdb->pVnode)); + return 0; + } + STFileSet *fset; + TARRAY2_FOREACH(tsdb->pFS->fSetArr, fset) { + SRtnArg *arg = taosMemoryMalloc(sizeof(*arg)); + if (arg == NULL) { + TAOS_CHECK_GOTO(terrno, &lino, _exit); + } - if (!tsdb->bgTaskDisabled) { - TARRAY2_FOREACH(tsdb->pFS->fSetArr, fset) { - TAOS_CHECK_GOTO(tsdbTFileSetOpenChannel(fset), &lino, _exit); + arg->tsdb = tsdb; + arg->now = now; + arg->fid = fset->fid; + arg->s3Migrate = s3Migrate; - SRtnArg *arg = 
taosMemoryMalloc(sizeof(*arg)); - if (arg == NULL) { - TAOS_CHECK_GOTO(terrno, &lino, _exit); - } - - arg->tsdb = tsdb; - arg->now = now; - arg->fid = fset->fid; - arg->s3Migrate = s3Migrate; - - if ((code = vnodeAsync(&fset->channel, EVA_PRIORITY_LOW, tsdbRetention, tsdbRetentionCancel, arg, NULL))) { - taosMemoryFree(arg); - TSDB_CHECK_CODE(code, lino, _exit); - } + code = vnodeAsync(RETENTION_TASK_ASYNC, EVA_PRIORITY_LOW, tsdbRetention, tsdbRetentionCancel, arg, + &fset->retentionTask); + if (code) { + taosMemoryFree(arg); + TSDB_CHECK_CODE(code, lino, _exit); } } diff --git a/source/dnode/vnode/src/vnd/vnodeAsync.c b/source/dnode/vnode/src/vnd/vnodeAsync.c index 424ed0f325..49c1306736 100644 --- a/source/dnode/vnode/src/vnd/vnodeAsync.c +++ b/source/dnode/vnode/src/vnd/vnodeAsync.c @@ -118,9 +118,19 @@ struct SVAsync { SVHashTable *taskTable; }; -SVAsync *vnodeAsyncs[3]; +struct { + const char *label; + SVAsync *async; +} GVnodeAsyncs[] = { + [0] = {NULL, NULL}, + [1] = {"vnode-commit", NULL}, + [2] = {"vnode-merge", NULL}, + [3] = {"vnode-compact", NULL}, + [4] = {"vnode-retention", NULL}, +}; + #define MIN_ASYNC_ID 1 -#define MAX_ASYNC_ID (sizeof(vnodeAsyncs) / sizeof(vnodeAsyncs[0]) - 1) +#define MAX_ASYNC_ID (sizeof(GVnodeAsyncs) / sizeof(GVnodeAsyncs[0]) - 1) static void vnodeAsyncTaskDone(SVAsync *async, SVATask *task) { int32_t ret; @@ -330,7 +340,7 @@ static int32_t vnodeAsyncInit(SVAsync **async, const char *label) { return terrno; } - tstrncpy((char *)((*async) + 1), label, strlen(label) + 1); + memcpy((char *)((*async) + 1), label, strlen(label) + 1); (*async)->label = (const char *)((*async) + 1); (void)taosThreadMutexInit(&(*async)->mutex, NULL); @@ -447,36 +457,47 @@ static void vnodeAsyncLaunchWorker(SVAsync *async) { } } -int32_t vnodeAsyncOpen(int32_t numOfThreads) { +int32_t vnodeAsyncOpen() { int32_t code = 0; int32_t lino = 0; - // vnode-commit - code = vnodeAsyncInit(&vnodeAsyncs[1], "vnode-commit"); - TSDB_CHECK_CODE(code, lino, _exit); + 
int32_t numOfThreads[] = { + 0, // + tsNumOfCommitThreads, // vnode-commit + tsNumOfCommitThreads, // vnode-merge + tsNumOfCompactThreads, // vnode-compact + tsNumOfRetentionThreads, // vnode-retention + }; - code = vnodeAsyncSetWorkers(1, numOfThreads); - TSDB_CHECK_CODE(code, lino, _exit); + for (int32_t i = 1; i < sizeof(GVnodeAsyncs) / sizeof(GVnodeAsyncs[0]); i++) { + code = vnodeAsyncInit(&GVnodeAsyncs[i].async, GVnodeAsyncs[i].label); + TSDB_CHECK_CODE(code, lino, _exit); - // vnode-merge - code = vnodeAsyncInit(&vnodeAsyncs[2], "vnode-merge"); - TSDB_CHECK_CODE(code, lino, _exit); - - code = vnodeAsyncSetWorkers(2, numOfThreads); - TSDB_CHECK_CODE(code, lino, _exit); + code = vnodeAsyncSetWorkers(i, numOfThreads[i]); + TSDB_CHECK_CODE(code, lino, _exit); + } _exit: return code; } void vnodeAsyncClose() { - int32_t ret; - ret = vnodeAsyncDestroy(&vnodeAsyncs[1]); - ret = vnodeAsyncDestroy(&vnodeAsyncs[2]); + for (int32_t i = 1; i < sizeof(GVnodeAsyncs) / sizeof(GVnodeAsyncs[0]); i++) { + int32_t ret = vnodeAsyncDestroy(&GVnodeAsyncs[i].async); + } } -int32_t vnodeAsync(SVAChannelID *channelID, EVAPriority priority, int32_t (*execute)(void *), void (*cancel)(void *), - void *arg, SVATaskID *taskID) { +int32_t vnodeAsync(int64_t async, EVAPriority priority, int32_t (*execute)(void *), void (*complete)(void *), void *arg, + SVATaskID *taskID) { + SVAChannelID channelID = { + .async = async, + .id = 0, + }; + return vnodeAsyncC(&channelID, priority, execute, complete, arg, taskID); +} + +int32_t vnodeAsyncC(SVAChannelID *channelID, EVAPriority priority, int32_t (*execute)(void *), void (*cancel)(void *), + void *arg, SVATaskID *taskID) { if (channelID == NULL || channelID->async < MIN_ASYNC_ID || channelID->async > MAX_ASYNC_ID || execute == NULL || channelID->id < 0) { return TSDB_CODE_INVALID_PARA; @@ -484,7 +505,7 @@ int32_t vnodeAsync(SVAChannelID *channelID, EVAPriority priority, int32_t (*exec int32_t ret; int64_t id; - SVAsync *async = 
vnodeAsyncs[channelID->async]; + SVAsync *async = GVnodeAsyncs[channelID->async].async; // create task object SVATask *task = (SVATask *)taosMemoryCalloc(1, sizeof(SVATask)); @@ -594,7 +615,7 @@ void vnodeAWait(SVATaskID *taskID) { return; } - SVAsync *async = vnodeAsyncs[taskID->async]; + SVAsync *async = GVnodeAsyncs[taskID->async].async; SVATask *task = NULL; SVATask task2 = { .taskId = taskID->id, @@ -623,7 +644,7 @@ int32_t vnodeACancel(SVATaskID *taskID) { } int32_t ret = 0; - SVAsync *async = vnodeAsyncs[taskID->async]; + SVAsync *async = GVnodeAsyncs[taskID->async].async; SVATask *task = NULL; SVATask task2 = { .taskId = taskID->id, @@ -660,7 +681,7 @@ int32_t vnodeAsyncSetWorkers(int64_t asyncID, int32_t numWorkers) { return TSDB_CODE_INVALID_PARA; } int32_t ret; - SVAsync *async = vnodeAsyncs[asyncID]; + SVAsync *async = GVnodeAsyncs[asyncID].async; (void)taosThreadMutexLock(&async->mutex); async->numWorkers = numWorkers; if (async->numIdleWorkers > 0) { @@ -676,7 +697,7 @@ int32_t vnodeAChannelInit(int64_t asyncID, SVAChannelID *channelID) { return TSDB_CODE_INVALID_PARA; } - SVAsync *async = vnodeAsyncs[asyncID]; + SVAsync *async = GVnodeAsyncs[asyncID].async; // create channel object SVAChannel *channel = (SVAChannel *)taosMemoryMalloc(sizeof(SVAChannel)); @@ -722,7 +743,7 @@ int32_t vnodeAChannelDestroy(SVAChannelID *channelID, bool waitRunning) { return TSDB_CODE_INVALID_PARA; } - SVAsync *async = vnodeAsyncs[channelID->async]; + SVAsync *async = GVnodeAsyncs[channelID->async].async; SVAChannel *channel = NULL; SVAChannel channel2 = { .channelId = channelID->id, @@ -806,4 +827,19 @@ int32_t vnodeAChannelDestroy(SVAChannelID *channelID, bool waitRunning) { channelID->async = 0; channelID->id = 0; return 0; +} + +const char *vnodeGetATaskName(EVATaskT taskType) { + switch (taskType) { + case EVA_TASK_COMMIT: + return "vnode-commit"; + case EVA_TASK_MERGE: + return "vnode-merge"; + case EVA_TASK_COMPACT: + return "vnode-compact"; + case 
EVA_TASK_RETENTION: + return "vnode-retention"; + default: + return "unknown"; + } } \ No newline at end of file diff --git a/source/dnode/vnode/src/vnd/vnodeCfg.c b/source/dnode/vnode/src/vnd/vnodeCfg.c index 9c153bc8a1..653f525e09 100644 --- a/source/dnode/vnode/src/vnd/vnodeCfg.c +++ b/source/dnode/vnode/src/vnd/vnodeCfg.c @@ -303,7 +303,7 @@ int vnodeDecodeConfig(const SJson *pJson, void *pObj) { if (tsEncryptKey[0] == 0) { return terrno = TSDB_CODE_DNODE_INVALID_ENCRYPTKEY; } else { - tstrncpy(pCfg->tdbEncryptKey, tsEncryptKey, ENCRYPT_KEY_LEN); + tstrncpy(pCfg->tdbEncryptKey, tsEncryptKey, ENCRYPT_KEY_LEN + 1); } } #endif diff --git a/source/dnode/vnode/src/vnd/vnodeCommit.c b/source/dnode/vnode/src/vnd/vnodeCommit.c index 28d27b8893..7fd3f9a8b6 100644 --- a/source/dnode/vnode/src/vnd/vnodeCommit.c +++ b/source/dnode/vnode/src/vnd/vnodeCommit.c @@ -389,8 +389,7 @@ int vnodeAsyncCommit(SVnode *pVnode) { TSDB_CHECK_CODE(code, lino, _exit); // schedule the task - code = - vnodeAsync(&pVnode->commitChannel, EVA_PRIORITY_HIGH, vnodeCommit, vnodeCommitCancel, pInfo, &pVnode->commitTask); + code = vnodeAsync(COMMIT_TASK_ASYNC, EVA_PRIORITY_HIGH, vnodeCommit, vnodeCommitCancel, pInfo, &pVnode->commitTask); TSDB_CHECK_CODE(code, lino, _exit); _exit: diff --git a/source/dnode/vnode/src/vnd/vnodeModule.c b/source/dnode/vnode/src/vnd/vnodeModule.c index 9d326defdd..ff537ef4a7 100644 --- a/source/dnode/vnode/src/vnd/vnodeModule.c +++ b/source/dnode/vnode/src/vnd/vnodeModule.c @@ -20,14 +20,13 @@ static volatile int32_t VINIT = 0; -int vnodeInit(int nthreads, StopDnodeFp stopDnodeFp) { +int vnodeInit(StopDnodeFp stopDnodeFp) { if (atomic_val_compare_exchange_32(&VINIT, 0, 1)) { return 0; } - TAOS_CHECK_RETURN(vnodeAsyncOpen(nthreads)); + TAOS_CHECK_RETURN(vnodeAsyncOpen()); TAOS_CHECK_RETURN(walInit(stopDnodeFp)); - TAOS_CHECK_RETURN(tsdbInit()); monInitVnode(); @@ -36,7 +35,6 @@ int vnodeInit(int nthreads, StopDnodeFp stopDnodeFp) { void vnodeCleanup() { if 
(atomic_val_compare_exchange_32(&VINIT, 1, 0) == 0) return; - tsdbCleanUp(); vnodeAsyncClose(); walCleanUp(); smaCleanUp(); diff --git a/source/dnode/vnode/src/vnd/vnodeOpen.c b/source/dnode/vnode/src/vnd/vnodeOpen.c index b9e686932e..6de5298728 100644 --- a/source/dnode/vnode/src/vnd/vnodeOpen.c +++ b/source/dnode/vnode/src/vnd/vnodeOpen.c @@ -417,7 +417,7 @@ SVnode *vnodeOpen(const char *path, int32_t diskPrimary, STfs *pTfs, SMsgCb msgC } pVnode->path = (char *)&pVnode[1]; - tstrncpy(pVnode->path, path, strlen(path) + 1); + memcpy(pVnode->path, path, strlen(path) + 1); pVnode->config = info.config; pVnode->state.committed = info.state.committed; pVnode->state.commitTerm = info.state.commitTerm; @@ -438,11 +438,6 @@ SVnode *vnodeOpen(const char *path, int32_t diskPrimary, STfs *pTfs, SMsgCb msgC (void)taosThreadMutexInit(&pVnode->mutex, NULL); (void)taosThreadCondInit(&pVnode->poolNotEmpty, NULL); - if (vnodeAChannelInit(1, &pVnode->commitChannel) != 0) { - vError("vgId:%d, failed to init commit channel", TD_VID(pVnode)); - goto _err; - } - int8_t rollback = vnodeShouldRollback(pVnode); // open buffer pool @@ -558,10 +553,6 @@ void vnodePostClose(SVnode *pVnode) { vnodeSyncPostClose(pVnode); } void vnodeClose(SVnode *pVnode) { if (pVnode) { vnodeAWait(&pVnode->commitTask); - if (vnodeAChannelDestroy(&pVnode->commitChannel, true) != 0) { - vError("vgId:%d, failed to destroy commit channel", TD_VID(pVnode)); - } - vnodeSyncClose(pVnode); vnodeQueryClose(pVnode); tqClose(pVnode->pTq); diff --git a/source/dnode/vnode/src/vnd/vnodeSnapshot.c b/source/dnode/vnode/src/vnd/vnodeSnapshot.c index 0c11083367..9e3c6861c0 100644 --- a/source/dnode/vnode/src/vnd/vnodeSnapshot.c +++ b/source/dnode/vnode/src/vnd/vnodeSnapshot.c @@ -597,13 +597,11 @@ extern void tsdbEnableBgTask(STsdb *pTsdb); static int32_t vnodeCancelAndDisableAllBgTask(SVnode *pVnode) { TAOS_CHECK_RETURN(tsdbDisableAndCancelAllBgTask(pVnode->pTsdb)); TAOS_CHECK_RETURN(vnodeSyncCommit(pVnode)); - 
TAOS_CHECK_RETURN(vnodeAChannelDestroy(&pVnode->commitChannel, true)); return 0; } static int32_t vnodeEnableBgTask(SVnode *pVnode) { tsdbEnableBgTask(pVnode->pTsdb); - TAOS_CHECK_RETURN(vnodeAChannelInit(1, &pVnode->commitChannel)); return 0; } diff --git a/source/libs/catalog/CMakeLists.txt b/source/libs/catalog/CMakeLists.txt index 179781c2c9..dd7220da15 100644 --- a/source/libs/catalog/CMakeLists.txt +++ b/source/libs/catalog/CMakeLists.txt @@ -11,6 +11,6 @@ target_link_libraries( PRIVATE os util transport qcom nodes ) -# if(${BUILD_TEST}) -# ADD_SUBDIRECTORY(test) -# endif(${BUILD_TEST}) +if(${BUILD_TEST} AND NOT ${TD_WINDOWS}) + ADD_SUBDIRECTORY(test) +endif() diff --git a/source/libs/catalog/inc/catalogInt.h b/source/libs/catalog/inc/catalogInt.h index b581e31919..359bdc2b45 100644 --- a/source/libs/catalog/inc/catalogInt.h +++ b/source/libs/catalog/inc/catalogInt.h @@ -497,6 +497,8 @@ typedef struct SCtgAsyncFps { ctgDumpTaskResFp dumpResFp; ctgCompTaskFp compFp; ctgCloneTaskResFp cloneFp; + int32_t subTaskFactor; // to indicate how many sub tasks this task will generate by ctgLaunchSubTask + // default to 1, means no sub task, 2 means 1 sub task, 3 means 2 sub tasks... 
} SCtgAsyncFps; typedef struct SCtgApiStat { diff --git a/source/libs/catalog/src/ctgAsync.c b/source/libs/catalog/src/ctgAsync.c index e21c1a6486..18532e7ad7 100644 --- a/source/libs/catalog/src/ctgAsync.c +++ b/source/libs/catalog/src/ctgAsync.c @@ -861,6 +861,16 @@ int32_t ctgInitJob(SCatalog* pCtg, SRequestConnInfo* pConn, SCtgJob** job, const int32_t taskNum = tbMetaNum + dbVgNum + udfNum + tbHashNum + qnodeNum + dnodeNum + svrVerNum + dbCfgNum + indexNum + userNum + dbInfoNum + tbIndexNum + tbCfgNum + tbTagNum + viewNum + tbTsmaNum + tbNameNum; + int32_t taskNumWithSubTasks = tbMetaNum * gCtgAsyncFps[CTG_TASK_GET_TB_META].subTaskFactor + dbVgNum * gCtgAsyncFps[CTG_TASK_GET_DB_VGROUP].subTaskFactor + + udfNum * gCtgAsyncFps[CTG_TASK_GET_UDF].subTaskFactor + tbHashNum * gCtgAsyncFps[CTG_TASK_GET_TB_HASH].subTaskFactor + + qnodeNum * gCtgAsyncFps[CTG_TASK_GET_QNODE].subTaskFactor + dnodeNum * gCtgAsyncFps[CTG_TASK_GET_DNODE].subTaskFactor + + svrVerNum * gCtgAsyncFps[CTG_TASK_GET_SVR_VER].subTaskFactor + dbCfgNum * gCtgAsyncFps[CTG_TASK_GET_DB_CFG].subTaskFactor + + indexNum * gCtgAsyncFps[CTG_TASK_GET_INDEX_INFO].subTaskFactor + userNum * gCtgAsyncFps[CTG_TASK_GET_USER].subTaskFactor + + dbInfoNum * gCtgAsyncFps[CTG_TASK_GET_DB_INFO].subTaskFactor + tbIndexNum * gCtgAsyncFps[CTG_TASK_GET_TB_SMA_INDEX].subTaskFactor + + tbCfgNum * gCtgAsyncFps[CTG_TASK_GET_TB_CFG].subTaskFactor + tbTagNum * gCtgAsyncFps[CTG_TASK_GET_TB_TAG].subTaskFactor + + viewNum * gCtgAsyncFps[CTG_TASK_GET_VIEW].subTaskFactor + tbTsmaNum * gCtgAsyncFps[CTG_TASK_GET_TB_TSMA].subTaskFactor + + tsmaNum * gCtgAsyncFps[CTG_TASK_GET_TSMA].subTaskFactor + tbNameNum * gCtgAsyncFps[CTG_TASK_GET_TB_NAME].subTaskFactor; + *job = taosMemoryCalloc(1, sizeof(SCtgJob)); if (NULL == *job) { ctgError("failed to calloc, size:%d,QID:0x%" PRIx64, (int32_t)sizeof(SCtgJob), pConn->requestId); @@ -905,7 +915,7 @@ int32_t ctgInitJob(SCatalog* pCtg, SRequestConnInfo* pConn, SCtgJob** job, const } #endif - 
pJob->pTasks = taosArrayInit(taskNum, sizeof(SCtgTask)); + pJob->pTasks = taosArrayInit(taskNumWithSubTasks, sizeof(SCtgTask)); if (NULL == pJob->pTasks) { ctgError("taosArrayInit %d tasks failed", taskNum); CTG_ERR_JRET(terrno); @@ -4178,27 +4188,27 @@ int32_t ctgCloneDbVg(SCtgTask* pTask, void** pRes) { } SCtgAsyncFps gCtgAsyncFps[] = { - {ctgInitGetQnodeTask, ctgLaunchGetQnodeTask, ctgHandleGetQnodeRsp, ctgDumpQnodeRes, NULL, NULL}, - {ctgInitGetDnodeTask, ctgLaunchGetDnodeTask, ctgHandleGetDnodeRsp, ctgDumpDnodeRes, NULL, NULL}, - {ctgInitGetDbVgTask, ctgLaunchGetDbVgTask, ctgHandleGetDbVgRsp, ctgDumpDbVgRes, ctgCompDbVgTasks, ctgCloneDbVg}, - {ctgInitGetDbCfgTask, ctgLaunchGetDbCfgTask, ctgHandleGetDbCfgRsp, ctgDumpDbCfgRes, NULL, NULL}, - {ctgInitGetDbInfoTask, ctgLaunchGetDbInfoTask, ctgHandleGetDbInfoRsp, ctgDumpDbInfoRes, NULL, NULL}, + {ctgInitGetQnodeTask, ctgLaunchGetQnodeTask, ctgHandleGetQnodeRsp, ctgDumpQnodeRes, NULL, NULL, 1}, + {ctgInitGetDnodeTask, ctgLaunchGetDnodeTask, ctgHandleGetDnodeRsp, ctgDumpDnodeRes, NULL, NULL, 1}, + {ctgInitGetDbVgTask, ctgLaunchGetDbVgTask, ctgHandleGetDbVgRsp, ctgDumpDbVgRes, ctgCompDbVgTasks, ctgCloneDbVg, 1}, + {ctgInitGetDbCfgTask, ctgLaunchGetDbCfgTask, ctgHandleGetDbCfgRsp, ctgDumpDbCfgRes, NULL, NULL, 1}, + {ctgInitGetDbInfoTask, ctgLaunchGetDbInfoTask, ctgHandleGetDbInfoRsp, ctgDumpDbInfoRes, NULL, NULL, 1}, {ctgInitGetTbMetaTask, ctgLaunchGetTbMetaTask, ctgHandleGetTbMetaRsp, ctgDumpTbMetaRes, ctgCompTbMetaTasks, - ctgCloneTbMeta}, - {ctgInitGetTbHashTask, ctgLaunchGetTbHashTask, ctgHandleGetTbHashRsp, ctgDumpTbHashRes, NULL, NULL}, - {ctgInitGetTbIndexTask, ctgLaunchGetTbIndexTask, ctgHandleGetTbIndexRsp, ctgDumpTbIndexRes, NULL, NULL}, - {ctgInitGetTbCfgTask, ctgLaunchGetTbCfgTask, ctgHandleGetTbCfgRsp, ctgDumpTbCfgRes, NULL, NULL}, - {ctgInitGetIndexTask, ctgLaunchGetIndexTask, ctgHandleGetIndexRsp, ctgDumpIndexRes, NULL, NULL}, - {ctgInitGetUdfTask, ctgLaunchGetUdfTask, ctgHandleGetUdfRsp, ctgDumpUdfRes, 
NULL, NULL}, - {ctgInitGetUserTask, ctgLaunchGetUserTask, ctgHandleGetUserRsp, ctgDumpUserRes, NULL, NULL}, - {ctgInitGetSvrVerTask, ctgLaunchGetSvrVerTask, ctgHandleGetSvrVerRsp, ctgDumpSvrVer, NULL, NULL}, - {ctgInitGetTbMetasTask, ctgLaunchGetTbMetasTask, ctgHandleGetTbMetasRsp, ctgDumpTbMetasRes, NULL, NULL}, - {ctgInitGetTbHashsTask, ctgLaunchGetTbHashsTask, ctgHandleGetTbHashsRsp, ctgDumpTbHashsRes, NULL, NULL}, - {ctgInitGetTbTagTask, ctgLaunchGetTbTagTask, ctgHandleGetTbTagRsp, ctgDumpTbTagRes, NULL, NULL}, - {ctgInitGetViewsTask, ctgLaunchGetViewsTask, ctgHandleGetViewsRsp, ctgDumpViewsRes, NULL, NULL}, - {ctgInitGetTbTSMATask, ctgLaunchGetTbTSMATask, ctgHandleGetTbTSMARsp, ctgDumpTbTSMARes, NULL, NULL}, - {ctgInitGetTSMATask, ctgLaunchGetTSMATask, ctgHandleGetTSMARsp, ctgDumpTSMARes, NULL, NULL}, - {ctgInitGetTbNamesTask, ctgLaunchGetTbNamesTask, ctgHandleGetTbNamesRsp, ctgDumpTbNamesRes, NULL, NULL}, + ctgCloneTbMeta, 1}, + {ctgInitGetTbHashTask, ctgLaunchGetTbHashTask, ctgHandleGetTbHashRsp, ctgDumpTbHashRes, NULL, NULL, 1}, + {ctgInitGetTbIndexTask, ctgLaunchGetTbIndexTask, ctgHandleGetTbIndexRsp, ctgDumpTbIndexRes, NULL, NULL, 1}, + {ctgInitGetTbCfgTask, ctgLaunchGetTbCfgTask, ctgHandleGetTbCfgRsp, ctgDumpTbCfgRes, NULL, NULL, 2}, + {ctgInitGetIndexTask, ctgLaunchGetIndexTask, ctgHandleGetIndexRsp, ctgDumpIndexRes, NULL, NULL, 1}, + {ctgInitGetUdfTask, ctgLaunchGetUdfTask, ctgHandleGetUdfRsp, ctgDumpUdfRes, NULL, NULL, 1}, + {ctgInitGetUserTask, ctgLaunchGetUserTask, ctgHandleGetUserRsp, ctgDumpUserRes, NULL, NULL, 2}, + {ctgInitGetSvrVerTask, ctgLaunchGetSvrVerTask, ctgHandleGetSvrVerRsp, ctgDumpSvrVer, NULL, NULL, 1}, + {ctgInitGetTbMetasTask, ctgLaunchGetTbMetasTask, ctgHandleGetTbMetasRsp, ctgDumpTbMetasRes, NULL, NULL, 1}, + {ctgInitGetTbHashsTask, ctgLaunchGetTbHashsTask, ctgHandleGetTbHashsRsp, ctgDumpTbHashsRes, NULL, NULL, 1}, + {ctgInitGetTbTagTask, ctgLaunchGetTbTagTask, ctgHandleGetTbTagRsp, ctgDumpTbTagRes, NULL, NULL, 2}, + 
{ctgInitGetViewsTask, ctgLaunchGetViewsTask, ctgHandleGetViewsRsp, ctgDumpViewsRes, NULL, NULL, 1}, + {ctgInitGetTbTSMATask, ctgLaunchGetTbTSMATask, ctgHandleGetTbTSMARsp, ctgDumpTbTSMARes, NULL, NULL, 1}, + {ctgInitGetTSMATask, ctgLaunchGetTSMATask, ctgHandleGetTSMARsp, ctgDumpTSMARes, NULL, NULL, 1}, + {ctgInitGetTbNamesTask, ctgLaunchGetTbNamesTask, ctgHandleGetTbNamesRsp, ctgDumpTbNamesRes, NULL, NULL, 1}, }; int32_t ctgMakeAsyncRes(SCtgJob* pJob) { diff --git a/source/libs/catalog/test/catalogTests.cpp b/source/libs/catalog/test/catalogTests.cpp index 25c82b8452..7b0504504d 100644 --- a/source/libs/catalog/test/catalogTests.cpp +++ b/source/libs/catalog/test/catalogTests.cpp @@ -162,7 +162,7 @@ void ctgTestInitLogFile() { (void)ctgdEnableDebug("cache", true); (void)ctgdEnableDebug("lock", true); - if (taosInitLog(defaultLogFileNamePrefix, maxLogFileNum) < 0) { + if (taosInitLog(defaultLogFileNamePrefix, 1, false) < 0) { (void)printf("failed to open log file in directory:%s\n", tsLogDir); ASSERT(0); } diff --git a/source/libs/executor/src/forecastoperator.c b/source/libs/executor/src/forecastoperator.c index a56b0dd214..2985e5e000 100644 --- a/source/libs/executor/src/forecastoperator.c +++ b/source/libs/executor/src/forecastoperator.c @@ -72,17 +72,20 @@ static FORCE_INLINE int32_t forecastEnsureBlockCapacity(SSDataBlock* pBlock, int return TSDB_CODE_SUCCESS; } -static int32_t forecastCacheBlock(SForecastSupp* pSupp, SSDataBlock* pBlock) { - if (pSupp->cachedRows > ANAL_FORECAST_MAX_ROWS) { - return TSDB_CODE_ANA_ANODE_TOO_MANY_ROWS; - } - - int32_t code = TSDB_CODE_SUCCESS; - int32_t lino = 0; +static int32_t forecastCacheBlock(SForecastSupp* pSupp, SSDataBlock* pBlock, const char* id) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; SAnalyticBuf* pBuf = &pSupp->analBuf; - qDebug("block:%d, %p rows:%" PRId64, pSupp->numOfBlocks, pBlock, pBlock->info.rows); + if (pSupp->cachedRows > ANAL_FORECAST_MAX_ROWS) { + code = TSDB_CODE_ANA_ANODE_TOO_MANY_ROWS; 
+ qError("%s rows:%" PRId64 " for forecast cache, error happens, code:%s, upper limit:%d", id, pSupp->cachedRows, + tstrerror(code), ANAL_FORECAST_MAX_ROWS); + return code; + } + pSupp->numOfBlocks++; + qDebug("%s block:%d, %p rows:%" PRId64, id, pSupp->numOfBlocks, pBlock, pBlock->info.rows); for (int32_t j = 0; j < pBlock->info.rows; ++j) { SColumnInfoData* pValCol = taosArrayGet(pBlock->pDataBlock, pSupp->inputValSlot); @@ -98,10 +101,16 @@ static int32_t forecastCacheBlock(SForecastSupp* pSupp, SSDataBlock* pBlock) { pSupp->numOfRows++; code = taosAnalBufWriteColData(pBuf, 0, TSDB_DATA_TYPE_TIMESTAMP, &ts); - if (TSDB_CODE_SUCCESS != code) return code; + if (TSDB_CODE_SUCCESS != code) { + qError("%s failed to write ts in buf, code:%s", id, tstrerror(code)); + return code; + } code = taosAnalBufWriteColData(pBuf, 1, valType, val); - if (TSDB_CODE_SUCCESS != code) return code; + if (TSDB_CODE_SUCCESS != code) { + qError("%s failed to write val in buf, code:%s", id, tstrerror(code)); + return code; + } } return 0; @@ -394,7 +403,7 @@ static int32_t forecastNext(SOperatorInfo* pOperator, SSDataBlock** ppRes) { pSupp->cachedRows += pBlock->info.rows; qDebug("%s group:%" PRId64 ", blocks:%d, rows:%" PRId64 ", total rows:%" PRId64, pId, pSupp->groupId, numOfBlocks, pBlock->info.rows, pSupp->cachedRows); - code = forecastCacheBlock(pSupp, pBlock); + code = forecastCacheBlock(pSupp, pBlock, pId); QUERY_CHECK_CODE(code, lino, _end); } else { qDebug("%s group:%" PRId64 ", read finish for new group coming, blocks:%d", pId, pSupp->groupId, numOfBlocks); @@ -405,7 +414,7 @@ static int32_t forecastNext(SOperatorInfo* pOperator, SSDataBlock** ppRes) { pSupp->cachedRows = pBlock->info.rows; qDebug("%s group:%" PRId64 ", new group, rows:%" PRId64 ", total rows:%" PRId64, pId, pSupp->groupId, pBlock->info.rows, pSupp->cachedRows); - code = forecastCacheBlock(pSupp, pBlock); + code = forecastCacheBlock(pSupp, pBlock, pId); QUERY_CHECK_CODE(code, lino, _end); } diff --git 
a/source/libs/executor/src/hashjoin.c b/source/libs/executor/src/hashjoin.c index f63b4093db..da7686cce6 100755 --- a/source/libs/executor/src/hashjoin.c +++ b/source/libs/executor/src/hashjoin.c @@ -83,6 +83,8 @@ int32_t hInnerJoinDo(struct SOperatorInfo* pOperator) { return code; } +#ifdef HASH_JOIN_FULL + int32_t hLeftJoinHandleSeqRowRemains(struct SOperatorInfo* pOperator, SHJoinOperatorInfo* pJoin, bool* loopCont) { bool allFetched = false; SHJoinCtx* pCtx = &pJoin->ctx; @@ -346,4 +348,5 @@ int32_t hLeftJoinDo(struct SOperatorInfo* pOperator) { return TSDB_CODE_SUCCESS; } +#endif diff --git a/source/libs/executor/src/hashjoinoperator.c b/source/libs/executor/src/hashjoinoperator.c index 64ce62cb66..73a5139e43 100644 --- a/source/libs/executor/src/hashjoinoperator.c +++ b/source/libs/executor/src/hashjoinoperator.c @@ -89,7 +89,7 @@ int32_t hJoinSetImplFp(SHJoinOperatorInfo* pJoin) { case JOIN_TYPE_RIGHT: { switch (pJoin->subType) { case JOIN_STYPE_OUTER: - pJoin->joinFp = hLeftJoinDo; + //pJoin->joinFp = hLeftJoinDo; TOOPEN break; default: break; diff --git a/source/libs/executor/test/CMakeLists.txt b/source/libs/executor/test/CMakeLists.txt index cb1f951c94..4136640847 100644 --- a/source/libs/executor/test/CMakeLists.txt +++ b/source/libs/executor/test/CMakeLists.txt @@ -44,3 +44,15 @@ TARGET_INCLUDE_DIRECTORIES( PUBLIC "${TD_SOURCE_DIR}/include/common" PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/../inc" ) + +ADD_EXECUTABLE(execUtilTests execUtilTests.cpp) +TARGET_LINK_LIBRARIES( + execUtilTests + PRIVATE os util common executor gtest_main qcom function planner scalar nodes vnode +) + +TARGET_INCLUDE_DIRECTORIES( + execUtilTests + PUBLIC "${TD_SOURCE_DIR}/include/common" + PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/../inc" +) diff --git a/source/libs/executor/test/execUtilTests.cpp b/source/libs/executor/test/execUtilTests.cpp new file mode 100644 index 0000000000..61b69fb9cf --- /dev/null +++ b/source/libs/executor/test/execUtilTests.cpp @@ -0,0 +1,35 @@ +#include 
"gtest/gtest.h" + +#include "executil.h" + +TEST(execUtilTest, resRowTest) { + SDiskbasedBuf *pBuf = nullptr; + int32_t pageSize = 32; + int32_t numPages = 3; + int32_t code = createDiskbasedBuf(&pBuf, pageSize, pageSize * numPages, "test_buf", "/"); + EXPECT_EQ(code, TSDB_CODE_SUCCESS); + + std::vector pages(numPages); + std::vector pageIds(numPages); + for (int32_t i = 0; i < numPages; ++i) { + pages[i] = getNewBufPage(pBuf, &pageIds[i]); + EXPECT_NE(pages[i], nullptr); + EXPECT_EQ(pageIds[i], i); + } + + EXPECT_EQ(getNewBufPage(pBuf, nullptr), nullptr); + + SResultRowPosition pos; + pos.offset = 0; + for (int32_t i = 0; i < numPages; ++i) { + pos.pageId = pageIds[i]; + bool forUpdate = i & 0x1; + SResultRow *row = getResultRowByPos(pBuf, &pos, forUpdate); + EXPECT_EQ((void *)row, pages[i]); + } + + pos.pageId = numPages + 1; + EXPECT_EQ(getResultRowByPos(pBuf, &pos, true), nullptr); + + destroyDiskbasedBuf(pBuf); +} diff --git a/source/libs/function/src/tudf.c b/source/libs/function/src/tudf.c index 4efa8764e5..b057194cdb 100644 --- a/source/libs/function/src/tudf.c +++ b/source/libs/function/src/tudf.c @@ -668,8 +668,8 @@ int32_t encodeUdfCallRequest(void **buf, const SUdfCallRequest *call) { len += tEncodeDataBlock(buf, &call->block); len += encodeUdfInterBuf(buf, &call->interBuf); } else if (call->callType == TSDB_UDF_CALL_AGG_MERGE) { - len += encodeUdfInterBuf(buf, &call->interBuf); - len += encodeUdfInterBuf(buf, &call->interBuf2); + // len += encodeUdfInterBuf(buf, &call->interBuf); + // len += encodeUdfInterBuf(buf, &call->interBuf2); } else if (call->callType == TSDB_UDF_CALL_AGG_FIN) { len += encodeUdfInterBuf(buf, &call->interBuf); } @@ -690,10 +690,10 @@ void *decodeUdfCallRequest(const void *buf, SUdfCallRequest *call) { buf = tDecodeDataBlock(buf, &call->block); buf = decodeUdfInterBuf(buf, &call->interBuf); break; - case TSDB_UDF_CALL_AGG_MERGE: - buf = decodeUdfInterBuf(buf, &call->interBuf); - buf = decodeUdfInterBuf(buf, &call->interBuf2); - 
break; + // case TSDB_UDF_CALL_AGG_MERGE: + // buf = decodeUdfInterBuf(buf, &call->interBuf); + // buf = decodeUdfInterBuf(buf, &call->interBuf2); + // break; case TSDB_UDF_CALL_AGG_FIN: buf = decodeUdfInterBuf(buf, &call->interBuf); break; @@ -779,9 +779,9 @@ int32_t encodeUdfCallResponse(void **buf, const SUdfCallResponse *callRsp) { case TSDB_UDF_CALL_AGG_PROC: len += encodeUdfInterBuf(buf, &callRsp->resultBuf); break; - case TSDB_UDF_CALL_AGG_MERGE: - len += encodeUdfInterBuf(buf, &callRsp->resultBuf); - break; + // case TSDB_UDF_CALL_AGG_MERGE: + // len += encodeUdfInterBuf(buf, &callRsp->resultBuf); + // break; case TSDB_UDF_CALL_AGG_FIN: len += encodeUdfInterBuf(buf, &callRsp->resultBuf); break; @@ -801,9 +801,9 @@ void *decodeUdfCallResponse(const void *buf, SUdfCallResponse *callRsp) { case TSDB_UDF_CALL_AGG_PROC: buf = decodeUdfInterBuf(buf, &callRsp->resultBuf); break; - case TSDB_UDF_CALL_AGG_MERGE: - buf = decodeUdfInterBuf(buf, &callRsp->resultBuf); - break; + // case TSDB_UDF_CALL_AGG_MERGE: + // buf = decodeUdfInterBuf(buf, &callRsp->resultBuf); + // break; case TSDB_UDF_CALL_AGG_FIN: buf = decodeUdfInterBuf(buf, &callRsp->resultBuf); break; @@ -1129,8 +1129,9 @@ int32_t callUdf(UdfcFuncHandle handle, int8_t callType, SSDataBlock *input, SUdf SSDataBlock *output, SUdfInterBuf *newState); int32_t doCallUdfAggInit(UdfcFuncHandle handle, SUdfInterBuf *interBuf); int32_t doCallUdfAggProcess(UdfcFuncHandle handle, SSDataBlock *block, SUdfInterBuf *state, SUdfInterBuf *newState); -int32_t doCallUdfAggMerge(UdfcFuncHandle handle, SUdfInterBuf *interBuf1, SUdfInterBuf *interBuf2, - SUdfInterBuf *resultBuf); +// udf todo: aggmerge +// int32_t doCallUdfAggMerge(UdfcFuncHandle handle, SUdfInterBuf *interBuf1, SUdfInterBuf *interBuf2, +// SUdfInterBuf *resultBuf); int32_t doCallUdfAggFinalize(UdfcFuncHandle handle, SUdfInterBuf *interBuf, SUdfInterBuf *resultData); int32_t doCallUdfScalarFunc(UdfcFuncHandle handle, SScalarParam *input, int32_t numOfCols, 
SScalarParam *output); int32_t callUdfScalarFunc(char *udfName, SScalarParam *input, int32_t numOfCols, SScalarParam *output); @@ -2176,11 +2177,11 @@ int32_t callUdf(UdfcFuncHandle handle, int8_t callType, SSDataBlock *input, SUdf req->interBuf = *state; break; } - case TSDB_UDF_CALL_AGG_MERGE: { - req->interBuf = *state; - req->interBuf2 = *state2; - break; - } + // case TSDB_UDF_CALL_AGG_MERGE: { + // req->interBuf = *state; + // req->interBuf2 = *state2; + // break; + // } case TSDB_UDF_CALL_AGG_FIN: { req->interBuf = *state; break; @@ -2205,10 +2206,10 @@ int32_t callUdf(UdfcFuncHandle handle, int8_t callType, SSDataBlock *input, SUdf *newState = rsp->resultBuf; break; } - case TSDB_UDF_CALL_AGG_MERGE: { - *newState = rsp->resultBuf; - break; - } + // case TSDB_UDF_CALL_AGG_MERGE: { + // *newState = rsp->resultBuf; + // break; + // } case TSDB_UDF_CALL_AGG_FIN: { *newState = rsp->resultBuf; break; @@ -2241,12 +2242,13 @@ int32_t doCallUdfAggProcess(UdfcFuncHandle handle, SSDataBlock *block, SUdfInter // input: interbuf1, interbuf2 // output: resultBuf -int32_t doCallUdfAggMerge(UdfcFuncHandle handle, SUdfInterBuf *interBuf1, SUdfInterBuf *interBuf2, - SUdfInterBuf *resultBuf) { - int8_t callType = TSDB_UDF_CALL_AGG_MERGE; - int32_t err = callUdf(handle, callType, NULL, interBuf1, interBuf2, NULL, resultBuf); - return err; -} +// udf todo: aggmerge +// int32_t doCallUdfAggMerge(UdfcFuncHandle handle, SUdfInterBuf *interBuf1, SUdfInterBuf *interBuf2, +// SUdfInterBuf *resultBuf) { +// int8_t callType = TSDB_UDF_CALL_AGG_MERGE; +// int32_t err = callUdf(handle, callType, NULL, interBuf1, interBuf2, NULL, resultBuf); +// return err; +// } // input: interBuf // output: resultData diff --git a/source/libs/function/src/udfd.c b/source/libs/function/src/udfd.c index bbfd43d5f7..ecb24fc77a 100644 --- a/source/libs/function/src/udfd.c +++ b/source/libs/function/src/udfd.c @@ -194,17 +194,17 @@ int32_t udfdCPluginUdfAggProc(SUdfDataBlock *block, SUdfInterBuf *interBuf, 
SUdf } } -int32_t udfdCPluginUdfAggMerge(SUdfInterBuf *inputBuf1, SUdfInterBuf *inputBuf2, SUdfInterBuf *outputBuf, - void *udfCtx) { - TAOS_UDF_CHECK_PTR_RCODE(inputBuf1, inputBuf2, outputBuf, udfCtx); - SUdfCPluginCtx *ctx = udfCtx; - if (ctx->aggMergeFunc) { - return ctx->aggMergeFunc(inputBuf1, inputBuf2, outputBuf); - } else { - fnError("udfd c plugin aggregation merge not implemented"); - return TSDB_CODE_UDF_FUNC_EXEC_FAILURE; - } -} +// int32_t udfdCPluginUdfAggMerge(SUdfInterBuf *inputBuf1, SUdfInterBuf *inputBuf2, SUdfInterBuf *outputBuf, +// void *udfCtx) { +// TAOS_UDF_CHECK_PTR_RCODE(inputBuf1, inputBuf2, outputBuf, udfCtx); +// SUdfCPluginCtx *ctx = udfCtx; +// if (ctx->aggMergeFunc) { +// return ctx->aggMergeFunc(inputBuf1, inputBuf2, outputBuf); +// } else { +// fnError("udfd c plugin aggregation merge not implemented"); +// return TSDB_CODE_UDF_FUNC_EXEC_FAILURE; +// } +// } int32_t udfdCPluginUdfAggFinish(SUdfInterBuf *buf, SUdfInterBuf *resultData, void *udfCtx) { TAOS_UDF_CHECK_PTR_RCODE(buf, resultData, udfCtx); @@ -378,7 +378,7 @@ int32_t udfdInitializeCPlugin(SUdfScriptPlugin *plugin) { plugin->udfScalarProcFunc = udfdCPluginUdfScalarProc; plugin->udfAggStartFunc = udfdCPluginUdfAggStart; plugin->udfAggProcFunc = udfdCPluginUdfAggProc; - plugin->udfAggMergeFunc = udfdCPluginUdfAggMerge; + // plugin->udfAggMergeFunc = udfdCPluginUdfAggMerge; plugin->udfAggFinishFunc = udfdCPluginUdfAggFinish; SScriptUdfEnvItem items[1] = {{"LD_LIBRARY_PATH", tsUdfdLdLibPath}}; @@ -889,19 +889,19 @@ void udfdProcessCallRequest(SUvUdfWork *uvUdf, SUdfRequest *request) { break; } - case TSDB_UDF_CALL_AGG_MERGE: { - SUdfInterBuf outBuf = {.buf = taosMemoryMalloc(udf->bufSize), .bufLen = udf->bufSize, .numOfResult = 0}; - if (outBuf.buf != NULL) { - code = udf->scriptPlugin->udfAggMergeFunc(&call->interBuf, &call->interBuf2, &outBuf, udf->scriptUdfCtx); - freeUdfInterBuf(&call->interBuf); - freeUdfInterBuf(&call->interBuf2); - subRsp->resultBuf = outBuf; - } else { 
- code = terrno; - } - - break; - } + // case TSDB_UDF_CALL_AGG_MERGE: { + // SUdfInterBuf outBuf = {.buf = taosMemoryMalloc(udf->bufSize), .bufLen = udf->bufSize, .numOfResult = 0}; + // if (outBuf.buf != NULL) { + // code = udf->scriptPlugin->udfAggMergeFunc(&call->interBuf, &call->interBuf2, &outBuf, udf->scriptUdfCtx); + // freeUdfInterBuf(&call->interBuf); + // freeUdfInterBuf(&call->interBuf2); + // subRsp->resultBuf = outBuf; + // } else { + // code = terrno; + // } + // + // break; + // } case TSDB_UDF_CALL_AGG_FIN: { SUdfInterBuf outBuf = {.buf = taosMemoryMalloc(udf->bufSize), .bufLen = udf->bufSize, .numOfResult = 0}; if (outBuf.buf != NULL) { @@ -959,10 +959,10 @@ _exit: freeUdfInterBuf(&subRsp->resultBuf); break; } - case TSDB_UDF_CALL_AGG_MERGE: { - freeUdfInterBuf(&subRsp->resultBuf); - break; - } + // case TSDB_UDF_CALL_AGG_MERGE: { + // freeUdfInterBuf(&subRsp->resultBuf); + // break; + // } case TSDB_UDF_CALL_AGG_FIN: { freeUdfInterBuf(&subRsp->resultBuf); break; @@ -1667,7 +1667,6 @@ static int32_t udfdGlobalDataInit() { } static void udfdGlobalDataDeinit() { - taosHashCleanup(global.udfsHash); uv_mutex_destroy(&global.udfsMutex); uv_mutex_destroy(&global.scriptPluginsMutex); taosMemoryFreeClear(global.loop); @@ -1720,8 +1719,11 @@ void udfdDeinitResidentFuncs() { SUdf **udfInHash = taosHashGet(global.udfsHash, funcName, strlen(funcName)); if (udfInHash) { SUdf *udf = *udfInHash; - int32_t code = udf->scriptPlugin->udfDestroyFunc(udf->scriptUdfCtx); - fnDebug("udfd destroy function returns %d", code); + int32_t code = 0; + if (udf->scriptPlugin->udfDestroyFunc) { + code = udf->scriptPlugin->udfDestroyFunc(udf->scriptUdfCtx); + fnDebug("udfd %s destroy function returns %d", funcName, code); + } if(taosHashRemove(global.udfsHash, funcName, strlen(funcName)) != 0) { fnError("udfd remove resident function %s failed", funcName); @@ -1729,6 +1731,7 @@ void udfdDeinitResidentFuncs() { taosMemoryFree(udf); } } + taosHashCleanup(global.udfsHash); 
taosArrayDestroy(global.residentFuncs); fnInfo("udfd resident functions are deinit"); } @@ -1838,15 +1841,15 @@ int main(int argc, char *argv[]) { fnInfo("udfd exit normally"); removeListeningPipe(); - udfdDeinitScriptPlugins(); _exit: - if (globalDataInited) { - udfdGlobalDataDeinit(); - } if (residentFuncsInited) { udfdDeinitResidentFuncs(); } + udfdDeinitScriptPlugins(); + if (globalDataInited) { + udfdGlobalDataDeinit(); + } if (udfSourceDirInited) { udfdDestroyUdfSourceDir(); } diff --git a/source/libs/index/src/indexFilter.c b/source/libs/index/src/indexFilter.c index 02e5bd34a6..1d1bc66414 100644 --- a/source/libs/index/src/indexFilter.c +++ b/source/libs/index/src/indexFilter.c @@ -326,7 +326,7 @@ static int32_t sifInitParam(SNode *node, SIFParam *param, SIFCtx *ctx) { indexError("invalid length for node:%p, length: %d", node, LIST_LENGTH(nl->pNodeList)); SIF_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); } - SIF_ERR_RET(scalarGenerateSetFromList((void **)¶m->pFilter, node, nl->node.resType.type)); + SIF_ERR_RET(scalarGenerateSetFromList((void **)¶m->pFilter, node, nl->node.resType.type, 0)); if (taosHashPut(ctx->pRes, &node, POINTER_BYTES, param, sizeof(*param))) { taosHashCleanup(param->pFilter); param->pFilter = NULL; diff --git a/source/libs/parser/src/parAstCreater.c b/source/libs/parser/src/parAstCreater.c index 4fc83c7c41..5fc5f5fafe 100644 --- a/source/libs/parser/src/parAstCreater.c +++ b/source/libs/parser/src/parAstCreater.c @@ -116,7 +116,7 @@ static bool checkPassword(SAstCreateContext* pCxt, const SToken* pPasswordToken, strncpy(pPassword, pPasswordToken->z, pPasswordToken->n); (void)strdequote(pPassword); if (strtrim(pPassword) <= 0) { - pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_PASSWD_EMPTY); + pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_PASSWD_TOO_SHORT_OR_EMPTY); } else if (invalidPassword(pPassword)) { pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_PASSWD); } diff --git 
a/source/libs/parser/src/parCalcConst.c b/source/libs/parser/src/parCalcConst.c index 99be9b07b0..0f8c8ee034 100644 --- a/source/libs/parser/src/parCalcConst.c +++ b/source/libs/parser/src/parCalcConst.c @@ -611,7 +611,12 @@ static void resetProjectNullType(SNode* pStmt) { resetProjectNullTypeImpl(((SSelectStmt*)pStmt)->pProjectionList); break; case QUERY_NODE_SET_OPERATOR: { - resetProjectNullTypeImpl(((SSetOperator*)pStmt)->pProjectionList); + SSetOperator* pSetOp = (SSetOperator*)pStmt; + resetProjectNullTypeImpl(pSetOp->pProjectionList); + if (pSetOp->pLeft) + resetProjectNullType(pSetOp->pLeft); + if (pSetOp->pRight) + resetProjectNullType(pSetOp->pRight); break; } default: diff --git a/source/libs/parser/src/parInsertStmt.c b/source/libs/parser/src/parInsertStmt.c index 4ecc18d189..37d7b2f431 100644 --- a/source/libs/parser/src/parInsertStmt.c +++ b/source/libs/parser/src/parInsertStmt.c @@ -368,7 +368,7 @@ int32_t qBindStmtStbColsValue(void* pBlock, SArray* pCols, TAOS_MULTI_BIND* bind // } } - code = tRowBuildFromBind(pBindInfos, boundInfo->numOfBound, colInOrder, *pTSchema, pCols); + code = tRowBuildFromBind(pBindInfos, boundInfo->numOfBound, colInOrder, *pTSchema, pCols, &pDataBlock->ordered, &pDataBlock->duplicateTs); qDebug("stmt all %d columns bind %d rows data", boundInfo->numOfBound, rowNum); @@ -745,7 +745,7 @@ int32_t qBindStmtStbColsValue2(void* pBlock, SArray* pCols, TAOS_STMT2_BIND* bin pBindInfos[c].bytes = pColSchema->bytes; } - code = tRowBuildFromBind2(pBindInfos, boundInfo->numOfBound, colInOrder, *pTSchema, pCols); + code = tRowBuildFromBind2(pBindInfos, boundInfo->numOfBound, colInOrder, *pTSchema, pCols, &pDataBlock->ordered, &pDataBlock->duplicateTs); qDebug("stmt all %d columns bind %d rows data", boundInfo->numOfBound, rowNum); diff --git a/source/libs/parser/src/parInsertUtil.c b/source/libs/parser/src/parInsertUtil.c index 502dbb57dd..ed1f498a32 100644 --- a/source/libs/parser/src/parInsertUtil.c +++ 
b/source/libs/parser/src/parInsertUtil.c @@ -101,10 +101,13 @@ int32_t insCreateSName(SName* pName, SToken* pTableName, int32_t acctId, const c return buildInvalidOperationMsg(pMsgBuf, msg1); } } else { // get current DB name first, and then set it into path - if (pTableName->n >= TSDB_TABLE_NAME_LEN) { + char tbname[TSDB_TABLE_FNAME_LEN] = {0}; + strncpy(tbname, pTableName->z, pTableName->n); + int32_t tbLen = strdequote(tbname); + if (tbLen >= TSDB_TABLE_NAME_LEN) { return buildInvalidOperationMsg(pMsgBuf, msg1); } - if (pTableName->n == 0) { + if (tbLen == 0) { return generateSyntaxErrMsg(pMsgBuf, TSDB_CODE_PAR_INVALID_IDENTIFIER_NAME, "invalid table name"); } diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index f20755ad93..15b8bf2e89 100755 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -2276,6 +2276,7 @@ static bool dataTypeEqual(const SDataType* l, const SDataType* r) { // 0 means equal, 1 means the left shall prevail, -1 means the right shall prevail static int32_t dataTypeComp(const SDataType* l, const SDataType* r) { + if (l->type == TSDB_DATA_TYPE_NULL) return -1; if (l->type != r->type) { return 1; } @@ -11416,7 +11417,7 @@ static int32_t checkStreamQuery(STranslateContext* pCxt, SCreateStreamStmt* pStm if (pSelect->hasInterpFunc) { // Temporary code - if (pStmt->pOptions->triggerType != STREAM_TRIGGER_FORCE_WINDOW_CLOSE) { + if (tsStreamCoverage == false && pStmt->pOptions->triggerType != STREAM_TRIGGER_FORCE_WINDOW_CLOSE) { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, "Stream interp function only support force window close"); } diff --git a/source/libs/parser/src/parUtil.c b/source/libs/parser/src/parUtil.c index 9706644324..0cda428487 100644 --- a/source/libs/parser/src/parUtil.c +++ b/source/libs/parser/src/parUtil.c @@ -57,8 +57,8 @@ static char* getSyntaxErrFormat(int32_t errCode) { return "Invalid tag name: %s"; case 
TSDB_CODE_PAR_NAME_OR_PASSWD_TOO_LONG: return "Name or password too long"; - case TSDB_CODE_PAR_PASSWD_EMPTY: - return "Password can not be empty"; + case TSDB_CODE_PAR_PASSWD_TOO_SHORT_OR_EMPTY: + return "Password too short or empty"; case TSDB_CODE_PAR_INVALID_PORT: return "Port should be an integer that is less than 65535 and greater than 0"; case TSDB_CODE_PAR_INVALID_ENDPOINT: diff --git a/source/libs/qcom/src/queryUtil.c b/source/libs/qcom/src/queryUtil.c index 690d38aac0..6d637bee98 100644 --- a/source/libs/qcom/src/queryUtil.c +++ b/source/libs/qcom/src/queryUtil.c @@ -137,6 +137,8 @@ static void processTaskQueue(SQueueInfo *pInfo, SSchedMsg *pSchedMsg) { } int32_t initTaskQueue() { + memset(&taskQueue, 0, sizeof(taskQueue)); + taskQueue.wrokrerPool.name = "taskWorkPool"; taskQueue.wrokrerPool.min = tsNumOfTaskQueueThreads; taskQueue.wrokrerPool.max = tsNumOfTaskQueueThreads; diff --git a/source/libs/qworker/src/qwMem.c b/source/libs/qworker/src/qwMem.c index d625bb113a..69d4093221 100644 --- a/source/libs/qworker/src/qwMem.c +++ b/source/libs/qworker/src/qwMem.c @@ -153,6 +153,7 @@ int32_t qwRetrieveJobInfo(QW_FPARAMS_DEF, SQWJobInfo** ppJob) { if (atomic_load_8(&pJob->destroyed)) { QW_UNLOCK(QW_READ, &pJob->lock); + taosHashRelease(gQueryMgmt.pJobInfo, pJob); continue; } diff --git a/source/libs/qworker/src/qwUtil.c b/source/libs/qworker/src/qwUtil.c index 2809f05590..3d41b6ef5e 100644 --- a/source/libs/qworker/src/qwUtil.c +++ b/source/libs/qworker/src/qwUtil.c @@ -258,7 +258,7 @@ int32_t qwAddTaskCtxImpl(QW_FPARAMS_DEF, bool acquire, SQWTaskCtx **ctx) { } } - atomic_add_fetch_64(&gQueryMgmt.stat.taskInitNum, 1); + (void)atomic_add_fetch_64(&gQueryMgmt.stat.taskInitNum, 1); if (acquire && ctx) { QW_RET(qwAcquireTaskCtx(QW_FPARAMS(), ctx)); @@ -283,7 +283,7 @@ void qwFreeTaskHandle(SQWTaskCtx *ctx) { qDestroyTask(otaskHandle); taosDisableMemPoolUsage(); - atomic_add_fetch_64(&gQueryMgmt.stat.taskExecDestroyNum, 1); + 
(void)atomic_add_fetch_64(&gQueryMgmt.stat.taskExecDestroyNum, 1); qDebug("task handle destroyed"); } @@ -297,7 +297,7 @@ void qwFreeSinkHandle(SQWTaskCtx *ctx) { dsDestroyDataSinker(osinkHandle); QW_SINK_DISABLE_MEMPOOL(); - atomic_add_fetch_64(&gQueryMgmt.stat.taskSinkDestroyNum, 1); + (void)atomic_add_fetch_64(&gQueryMgmt.stat.taskSinkDestroyNum, 1); qDebug("sink handle destroyed"); } @@ -409,6 +409,8 @@ int32_t qwDropTaskCtx(QW_FPARAMS_DEF) { if (ctx->pJobInfo && TSDB_CODE_SUCCESS != ctx->pJobInfo->errCode) { QW_UPDATE_RSP_CODE(ctx, ctx->pJobInfo->errCode); + } else { + QW_UPDATE_RSP_CODE(ctx, TSDB_CODE_TSC_QUERY_CANCELLED); } atomic_store_ptr(&ctx->taskHandle, NULL); @@ -428,7 +430,7 @@ int32_t qwDropTaskCtx(QW_FPARAMS_DEF) { QW_TASK_DLOG_E("task ctx dropped"); - atomic_add_fetch_64(&gQueryMgmt.stat.taskDestroyNum, 1); + (void)atomic_add_fetch_64(&gQueryMgmt.stat.taskDestroyNum, 1); return code; } diff --git a/source/libs/qworker/src/qworker.c b/source/libs/qworker/src/qworker.c index 641fa03f7a..4a9eea66e2 100644 --- a/source/libs/qworker/src/qworker.c +++ b/source/libs/qworker/src/qworker.c @@ -449,51 +449,64 @@ int32_t qwGetDeleteResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, SDeleteRes *pRes return TSDB_CODE_SUCCESS; } -int32_t qwQuickRspFetchReq(QW_FPARAMS_DEF, SQWTaskCtx *ctx, SQWMsg *qwMsg, int32_t code) { - if (QUERY_RSP_POLICY_QUICK == tsQueryRspPolicy && ctx != NULL) { - if (QW_EVENT_RECEIVED(ctx, QW_EVENT_FETCH)) { - void *rsp = NULL; - int32_t dataLen = 0; - int32_t rawLen = 0; - SOutputData sOutput = {0}; - if (TSDB_CODE_SUCCESS == code) { - code = qwGetQueryResFromSink(QW_FPARAMS(), ctx, &dataLen, &rawLen, &rsp, &sOutput); +int32_t qwQuickRspFetchReq(QW_FPARAMS_DEF, SQWMsg *qwMsg, int32_t code) { + if (QUERY_RSP_POLICY_QUICK != tsQueryRspPolicy) { + return TSDB_CODE_SUCCESS; + } + + SQWTaskCtx *ctx = NULL; + QW_ERR_JRET(qwAcquireTaskCtx(QW_FPARAMS(), &ctx)); + + if (!QW_EVENT_RECEIVED(ctx, QW_EVENT_FETCH)) { + goto _return; + } + + void *rsp = 
NULL; + int32_t dataLen = 0; + int32_t rawLen = 0; + SOutputData sOutput = {0}; + if (TSDB_CODE_SUCCESS == code) { + code = qwGetQueryResFromSink(QW_FPARAMS(), ctx, &dataLen, &rawLen, &rsp, &sOutput); + } + + if (code) { + qwFreeFetchRsp(rsp); + rsp = NULL; + dataLen = 0; + } + + if (NULL == rsp && TSDB_CODE_SUCCESS == code) { + goto _return; + } + + if (NULL != rsp) { + bool qComplete = (DS_BUF_EMPTY == sOutput.bufStatus && sOutput.queryEnd); + + qwBuildFetchRsp(rsp, &sOutput, dataLen, rawLen, qComplete); + if (qComplete) { + atomic_store_8((int8_t *)&ctx->queryEnd, true); + if (!ctx->dynamicTask) { + qwFreeSinkHandle(ctx); } - - if (code) { - qwFreeFetchRsp(rsp); - rsp = NULL; - dataLen = 0; - } - - if (NULL == rsp && TSDB_CODE_SUCCESS == code) { - return TSDB_CODE_SUCCESS; - } - - if (NULL != rsp) { - bool qComplete = (DS_BUF_EMPTY == sOutput.bufStatus && sOutput.queryEnd); - - qwBuildFetchRsp(rsp, &sOutput, dataLen, rawLen, qComplete); - if (qComplete) { - atomic_store_8((int8_t *)&ctx->queryEnd, true); - if (!ctx->dynamicTask) { - qwFreeSinkHandle(ctx); - } - } - } - - qwMsg->connInfo = ctx->dataConnInfo; - QW_SET_EVENT_PROCESSED(ctx, QW_EVENT_FETCH); - - QW_ERR_RET(qwBuildAndSendFetchRsp(ctx, ctx->fetchMsgType + 1, &qwMsg->connInfo, rsp, dataLen, code)); - rsp = NULL; - - QW_TASK_DLOG("fetch rsp send, handle:%p, code:%x - %s, dataLen:%d", qwMsg->connInfo.handle, code, tstrerror(code), - dataLen); } } - return TSDB_CODE_SUCCESS; + qwMsg->connInfo = ctx->dataConnInfo; + QW_SET_EVENT_PROCESSED(ctx, QW_EVENT_FETCH); + + QW_ERR_JRET(qwBuildAndSendFetchRsp(ctx, ctx->fetchMsgType + 1, &qwMsg->connInfo, rsp, dataLen, code)); + rsp = NULL; + + QW_TASK_DLOG("fetch rsp send, handle:%p, code:%x - %s, dataLen:%d", qwMsg->connInfo.handle, code, tstrerror(code), + dataLen); + +_return: + + if (ctx) { + qwReleaseTaskCtx(mgmt, ctx); + } + + return code; } int32_t qwStartDynamicTaskNewExec(QW_FPARAMS_DEF, SQWTaskCtx *ctx, SQWMsg *qwMsg) { @@ -748,6 +761,13 @@ int32_t 
qwPreprocessQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg) { QW_ERR_JRET(qwAcquireTaskCtx(QW_FPARAMS(), &ctx)); + QW_LOCK(QW_WRITE, &ctx->lock); + + if (QW_EVENT_PROCESSED(ctx, QW_EVENT_DROP) || QW_EVENT_RECEIVED(ctx, QW_EVENT_DROP)) { + QW_TASK_WLOG("task dropping or already dropped, drop event:%d", QW_GET_EVENT(ctx, QW_EVENT_DROP)); + QW_ERR_JRET(ctx->rspCode); + } + ctx->ctrlConnInfo = qwMsg->connInfo; ctx->sId = sId; ctx->phase = -1; @@ -767,7 +787,8 @@ _return: if (code) { (void)qwDropTask(QW_FPARAMS()); } - + + QW_UNLOCK(QW_WRITE, &ctx->lock); qwReleaseTaskCtx(mgmt, ctx); } @@ -848,7 +869,7 @@ _return: input.msgType = qwMsg->msgType; code = qwHandlePostPhaseEvents(QW_FPARAMS(), QW_PHASE_POST_QUERY, &input, NULL); - QW_ERR_RET(qwQuickRspFetchReq(QW_FPARAMS(), ctx, qwMsg, code)); + QW_ERR_RET(qwQuickRspFetchReq(QW_FPARAMS(), qwMsg, code)); QW_RET(TSDB_CODE_SUCCESS); } @@ -1028,12 +1049,16 @@ _return: if (code || rsp) { bool rsped = false; + + ctx = NULL; + (void)qwAcquireTaskCtx(QW_FPARAMS(), &ctx); + if (ctx) { qwDbgSimulateRedirect(qwMsg, ctx, &rsped); qwDbgSimulateDead(QW_FPARAMS(), ctx, &rsped); } - if (!rsped) { + if (!rsped && ctx) { code = qwBuildAndSendFetchRsp(ctx, qwMsg->msgType + 1, &qwMsg->connInfo, rsp, dataLen, code); if (TSDB_CODE_SUCCESS != code) { QW_TASK_ELOG("fetch rsp send fail, msgType:%s, handle:%p, code:%x - %s, dataLen:%d", @@ -1046,6 +1071,8 @@ _return: qwFreeFetchRsp(rsp); rsp = NULL; } + + qwReleaseTaskCtx(mgmt, ctx); } else { // qwQuickRspFetchReq(QW_FPARAMS(), ctx, qwMsg, code); } diff --git a/source/libs/scalar/inc/filterInt.h b/source/libs/scalar/inc/filterInt.h index 9adc9ee99c..34f100a2bd 100644 --- a/source/libs/scalar/inc/filterInt.h +++ b/source/libs/scalar/inc/filterInt.h @@ -236,6 +236,7 @@ typedef struct SFltBuildGroupCtx { SFilterInfo *info; SArray *group; int32_t code; + bool ignore; } SFltBuildGroupCtx; typedef struct { @@ -266,6 +267,7 @@ struct SFilterInfo { int8_t *blkUnitRes; void *pTable; SArray *blkList; + bool isStrict; 
SFilterPCtx pctx; }; diff --git a/source/libs/scalar/inc/sclInt.h b/source/libs/scalar/inc/sclInt.h index 34fd5dc8b0..8caa3edf42 100644 --- a/source/libs/scalar/inc/sclInt.h +++ b/source/libs/scalar/inc/sclInt.h @@ -21,6 +21,8 @@ extern "C" { #include "query.h" #include "tcommon.h" #include "thash.h" +#include "querynodes.h" +#include "function.h" typedef struct SOperatorValueType { int32_t opResType; @@ -141,12 +143,14 @@ int32_t sclConvertToTsValueNode(int8_t precision, SValueNode* valueNode); #define GET_PARAM_PRECISON(_c) ((_c)->columnData->info.precision) void sclFreeParam(SScalarParam* param); -int32_t doVectorCompare(SScalarParam* pLeft, SScalarParam* pRight, SScalarParam *pOut, int32_t startIndex, int32_t numOfRows, +int32_t doVectorCompare(SScalarParam* pLeft, SScalarParam *pLeftVar, SScalarParam* pRight, SScalarParam *pOut, int32_t startIndex, int32_t numOfRows, int32_t _ord, int32_t optr); int32_t vectorCompareImpl(SScalarParam* pLeft, SScalarParam* pRight, SScalarParam *pOut, int32_t startIndex, int32_t numOfRows, int32_t _ord, int32_t optr); int32_t vectorCompare(SScalarParam* pLeft, SScalarParam* pRight, SScalarParam *pOut, int32_t _ord, int32_t optr); +bool checkOperatorRestypeIsTimestamp(EOperatorType opType, int32_t ldt, int32_t rdt); + #ifdef __cplusplus } #endif diff --git a/source/libs/scalar/src/filter.c b/source/libs/scalar/src/filter.c index f0325dd174..b329bbbd44 100644 --- a/source/libs/scalar/src/filter.c +++ b/source/libs/scalar/src/filter.c @@ -13,6 +13,7 @@ * along with this program. If not, see . 
*/ #include +#include "nodes.h" #include "os.h" #include "tglobal.h" #include "thash.h" @@ -1284,7 +1285,8 @@ static void filterFreeGroup(void *pItem) { taosMemoryFreeClear(p->unitFlags); } -int32_t fltAddGroupUnitFromNode(SFilterInfo *info, SNode *tree, SArray *group) { +int32_t fltAddGroupUnitFromNode(void *pContext, SFilterInfo *info, SNode *tree, SArray *group) { + SFltBuildGroupCtx *ctx = (SFltBuildGroupCtx *)pContext; SOperatorNode *node = (SOperatorNode *)tree; int32_t ret = TSDB_CODE_SUCCESS; SFilterFieldId left = {0}, right = {0}; @@ -1296,7 +1298,6 @@ int32_t fltAddGroupUnitFromNode(SFilterInfo *info, SNode *tree, SArray *group) { if (node->opType == OP_TYPE_IN && (!IS_VAR_DATA_TYPE(type))) { SNodeListNode *listNode = (SNodeListNode *)node->pRight; - SListCell *cell = listNode->pNodeList->pHead; SScalarParam out = {.columnData = taosMemoryCalloc(1, sizeof(SColumnInfoData))}; if (out.columnData == NULL) { @@ -1305,8 +1306,10 @@ int32_t fltAddGroupUnitFromNode(SFilterInfo *info, SNode *tree, SArray *group) { out.columnData->info.type = type; out.columnData->info.bytes = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes; // reserved space for simple_copy - for (int32_t i = 0; i < listNode->pNodeList->length; ++i) { - SValueNode *valueNode = (SValueNode *)cell->pNode; + int32_t overflowCount = 0; + SNode* nodeItem = NULL; + FOREACH(nodeItem, listNode->pNodeList) { + SValueNode *valueNode = (SValueNode *)nodeItem; if (valueNode->node.resType.type != type) { int32_t overflow = 0; code = sclConvertValueToSclParam(valueNode, &out, &overflow); @@ -1316,7 +1319,7 @@ int32_t fltAddGroupUnitFromNode(SFilterInfo *info, SNode *tree, SArray *group) { } if (overflow) { - cell = cell->pNext; + ++overflowCount; continue; } @@ -1354,8 +1357,9 @@ int32_t fltAddGroupUnitFromNode(SFilterInfo *info, SNode *tree, SArray *group) { code = terrno; break; } - - cell = cell->pNext; + } + if(overflowCount == listNode->pNodeList->length) { + ctx->ignore = true; } colDataDestroy(out.columnData); 
taosMemoryFree(out.columnData); @@ -1692,10 +1696,17 @@ EDealRes fltTreeToGroup(SNode *pNode, void *pContext) { FLT_ERR_RET(terrno); } - SFltBuildGroupCtx tctx = {.info = ctx->info, .group = newGroup}; + SFltBuildGroupCtx tctx = {.info = ctx->info, .group = newGroup, .ignore = false}; nodesWalkExpr(cell->pNode, fltTreeToGroup, (void *)&tctx); FLT_ERR_JRET(tctx.code); - + if(tctx.ignore) { + ctx->ignore = true; + taosArrayDestroyEx(newGroup, filterFreeGroup); + newGroup = NULL; + taosArrayDestroyEx(resGroup, filterFreeGroup); + resGroup = NULL; + break; + } FLT_ERR_JRET(filterDetachCnfGroups(resGroup, preGroup, newGroup)); taosArrayDestroyEx(newGroup, filterFreeGroup); @@ -1707,9 +1718,10 @@ EDealRes fltTreeToGroup(SNode *pNode, void *pContext) { cell = cell->pNext; } - - if (NULL == taosArrayAddAll(ctx->group, preGroup)) { - FLT_ERR_JRET(terrno); + if (!ctx->ignore) { + if (NULL == taosArrayAddAll(ctx->group, preGroup)) { + FLT_ERR_JRET(terrno); + } } taosArrayDestroy(preGroup); @@ -1721,6 +1733,9 @@ EDealRes fltTreeToGroup(SNode *pNode, void *pContext) { SListCell *cell = node->pParameterList->pHead; for (int32_t i = 0; i < node->pParameterList->length; ++i) { nodesWalkExpr(cell->pNode, fltTreeToGroup, (void *)pContext); + if(ctx->ignore) { + ctx->ignore = false; + } FLT_ERR_JRET(ctx->code); cell = cell->pNext; @@ -1735,7 +1750,7 @@ EDealRes fltTreeToGroup(SNode *pNode, void *pContext) { } if (QUERY_NODE_OPERATOR == nType) { - FLT_ERR_JRET(fltAddGroupUnitFromNode(ctx->info, pNode, ctx->group)); + FLT_ERR_JRET(fltAddGroupUnitFromNode(ctx, ctx->info, pNode, ctx->group)); return DEAL_RES_IGNORE_CHILD; } @@ -2210,7 +2225,7 @@ int32_t fltInitValFieldData(SFilterInfo *info) { } if (unit->compare.optr == OP_TYPE_IN) { - FLT_ERR_RET(scalarGenerateSetFromList((void **)&fi->data, fi->desc, type)); + FLT_ERR_RET(scalarGenerateSetFromList((void **)&fi->data, fi->desc, type, 0)); if (fi->data == NULL) { fltError("failed to convert in param"); FLT_ERR_RET(TSDB_CODE_APP_ERROR); 
@@ -3831,13 +3846,21 @@ int32_t fltInitFromNode(SNode *tree, SFilterInfo *info, uint32_t options) { goto _return; } - SFltBuildGroupCtx tctx = {.info = info, .group = group}; + SFltBuildGroupCtx tctx = {.info = info, .group = group, .ignore = false}; nodesWalkExpr(tree, fltTreeToGroup, (void *)&tctx); if (TSDB_CODE_SUCCESS != tctx.code) { taosArrayDestroyEx(group, filterFreeGroup); code = tctx.code; goto _return; } + if (tctx.ignore) { + FILTER_SET_FLAG(info->status, FI_STATUS_EMPTY); + } + if (FILTER_EMPTY_RES(info)) { + info->func = filterExecuteImplEmpty; + taosArrayDestroyEx(group, filterFreeGroup); + return TSDB_CODE_SUCCESS; + } code = filterConvertGroupFromArray(info, group); if (TSDB_CODE_SUCCESS != code) { taosArrayDestroyEx(group, filterFreeGroup); @@ -3871,7 +3894,7 @@ int32_t fltInitFromNode(SNode *tree, SFilterInfo *info, uint32_t options) { _return: if (code) { - qInfo("init from node failed, code:%d", code); + qInfo("init from node failed, code:%d, %s", code, tstrerror(code)); } return code; } @@ -4561,8 +4584,7 @@ int32_t filterGetTimeRange(SNode *pNode, STimeWindow *win, bool *isStrict) { FLT_ERR_JRET(fltSclGetTimeStampDatum(endPt, &end)); win->skey = start.i; win->ekey = end.i; - if(optNode->opType == OP_TYPE_IN) *isStrict = false; - else *isStrict = true; + *isStrict = info->isStrict; goto _return; } else if (taosArrayGetSize(points) == 0) { *win = TSWINDOW_DESC_INITIALIZER; @@ -4740,7 +4762,7 @@ EDealRes fltReviseRewriter(SNode **pNode, void *pContext) { return DEAL_RES_CONTINUE; } - if (node->opType == OP_TYPE_NOT_IN || node->opType == OP_TYPE_NOT_LIKE || node->opType > OP_TYPE_IS_NOT_NULL || + if (node->opType == OP_TYPE_NOT_LIKE || node->opType > OP_TYPE_IS_NOT_NULL || node->opType == OP_TYPE_NOT_EQUAL) { stat->scalarMode = true; return DEAL_RES_CONTINUE; @@ -4814,7 +4836,7 @@ EDealRes fltReviseRewriter(SNode **pNode, void *pContext) { } } - if (OP_TYPE_IN == node->opType && QUERY_NODE_NODE_LIST != nodeType(node->pRight)) { + if ((OP_TYPE_IN 
== node->opType || OP_TYPE_NOT_IN == node->opType) && QUERY_NODE_NODE_LIST != nodeType(node->pRight)) { fltError("invalid IN operator node, rightType:%d", nodeType(node->pRight)); stat->code = TSDB_CODE_APP_ERROR; return DEAL_RES_ERROR; @@ -4822,25 +4844,37 @@ EDealRes fltReviseRewriter(SNode **pNode, void *pContext) { SColumnNode *refNode = (SColumnNode *)node->pLeft; SExprNode *exprNode = NULL; - if (OP_TYPE_IN != node->opType) { + if (OP_TYPE_IN != node->opType && OP_TYPE_NOT_IN != node->opType) { SValueNode *valueNode = (SValueNode *)node->pRight; if (FILTER_GET_FLAG(stat->info->options, FLT_OPTION_TIMESTAMP) && TSDB_DATA_TYPE_UBIGINT == valueNode->node.resType.type && valueNode->datum.u <= INT64_MAX) { valueNode->node.resType.type = TSDB_DATA_TYPE_BIGINT; } exprNode = &valueNode->node; + int32_t type = vectorGetConvertType(refNode->node.resType.type, exprNode->resType.type); + if (0 != type && type != refNode->node.resType.type) { + stat->scalarMode = true; + } } else { SNodeListNode *listNode = (SNodeListNode *)node->pRight; - if (LIST_LENGTH(listNode->pNodeList) > 10) { + if (LIST_LENGTH(listNode->pNodeList) > 10 || OP_TYPE_NOT_IN == node->opType) { stat->scalarMode = true; - return DEAL_RES_CONTINUE; } + int32_t type = refNode->node.resType.type; exprNode = &listNode->node; - } - int32_t type = vectorGetConvertType(refNode->node.resType.type, exprNode->resType.type); - if (0 != type && type != refNode->node.resType.type) { - stat->scalarMode = true; - return DEAL_RES_CONTINUE; + SNode* nodeItem = NULL; + FOREACH(nodeItem, listNode->pNodeList) { + SValueNode *valueNode = (SValueNode *)nodeItem; + int32_t tmp = vectorGetConvertType(type, valueNode->node.resType.type); + if (tmp != 0){ + stat->scalarMode = true; + type = tmp; + } + + } + if (IS_NUMERIC_TYPE(type)){ + exprNode->resType.type = type; + } } } @@ -4991,11 +5025,11 @@ int32_t fltSclBuildRangePoints(SFltSclOperator *oper, SArray *points) { } case OP_TYPE_IN: { SNodeListNode *listNode = (SNodeListNode 
*)oper->valNode; - SListCell *cell = listNode->pNodeList->pHead; SFltSclDatum minDatum = {.kind = FLT_SCL_DATUM_KIND_INT64, .i = INT64_MAX, .type = oper->colNode->node.resType}; SFltSclDatum maxDatum = {.kind = FLT_SCL_DATUM_KIND_INT64, .i = INT64_MIN, .type = oper->colNode->node.resType}; - for (int32_t i = 0; i < listNode->pNodeList->length; ++i) { - SValueNode *valueNode = (SValueNode *)cell->pNode; + SNode* nodeItem = NULL; + FOREACH(nodeItem, listNode->pNodeList) { + SValueNode *valueNode = (SValueNode *)nodeItem; SFltSclDatum valDatum; FLT_ERR_RET(fltSclBuildDatumFromValueNode(&valDatum, valueNode)); if(valueNode->node.resType.type == TSDB_DATA_TYPE_FLOAT || valueNode->node.resType.type == TSDB_DATA_TYPE_DOUBLE) { @@ -5005,7 +5039,6 @@ int32_t fltSclBuildRangePoints(SFltSclOperator *oper, SArray *points) { minDatum.i = TMIN(minDatum.i, valDatum.i); maxDatum.i = TMAX(maxDatum.i, valDatum.i); } - cell = cell->pNext; } SFltSclPoint startPt = {.start = true, .excl = false, .val = minDatum}; SFltSclPoint endPt = {.start = false, .excl = false, .val = maxDatum}; @@ -5026,7 +5059,8 @@ int32_t fltSclBuildRangePoints(SFltSclOperator *oper, SArray *points) { } // TODO: process DNF composed of CNF -int32_t fltSclProcessCNF(SArray *sclOpListCNF, SArray *colRangeList) { +static int32_t fltSclProcessCNF(SFilterInfo *pInfo, SArray *sclOpListCNF, SArray *colRangeList) { + pInfo->isStrict = true; size_t sz = taosArrayGetSize(sclOpListCNF); for (int32_t i = 0; i < sz; ++i) { SFltSclOperator *sclOper = taosArrayGet(sclOpListCNF, i); @@ -5049,10 +5083,16 @@ int32_t fltSclProcessCNF(SArray *sclOpListCNF, SArray *colRangeList) { taosArrayDestroy(colRange->points); taosArrayDestroy(points); colRange->points = merged; + if(merged->size == 0) { + return TSDB_CODE_SUCCESS; + } } else { taosArrayDestroy(colRange->points); colRange->points = points; } + if (sclOper->type == OP_TYPE_IN) { + pInfo->isStrict = false; + } } return TSDB_CODE_SUCCESS; } @@ -5154,7 +5194,7 @@ int32_t 
fltOptimizeNodes(SFilterInfo *pInfo, SNode **pNode, SFltTreeStat *pStat) if (NULL == colRangeList) { FLT_ERR_JRET(terrno); } - FLT_ERR_JRET(fltSclProcessCNF(sclOpList, colRangeList)); + FLT_ERR_JRET(fltSclProcessCNF(pInfo, sclOpList, colRangeList)); pInfo->sclCtx.fltSclRange = colRangeList; for (int32_t i = 0; i < taosArrayGetSize(sclOpList); ++i) { diff --git a/source/libs/scalar/src/scalar.c b/source/libs/scalar/src/scalar.c index 9a369cd4c4..9bab697772 100644 --- a/source/libs/scalar/src/scalar.c +++ b/source/libs/scalar/src/scalar.c @@ -116,7 +116,8 @@ _return: SCL_RET(code); } -int32_t scalarGenerateSetFromList(void **data, void *pNode, uint32_t type) { +// processType = 0 means all type. 1 means number, 2 means var, 3 means float, 4 means var&integer +int32_t scalarGenerateSetFromList(void **data, void *pNode, uint32_t type, int8_t processType) { SHashObj *pObj = taosHashInit(256, taosGetDefaultHashFunction(type), true, false); if (NULL == pObj) { sclError("taosHashInit failed, size:%d", 256); @@ -127,7 +128,6 @@ int32_t scalarGenerateSetFromList(void **data, void *pNode, uint32_t type) { int32_t code = 0; SNodeListNode *nodeList = (SNodeListNode *)pNode; - SListCell *cell = nodeList->pNodeList->pHead; SScalarParam out = {.columnData = taosMemoryCalloc(1, sizeof(SColumnInfoData))}; if (out.columnData == NULL) { SCL_ERR_JRET(terrno); @@ -135,8 +135,14 @@ int32_t scalarGenerateSetFromList(void **data, void *pNode, uint32_t type) { int32_t len = 0; void *buf = NULL; - for (int32_t i = 0; i < nodeList->pNodeList->length; ++i) { - SValueNode *valueNode = (SValueNode *)cell->pNode; + SNode* nodeItem = NULL; + FOREACH(nodeItem, nodeList->pNodeList) { + SValueNode *valueNode = (SValueNode *)nodeItem; + if ((IS_VAR_DATA_TYPE(valueNode->node.resType.type) && (processType == 1 || processType == 3)) || + (IS_INTEGER_TYPE(valueNode->node.resType.type) && (processType == 2 || processType == 3)) || + (IS_FLOAT_TYPE(valueNode->node.resType.type) && (processType == 2 || 
processType == 4))) { + continue; + } if (valueNode->node.resType.type != type) { out.columnData->info.type = type; @@ -158,7 +164,6 @@ int32_t scalarGenerateSetFromList(void **data, void *pNode, uint32_t type) { } if (overflow) { - cell = cell->pNext; continue; } @@ -184,7 +189,6 @@ int32_t scalarGenerateSetFromList(void **data, void *pNode, uint32_t type) { } colInfoDataCleanup(out.columnData, out.numOfRows); - cell = cell->pNext; } *data = pObj; @@ -230,6 +234,11 @@ void sclFreeParam(SScalarParam *param) { taosHashCleanup(param->pHashFilter); param->pHashFilter = NULL; } + + if (param->pHashFilterOthers != NULL) { + taosHashCleanup(param->pHashFilterOthers); + param->pHashFilterOthers = NULL; + } } int32_t sclCopyValueNodeValue(SValueNode *pNode, void **res) { @@ -369,17 +378,37 @@ int32_t sclInitParam(SNode *node, SScalarParam *param, SScalarCtx *ctx, int32_t SCL_RET(TSDB_CODE_QRY_INVALID_INPUT); } - int32_t type = vectorGetConvertType(ctx->type.selfType, ctx->type.peerType); - if (type == 0) { - type = nodeList->node.resType.type; + int32_t type = ctx->type.selfType; + SNode* nodeItem = NULL; + FOREACH(nodeItem, nodeList->pNodeList) { + SValueNode *valueNode = (SValueNode *)nodeItem; + int32_t tmp = vectorGetConvertType(type, valueNode->node.resType.type); + if (tmp != 0){ + type = tmp; + } + + } + if (IS_NUMERIC_TYPE(type)){ + ctx->type.peerType = type; + } + type = ctx->type.peerType; + if (IS_VAR_DATA_TYPE(ctx->type.selfType) && IS_NUMERIC_TYPE(type)){ + SCL_ERR_RET(scalarGenerateSetFromList((void **)¶m->pHashFilter, node, type, 1)); + SCL_ERR_RET(scalarGenerateSetFromList((void **)¶m->pHashFilterOthers, node, ctx->type.selfType, 2)); + } else if (IS_INTEGER_TYPE(ctx->type.selfType) && IS_FLOAT_TYPE(type)){ + SCL_ERR_RET(scalarGenerateSetFromList((void **)¶m->pHashFilter, node, type, 3)); + SCL_ERR_RET(scalarGenerateSetFromList((void **)¶m->pHashFilterOthers, node, ctx->type.selfType, 4)); + } else { + SCL_ERR_RET(scalarGenerateSetFromList((void 
**)¶m->pHashFilter, node, type, 0)); } - SCL_ERR_RET(scalarGenerateSetFromList((void **)¶m->pHashFilter, node, type)); param->hashValueType = type; param->colAlloced = true; if (taosHashPut(ctx->pRes, &node, POINTER_BYTES, param, sizeof(*param))) { taosHashCleanup(param->pHashFilter); param->pHashFilter = NULL; + taosHashCleanup(param->pHashFilterOthers); + param->pHashFilterOthers = NULL; sclError("taosHashPut nodeList failed, size:%d", (int32_t)sizeof(*param)); return terrno; } @@ -512,14 +541,15 @@ int32_t sclInitParamList(SScalarParam **pParams, SNodeList *pParamList, SScalarC } if (0 == *rowNum) { - taosMemoryFreeClear(paramList); + sclFreeParamList(paramList, *paramNum); + paramList = NULL; } *pParams = paramList; return TSDB_CODE_SUCCESS; _return: - taosMemoryFreeClear(paramList); + sclFreeParamList(paramList, *paramNum); SCL_RET(code); } @@ -588,7 +618,6 @@ int32_t sclInitOperatorParams(SScalarParam **pParams, SOperatorNode *node, SScal SCL_ERR_JRET(sclInitParam(node->pLeft, ¶mList[0], ctx, rowNum)); setTzCharset(¶mList[0], node->tz, node->charsetCxt); if (paramNum > 1) { - TSWAP(ctx->type.selfType, ctx->type.peerType); SCL_ERR_JRET(sclInitParam(node->pRight, ¶mList[1], ctx, rowNum)); setTzCharset(¶mList[1], node->tz, node->charsetCxt); } @@ -1695,15 +1724,12 @@ static int32_t sclGetMathOperatorResType(SOperatorNode *pOp) { if ((TSDB_DATA_TYPE_TIMESTAMP == ldt.type && TSDB_DATA_TYPE_TIMESTAMP == rdt.type) || TSDB_DATA_TYPE_VARBINARY == ldt.type || TSDB_DATA_TYPE_VARBINARY == rdt.type || - (TSDB_DATA_TYPE_TIMESTAMP == ldt.type && (IS_VAR_DATA_TYPE(rdt.type) || IS_FLOAT_TYPE(rdt.type))) || - (TSDB_DATA_TYPE_TIMESTAMP == rdt.type && (IS_VAR_DATA_TYPE(ldt.type) || IS_FLOAT_TYPE(ldt.type)))) { + (TSDB_DATA_TYPE_TIMESTAMP == ldt.type && (IS_VAR_DATA_TYPE(rdt.type))) || + (TSDB_DATA_TYPE_TIMESTAMP == rdt.type && (IS_VAR_DATA_TYPE(ldt.type)))) { return TSDB_CODE_TSC_INVALID_OPERATION; } - if ((TSDB_DATA_TYPE_TIMESTAMP == ldt.type && IS_INTEGER_TYPE(rdt.type)) || - 
(TSDB_DATA_TYPE_TIMESTAMP == rdt.type && IS_INTEGER_TYPE(ldt.type)) || - (TSDB_DATA_TYPE_TIMESTAMP == ldt.type && TSDB_DATA_TYPE_BOOL == rdt.type) || - (TSDB_DATA_TYPE_TIMESTAMP == rdt.type && TSDB_DATA_TYPE_BOOL == ldt.type)) { + if (checkOperatorRestypeIsTimestamp(pOp->opType, ldt.type, rdt.type)) { pOp->node.resType.type = TSDB_DATA_TYPE_TIMESTAMP; pOp->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes; } else { diff --git a/source/libs/scalar/src/sclfunc.c b/source/libs/scalar/src/sclfunc.c index 1da9a8f123..ce431c0b18 100644 --- a/source/libs/scalar/src/sclfunc.c +++ b/source/libs/scalar/src/sclfunc.c @@ -2985,279 +2985,93 @@ static int32_t doScalarFunction2(SScalarParam *pInput, int32_t inputNum, SScalar bool hasNullType = (IS_NULL_TYPE(GET_PARAM_TYPE(&pInput[0])) || IS_NULL_TYPE(GET_PARAM_TYPE(&pInput[1]))); int32_t numOfRows = TMAX(pInput[0].numOfRows, pInput[1].numOfRows); - if (pInput[0].numOfRows == pInput[1].numOfRows) { - for (int32_t i = 0; i < numOfRows; ++i) { - if (colDataIsNull_s(pInputData[0], i) || colDataIsNull_s(pInputData[1], i) || hasNullType) { - colDataSetNULL(pOutputData, i); - continue; - } - double in2; - GET_TYPED_DATA(in2, double, GET_PARAM_TYPE(&pInput[1]), colDataGetData(pInputData[1], i)); - switch (GET_PARAM_TYPE(&pInput[0])) { - case TSDB_DATA_TYPE_DOUBLE: { - double *in = (double *)pInputData[0]->pData; - double *out = (double *)pOutputData->pData; - double result = d1(in[i], in2); - if (isinf(result) || isnan(result)) { - colDataSetNULL(pOutputData, i); - } else { - out[i] = result; - } - break; - } - case TSDB_DATA_TYPE_FLOAT: { - float *in = (float *)pInputData[0]->pData; - float *out = (float *)pOutputData->pData; - float result = f1(in[i], (float)in2); - if (isinf(result) || isnan(result)) { - colDataSetNULL(pOutputData, i); - } else { - out[i] = result; - } - break; - } - case TSDB_DATA_TYPE_TINYINT: { - int8_t *in = (int8_t *)pInputData[0]->pData; - int8_t *out = (int8_t *)pOutputData->pData; - int8_t result 
= (int8_t)d1((double)in[i], in2); - out[i] = result; - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - int16_t *in = (int16_t *)pInputData[0]->pData; - int16_t *out = (int16_t *)pOutputData->pData; - int16_t result = (int16_t)d1((double)in[i], in2); - out[i] = result; - break; - } - case TSDB_DATA_TYPE_INT: { - int32_t *in = (int32_t *)pInputData[0]->pData; - int32_t *out = (int32_t *)pOutputData->pData; - int32_t result = (int32_t)d1((double)in[i], in2); - out[i] = result; - break; - } - case TSDB_DATA_TYPE_BIGINT: { - int64_t *in = (int64_t *)pInputData[0]->pData; - int64_t *out = (int64_t *)pOutputData->pData; - int64_t result = (int64_t)d1((double)in[i], in2); - out[i] = result; - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - uint8_t *in = (uint8_t *)pInputData[0]->pData; - uint8_t *out = (uint8_t *)pOutputData->pData; - uint8_t result = (uint8_t)d1((double)in[i], in2); - out[i] = result; - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - uint16_t *in = (uint16_t *)pInputData[0]->pData; - uint16_t *out = (uint16_t *)pOutputData->pData; - uint16_t result = (uint16_t)d1((double)in[i], in2); - out[i] = result; - break; - } - case TSDB_DATA_TYPE_UINT: { - uint32_t *in = (uint32_t *)pInputData[0]->pData; - uint32_t *out = (uint32_t *)pOutputData->pData; - uint32_t result = (uint32_t)d1((double)in[i], in2); - out[i] = result; - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - uint64_t *in = (uint64_t *)pInputData[0]->pData; - uint64_t *out = (uint64_t *)pOutputData->pData; - uint64_t result = (uint64_t)d1((double)in[i], in2); - out[i] = result; - break; - } - } + for (int32_t i = 0; i < numOfRows; ++i) { + int32_t colIdx1 = (pInput[0].numOfRows == 1) ? 0 : i; + int32_t colIdx2 = (pInput[1].numOfRows == 1) ? 
0 : i; + if (colDataIsNull_s(pInputData[0], colIdx1) || colDataIsNull_s(pInputData[1], colIdx2) || hasNullType) { + colDataSetNULL(pOutputData, i); + continue; } - } else if (pInput[0].numOfRows == 1) { // left operand is constant - if (colDataIsNull_s(pInputData[0], 0) || hasNullType) { - colDataSetNNULL(pOutputData, 0, pInput[1].numOfRows); - } else { - for (int32_t i = 0; i < numOfRows; ++i) { - if (colDataIsNull_s(pInputData[1], i)) { + double in2; + GET_TYPED_DATA(in2, double, GET_PARAM_TYPE(&pInput[1]), colDataGetData(pInputData[1], colIdx2)); + switch (GET_PARAM_TYPE(&pInput[0])) { + case TSDB_DATA_TYPE_DOUBLE: { + double *in = (double *)pInputData[0]->pData; + double *out = (double *)pOutputData->pData; + double result = d1(in[colIdx1], in2); + if (isinf(result) || isnan(result)) { colDataSetNULL(pOutputData, i); - continue; - } - double in2; - GET_TYPED_DATA(in2, double, GET_PARAM_TYPE(&pInput[1]), colDataGetData(pInputData[1], i)); - switch (GET_PARAM_TYPE(&pInput[0])) { - case TSDB_DATA_TYPE_DOUBLE: { - double *in = (double *)pInputData[0]->pData; - double *out = (double *)pOutputData->pData; - double result = d1(in[0], in2); - if (isinf(result) || isnan(result)) { - colDataSetNULL(pOutputData, i); - } else { - out[i] = result; - } - break; - } - case TSDB_DATA_TYPE_FLOAT: { - float *in = (float *)pInputData[0]->pData; - float *out = (float *)pOutputData->pData; - float result = f1(in[0], (float)in2); - if (isinf(result) || isnan(result)) { - colDataSetNULL(pOutputData, i); - } else { - out[i] = result; - } - break; - } - case TSDB_DATA_TYPE_TINYINT: { - int8_t *in = (int8_t *)pInputData[0]->pData; - int8_t *out = (int8_t *)pOutputData->pData; - int8_t result = (int8_t)d1((double)in[0], in2); - out[i] = result; - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - int16_t *in = (int16_t *)pInputData[0]->pData; - int16_t *out = (int16_t *)pOutputData->pData; - int16_t result = (int16_t)d1((double)in[0], in2); - out[i] = result; - break; - } - case 
TSDB_DATA_TYPE_INT: { - int32_t *in = (int32_t *)pInputData[0]->pData; - int32_t *out = (int32_t *)pOutputData->pData; - int32_t result = (int32_t)d1((double)in[0], in2); - out[i] = result; - break; - } - case TSDB_DATA_TYPE_BIGINT: { - int64_t *in = (int64_t *)pInputData[0]->pData; - int64_t *out = (int64_t *)pOutputData->pData; - int64_t result = (int64_t)d1((double)in[0], in2); - out[i] = result; - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - uint8_t *in = (uint8_t *)pInputData[0]->pData; - uint8_t *out = (uint8_t *)pOutputData->pData; - uint8_t result = (uint8_t)d1((double)in[0], in2); - out[i] = result; - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - uint16_t *in = (uint16_t *)pInputData[0]->pData; - uint16_t *out = (uint16_t *)pOutputData->pData; - uint16_t result = (uint16_t)d1((double)in[0], in2); - out[i] = result; - break; - } - case TSDB_DATA_TYPE_UINT: { - uint32_t *in = (uint32_t *)pInputData[0]->pData; - uint32_t *out = (uint32_t *)pOutputData->pData; - uint32_t result = (uint32_t)d1((double)in[0], in2); - out[i] = result; - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - uint64_t *in = (uint64_t *)pInputData[0]->pData; - uint64_t *out = (uint64_t *)pOutputData->pData; - uint64_t result = (uint64_t)d1((double)in[0], in2); - out[i] = result; - break; - } + } else { + out[i] = result; } + break; } - } - } else if (pInput[1].numOfRows == 1) { - if (colDataIsNull_s(pInputData[1], 0) || hasNullType) { - colDataSetNNULL(pOutputData, 0, pInput[0].numOfRows); - } else { - for (int32_t i = 0; i < numOfRows; ++i) { - if (colDataIsNull_s(pInputData[0], i)) { + case TSDB_DATA_TYPE_FLOAT: { + float *in = (float *)pInputData[0]->pData; + float *out = (float *)pOutputData->pData; + float result = f1(in[colIdx1], (float)in2); + if (isinf(result) || isnan(result)) { colDataSetNULL(pOutputData, i); - continue; - } - double in2; - GET_TYPED_DATA(in2, double, GET_PARAM_TYPE(&pInput[1]), colDataGetData(pInputData[1], 0)); - switch (GET_PARAM_TYPE(&pInput[0])) { - case 
TSDB_DATA_TYPE_DOUBLE: { - double *in = (double *)pInputData[0]->pData; - double *out = (double *)pOutputData->pData; - double result = d1(in[i], in2); - if (isinf(result) || isnan(result)) { - colDataSetNULL(pOutputData, i); - } else { - out[i] = result; - } - break; - } - case TSDB_DATA_TYPE_FLOAT: { - float *in = (float *)pInputData[0]->pData; - float *out = (float *)pOutputData->pData; - float result = f1(in[i], in2); - if (isinf(result) || isnan(result)) { - colDataSetNULL(pOutputData, i); - } else { - out[i] = result; - } - break; - } - case TSDB_DATA_TYPE_TINYINT: { - int8_t *in = (int8_t *)pInputData[0]->pData; - int8_t *out = (int8_t *)pOutputData->pData; - int8_t result = (int8_t)d1((double)in[i], in2); - out[i] = result; - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - int16_t *in = (int16_t *)pInputData[0]->pData; - int16_t *out = (int16_t *)pOutputData->pData; - int16_t result = (int16_t)d1((double)in[i], in2); - out[i] = result; - break; - } - case TSDB_DATA_TYPE_INT: { - int32_t *in = (int32_t *)pInputData[0]->pData; - int32_t *out = (int32_t *)pOutputData->pData; - int32_t result = (int32_t)d1((double)in[i], in2); - out[i] = result; - break; - } - case TSDB_DATA_TYPE_BIGINT: { - int64_t *in = (int64_t *)pInputData[0]->pData; - int64_t *out = (int64_t *)pOutputData->pData; - int64_t result = (int64_t)d1((double)in[i], in2); - out[i] = result; - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - uint8_t *in = (uint8_t *)pInputData[0]->pData; - uint8_t *out = (uint8_t *)pOutputData->pData; - uint8_t result = (uint8_t)d1((double)in[i], in2); - out[i] = result; - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - uint16_t *in = (uint16_t *)pInputData[0]->pData; - uint16_t *out = (uint16_t *)pOutputData->pData; - uint16_t result = (uint16_t)d1((double)in[i], in2); - out[i] = result; - break; - } - case TSDB_DATA_TYPE_UINT: { - uint32_t *in = (uint32_t *)pInputData[0]->pData; - uint32_t *out = (uint32_t *)pOutputData->pData; - uint32_t result = 
(uint32_t)d1((double)in[i], in2); - out[i] = result; - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - uint64_t *in = (uint64_t *)pInputData[0]->pData; - uint64_t *out = (uint64_t *)pOutputData->pData; - uint64_t result = (uint64_t)d1((double)in[i], in2); - out[i] = result; - break; - } + } else { + out[i] = result; } + break; + } + case TSDB_DATA_TYPE_TINYINT: { + int8_t *in = (int8_t *)pInputData[0]->pData; + int8_t *out = (int8_t *)pOutputData->pData; + int8_t result = (int8_t)d1((double)in[colIdx1], in2); + out[i] = result; + break; + } + case TSDB_DATA_TYPE_SMALLINT: { + int16_t *in = (int16_t *)pInputData[0]->pData; + int16_t *out = (int16_t *)pOutputData->pData; + int16_t result = (int16_t)d1((double)in[colIdx1], in2); + out[i] = result; + break; + } + case TSDB_DATA_TYPE_INT: { + int32_t *in = (int32_t *)pInputData[0]->pData; + int32_t *out = (int32_t *)pOutputData->pData; + int32_t result = (int32_t)d1((double)in[colIdx1], in2); + out[i] = result; + break; + } + case TSDB_DATA_TYPE_BIGINT: { + int64_t *in = (int64_t *)pInputData[0]->pData; + int64_t *out = (int64_t *)pOutputData->pData; + int64_t result = (int64_t)d1((double)in[colIdx1], in2); + out[i] = result; + break; + } + case TSDB_DATA_TYPE_UTINYINT: { + uint8_t *in = (uint8_t *)pInputData[0]->pData; + uint8_t *out = (uint8_t *)pOutputData->pData; + uint8_t result = (uint8_t)d1((double)in[colIdx1], in2); + out[i] = result; + break; + } + case TSDB_DATA_TYPE_USMALLINT: { + uint16_t *in = (uint16_t *)pInputData[0]->pData; + uint16_t *out = (uint16_t *)pOutputData->pData; + uint16_t result = (uint16_t)d1((double)in[colIdx1], in2); + out[i] = result; + break; + } + case TSDB_DATA_TYPE_UINT: { + uint32_t *in = (uint32_t *)pInputData[0]->pData; + uint32_t *out = (uint32_t *)pOutputData->pData; + uint32_t result = (uint32_t)d1((double)in[colIdx1], in2); + out[i] = result; + break; + } + case TSDB_DATA_TYPE_UBIGINT: { + uint64_t *in = (uint64_t *)pInputData[0]->pData; + uint64_t *out = (uint64_t 
*)pOutputData->pData; + uint64_t result = (uint64_t)d1((double)in[colIdx1], in2); + out[i] = result; + break; } } } diff --git a/source/libs/scalar/src/sclvector.c b/source/libs/scalar/src/sclvector.c index fd1bd927b0..14dae1226d 100644 --- a/source/libs/scalar/src/sclvector.c +++ b/source/libs/scalar/src/sclvector.c @@ -24,6 +24,7 @@ #include "tcompare.h" #include "tdatablock.h" #include "tdataformat.h" +#include "tdef.h" #include "ttime.h" #include "ttypes.h" #include "geosWrapper.h" @@ -120,19 +121,6 @@ _return: SCL_RET(code); } -int32_t convertBinaryToDouble(const void *inData, void *outData) { - char *tmp = taosMemoryCalloc(1, varDataTLen(inData)); - if (tmp == NULL) { - *((double *)outData) = 0.; - SCL_ERR_RET(terrno); - } - (void)memcpy(tmp, varDataVal(inData), varDataLen(inData)); - double ret = taosStr2Double(tmp, NULL); - taosMemoryFree(tmp); - *((double *)outData) = ret; - SCL_RET(TSDB_CODE_SUCCESS); -} - typedef int32_t (*_getBigintValue_fn_t)(void *src, int32_t index, int64_t *res); int32_t getVectorBigintValue_TINYINT(void *src, int32_t index, int64_t *res) { @@ -1008,28 +996,29 @@ int32_t vectorConvertSingleColImpl(const SScalarParam *pIn, SScalarParam *pOut, } int8_t gConvertTypes[TSDB_DATA_TYPE_MAX][TSDB_DATA_TYPE_MAX] = { - /* NULL BOOL TINY SMAL INT BIG FLOA DOUB VARC TIME NCHA UTIN USMA UINT UBIG JSON VARB DECI BLOB MEDB GEOM*/ - /*NULL*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /*BOOL*/ 0, 0, 2, 3, 4, 5, 6, 7, 5, 9, 7, 11, 12, 13, 14, 0, -1, 0, 0, 0, -1, - /*TINY*/ 0, 0, 0, 3, 4, 5, 6, 7, 5, 9, 7, 3, 4, 5, 7, 0, -1, 0, 0, 0, -1, - /*SMAL*/ 0, 0, 0, 0, 4, 5, 6, 7, 5, 9, 7, 3, 4, 5, 7, 0, -1, 0, 0, 0, -1, - /*INT */ 0, 0, 0, 0, 0, 5, 6, 7, 5, 9, 7, 4, 4, 5, 7, 0, -1, 0, 0, 0, -1, - /*BIGI*/ 0, 0, 0, 0, 0, 0, 6, 7, 5, 9, 7, 5, 5, 5, 7, 0, -1, 0, 0, 0, -1, - /*FLOA*/ 0, 0, 0, 0, 0, 0, 0, 7, 7, 6, 7, 6, 6, 6, 6, 0, -1, 0, 0, 0, -1, - /*DOUB*/ 0, 0, 0, 0, 0, 0, 0, 0, 7, 7, 7, 7, 7, 7, 7, 0, -1, 0, 0, 0, -1, - /*VARC*/ 0, 0, 0, 
0, 0, 0, 0, 0, 0, 9, 8, 7, 7, 7, 7, 0, 16, 0, 0, 0, 20, - /*TIME*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 9, 9, 9, 7, 0, -1, 0, 0, 0, -1, - /*NCHA*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 7, 7, 7, 0, 16, 0, 0, 0, -1, - /*UTIN*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 13, 14, 0, -1, 0, 0, 0, -1, - /*USMA*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13, 14, 0, -1, 0, 0, 0, -1, - /*UINT*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 14, 0, -1, 0, 0, 0, -1, - /*UBIG*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, -1, - /*JSON*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, -1, - /*VARB*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, -1,-1, -1, - /*DECI*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, -1, - /*BLOB*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, -1, - /*MEDB*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, -1, - /*GEOM*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0}; + /*NULL BOOL TINY SMAL INT BIG FLOA DOUB VARC TIME NCHA UTIN USMA UINT UBIG JSON VARB DECI BLOB MEDB GEOM*/ + /*NULL*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + /*BOOL*/ 0, 0, 2, 3, 4, 5, 6, 7, 5, 9, 5, 11, 12, 13, 14, 0, -1, 0, 0, 0, -1, + /*TINY*/ 0, 0, 0, 3, 4, 5, 6, 7, 5, 9, 5, 3, 4, 5, 7, 0, -1, 0, 0, 0, -1, + /*SMAL*/ 0, 0, 0, 0, 4, 5, 6, 7, 5, 9, 5, 3, 4, 5, 7, 0, -1, 0, 0, 0, -1, + /*INT */ 0, 0, 0, 0, 0, 5, 6, 7, 5, 9, 5, 4, 4, 5, 7, 0, -1, 0, 0, 0, -1, + /*BIGI*/ 0, 0, 0, 0, 0, 0, 6, 7, 5, 9, 5, 5, 5, 5, 7, 0, -1, 0, 0, 0, -1, + /*FLOA*/ 0, 0, 0, 0, 0, 0, 0, 7, 6, 6, 6, 6, 6, 6, 6, 0, -1, 0, 0, 0, -1, + /*DOUB*/ 0, 0, 0, 0, 0, 0, 0, 0, 7, 7, 7, 7, 7, 7, 7, 0, -1, 0, 0, 0, -1, + /*VARC*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 8, 7, 7, 7, 7, 0, 16, 0, 0, 0, 20, + /*TIME*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 9, 9, 9, 7, 0, -1, 0, 0, 0, -1, + /*NCHA*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 7, 7, 7, 0, 16, 0, 0, 0, -1, + /*UTIN*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 13, 14, 0, -1, 0, 0, 0, 
-1, + /*USMA*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13, 14, 0, -1, 0, 0, 0, -1, + /*UINT*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 14, 0, -1, 0, 0, 0, -1, + /*UBIG*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, -1, + /*JSON*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, -1, + /*VARB*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, -1, -1, -1, + /*DECI*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, -1, + /*BLOB*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, -1, + /*MEDB*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, -1, + /*GEOM*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0 +}; int8_t gDisplyTypes[TSDB_DATA_TYPE_MAX][TSDB_DATA_TYPE_MAX] = { /*NULL BOOL TINY SMAL INT BIGI FLOA DOUB VARC TIM NCHA UTIN USMA UINT UBIG JSON VARB DECI BLOB MEDB GEOM*/ @@ -1070,6 +1059,9 @@ int32_t vectorGetConvertType(int32_t type1, int32_t type2) { int32_t vectorConvertSingleCol(SScalarParam *input, SScalarParam *output, int32_t type, int32_t startIndex, int32_t numOfRows) { + if (input->columnData == NULL && (input->pHashFilter != NULL || input->pHashFilterOthers != NULL)){ + return TSDB_CODE_SUCCESS; + } output->numOfRows = input->numOfRows; SDataType t = {.type = type}; @@ -1100,36 +1092,18 @@ int32_t vectorConvertCols(SScalarParam *pLeft, SScalarParam *pRight, SScalarPara int8_t type = 0; int32_t code = 0; - SScalarParam *param1 = NULL, *paramOut1 = NULL; - SScalarParam *param2 = NULL, *paramOut2 = NULL; + SScalarParam *param1 = pLeft, *paramOut1 = pLeftOut; + SScalarParam *param2 = pRight, *paramOut2 = pRightOut; // always convert least data if (IS_VAR_DATA_TYPE(leftType) && IS_VAR_DATA_TYPE(rightType) && (pLeft->numOfRows != pRight->numOfRows) && leftType != TSDB_DATA_TYPE_JSON && rightType != TSDB_DATA_TYPE_JSON) { - param1 = pLeft; - param2 = pRight; - paramOut1 = pLeftOut; - paramOut2 = pRightOut; - if (pLeft->numOfRows > pRight->numOfRows) { type = leftType; } else { 
type = rightType; } } else { - // we only define half value in the convert-matrix, so make sure param1 always less equal than param2 - if (leftType < rightType) { - param1 = pLeft; - param2 = pRight; - paramOut1 = pLeftOut; - paramOut2 = pRightOut; - } else { - param1 = pRight; - param2 = pLeft; - paramOut1 = pRightOut; - paramOut2 = pLeftOut; - } - type = vectorGetConvertType(GET_PARAM_TYPE(param1), GET_PARAM_TYPE(param2)); if (0 == type) { return TSDB_CODE_SUCCESS; @@ -1142,17 +1116,11 @@ int32_t vectorConvertCols(SScalarParam *pLeft, SScalarParam *pRight, SScalarPara } if (type != GET_PARAM_TYPE(param1)) { - code = vectorConvertSingleCol(param1, paramOut1, type, startIndex, numOfRows); - if (code) { - return code; - } + SCL_ERR_RET(vectorConvertSingleCol(param1, paramOut1, type, startIndex, numOfRows)); } if (type != GET_PARAM_TYPE(param2)) { - code = vectorConvertSingleCol(param2, paramOut2, type, startIndex, numOfRows); - if (code) { - return code; - } + SCL_ERR_RET(vectorConvertSingleCol(param2, paramOut2, type, startIndex, numOfRows)); } return TSDB_CODE_SUCCESS; @@ -1221,22 +1189,16 @@ static int32_t vectorMathTsAddHelper(SColumnInfoData *pLeftCol, SColumnInfoData static int32_t vectorConvertVarToDouble(SScalarParam *pInput, int32_t *converted, SColumnInfoData **pOutputCol) { SScalarParam output = {0}; SColumnInfoData *pCol = pInput->columnData; - + int32_t code = TSDB_CODE_SUCCESS; + *pOutputCol = NULL; if (IS_VAR_DATA_TYPE(pCol->info.type) && pCol->info.type != TSDB_DATA_TYPE_JSON && pCol->info.type != TSDB_DATA_TYPE_VARBINARY) { - int32_t code = vectorConvertSingleCol(pInput, &output, TSDB_DATA_TYPE_DOUBLE, -1, -1); - if (code != TSDB_CODE_SUCCESS) { - *pOutputCol = NULL; - SCL_ERR_RET(code); - } - + SCL_ERR_RET(vectorConvertSingleCol(pInput, &output, TSDB_DATA_TYPE_DOUBLE, -1, -1)); *converted = VECTOR_DO_CONVERT; - *pOutputCol = output.columnData; SCL_RET(code); } *converted = VECTOR_UN_CONVERT; - *pOutputCol = pInput->columnData; 
SCL_RET(TSDB_CODE_SUCCESS); } @@ -1262,12 +1224,7 @@ int32_t vectorMathAdd(SScalarParam *pLeft, SScalarParam *pRight, SScalarParam *p SColumnInfoData *pRightCol = NULL; SCL_ERR_JRET(vectorConvertVarToDouble(pLeft, &leftConvert, &pLeftCol)); SCL_ERR_JRET(vectorConvertVarToDouble(pRight, &rightConvert, &pRightCol)); - - if ((GET_PARAM_TYPE(pLeft) == TSDB_DATA_TYPE_TIMESTAMP && IS_INTEGER_TYPE(GET_PARAM_TYPE(pRight))) || - (GET_PARAM_TYPE(pRight) == TSDB_DATA_TYPE_TIMESTAMP && IS_INTEGER_TYPE(GET_PARAM_TYPE(pLeft))) || - (GET_PARAM_TYPE(pLeft) == TSDB_DATA_TYPE_TIMESTAMP && GET_PARAM_TYPE(pRight) == TSDB_DATA_TYPE_BOOL) || - (GET_PARAM_TYPE(pRight) == TSDB_DATA_TYPE_TIMESTAMP && - GET_PARAM_TYPE(pLeft) == TSDB_DATA_TYPE_BOOL)) { // timestamp plus duration + if(checkOperatorRestypeIsTimestamp(OP_TYPE_ADD, GET_PARAM_TYPE(pLeft), GET_PARAM_TYPE(pRight))) { // timestamp plus duration int64_t *output = (int64_t *)pOutputCol->pData; _getBigintValue_fn_t getVectorBigintValueFnLeft; _getBigintValue_fn_t getVectorBigintValueFnRight; @@ -1399,9 +1356,7 @@ int32_t vectorMathSub(SScalarParam *pLeft, SScalarParam *pRight, SScalarParam *p SCL_ERR_JRET(vectorConvertVarToDouble(pLeft, &leftConvert, &pLeftCol)); SCL_ERR_JRET(vectorConvertVarToDouble(pRight, &rightConvert, &pRightCol)); - if ((GET_PARAM_TYPE(pLeft) == TSDB_DATA_TYPE_TIMESTAMP && GET_PARAM_TYPE(pRight) == TSDB_DATA_TYPE_BIGINT) || - (GET_PARAM_TYPE(pRight) == TSDB_DATA_TYPE_TIMESTAMP && - GET_PARAM_TYPE(pLeft) == TSDB_DATA_TYPE_BIGINT)) { // timestamp minus duration + if (checkOperatorRestypeIsTimestamp(OP_TYPE_SUB, GET_PARAM_TYPE(pLeft), GET_PARAM_TYPE(pRight))) { // timestamp minus duration int64_t *output = (int64_t *)pOutputCol->pData; _getBigintValue_fn_t getVectorBigintValueFnLeft; _getBigintValue_fn_t getVectorBigintValueFnRight; @@ -1636,68 +1591,25 @@ int32_t vectorMathRemainder(SScalarParam *pLeft, SScalarParam *pRight, SScalarPa double *output = (double *)pOutputCol->pData; - if (pLeft->numOfRows == 
pRight->numOfRows) { - for (; i < pRight->numOfRows && i >= 0; i += step, output += 1) { - if (IS_NULL) { - colDataSetNULL(pOutputCol, i); - continue; - } - - double lx = 0; - double rx = 0; - SCL_ERR_JRET(getVectorDoubleValueFnLeft(LEFT_COL, i, &lx)); - SCL_ERR_JRET(getVectorDoubleValueFnRight(RIGHT_COL, i, &rx)); - if (isnan(lx) || isinf(lx) || isnan(rx) || isinf(rx) || FLT_EQUAL(rx, 0)) { - colDataSetNULL(pOutputCol, i); - continue; - } - - *output = lx - ((int64_t)(lx / rx)) * rx; + int32_t numOfRows = TMAX(pLeft->numOfRows, pRight->numOfRows); + for (; i < numOfRows && i >= 0; i += step, output += 1) { + int32_t leftidx = pLeft->numOfRows == 1 ? 0 : i; + int32_t rightidx = pRight->numOfRows == 1 ? 0 : i; + if (IS_HELPER_NULL(pLeftCol, leftidx) || IS_HELPER_NULL(pRightCol, rightidx)) { + colDataSetNULL(pOutputCol, i); + continue; } - } else if (pLeft->numOfRows == 1) { + double lx = 0; - SCL_ERR_JRET(getVectorDoubleValueFnLeft(LEFT_COL, 0, &lx)); - if (IS_HELPER_NULL(pLeftCol, 0)) { // Set pLeft->numOfRows NULL value - colDataSetNNULL(pOutputCol, 0, pRight->numOfRows); - } else { - for (; i >= 0 && i < pRight->numOfRows; i += step, output += 1) { - if (IS_HELPER_NULL(pRightCol, i)) { - colDataSetNULL(pOutputCol, i); - continue; - } - - double rx = 0; - SCL_ERR_JRET(getVectorDoubleValueFnRight(RIGHT_COL, i, &rx)); - if (isnan(rx) || isinf(rx) || FLT_EQUAL(rx, 0)) { - colDataSetNULL(pOutputCol, i); - continue; - } - - *output = lx - ((int64_t)(lx / rx)) * rx; - } - } - } else if (pRight->numOfRows == 1) { double rx = 0; - SCL_ERR_JRET(getVectorDoubleValueFnRight(RIGHT_COL, 0, &rx)); - if (IS_HELPER_NULL(pRightCol, 0) || FLT_EQUAL(rx, 0)) { // Set pLeft->numOfRows NULL value - colDataSetNNULL(pOutputCol, 0, pLeft->numOfRows); - } else { - for (; i >= 0 && i < pLeft->numOfRows; i += step, output += 1) { - if (IS_HELPER_NULL(pLeftCol, i)) { - colDataSetNULL(pOutputCol, i); - continue; - } - - double lx = 0; - SCL_ERR_JRET(getVectorDoubleValueFnLeft(LEFT_COL, i, 
&lx)); - if (isnan(lx) || isinf(lx)) { - colDataSetNULL(pOutputCol, i); - continue; - } - - *output = lx - ((int64_t)(lx / rx)) * rx; - } + SCL_ERR_JRET(getVectorDoubleValueFnLeft(LEFT_COL, leftidx, &lx)); + SCL_ERR_JRET(getVectorDoubleValueFnRight(RIGHT_COL, rightidx, &rx)); + if (isnan(lx) || isinf(lx) || isnan(rx) || isinf(rx) || FLT_EQUAL(rx, 0)) { + colDataSetNULL(pOutputCol, i); + continue; } + + *output = lx - ((int64_t)(lx / rx)) * rx; } _return: @@ -1759,33 +1671,6 @@ int32_t vectorAssign(SScalarParam *pLeft, SScalarParam *pRight, SScalarParam *pO return TSDB_CODE_SUCCESS; } -static int32_t vectorBitAndHelper(SColumnInfoData *pLeftCol, SColumnInfoData *pRightCol, SColumnInfoData *pOutputCol, - int32_t numOfRows, int32_t step, int32_t i) { - _getBigintValue_fn_t getVectorBigintValueFnLeft; - _getBigintValue_fn_t getVectorBigintValueFnRight; - SCL_ERR_RET(getVectorBigintValueFn(pLeftCol->info.type, &getVectorBigintValueFnLeft)); - SCL_ERR_RET(getVectorBigintValueFn(pRightCol->info.type, &getVectorBigintValueFnRight)); - - int64_t *output = (int64_t *)pOutputCol->pData; - - if (IS_HELPER_NULL(pRightCol, 0)) { // Set pLeft->numOfRows NULL value - colDataSetNNULL(pOutputCol, 0, numOfRows); - } else { - for (; i >= 0 && i < numOfRows; i += step, output += 1) { - if (IS_HELPER_NULL(pLeftCol, i)) { - colDataSetNULL(pOutputCol, i); - continue; // TODO set null or ignore - } - int64_t leftRes = 0; - int64_t rightRes = 0; - SCL_ERR_RET(getVectorBigintValueFnLeft(LEFT_COL, i, &leftRes)); - SCL_ERR_RET(getVectorBigintValueFnRight(RIGHT_COL, 0, &rightRes)); - *output = leftRes & rightRes; - } - } - SCL_RET(TSDB_CODE_SUCCESS); -} - int32_t vectorBitAnd(SScalarParam *pLeft, SScalarParam *pRight, SScalarParam *pOut, int32_t _ord) { SColumnInfoData *pOutputCol = pOut->columnData; pOut->numOfRows = TMAX(pLeft->numOfRows, pRight->numOfRows); @@ -1806,22 +1691,19 @@ int32_t vectorBitAnd(SScalarParam *pLeft, SScalarParam *pRight, SScalarParam *pO 
SCL_ERR_JRET(getVectorBigintValueFn(pRightCol->info.type, &getVectorBigintValueFnRight)); int64_t *output = (int64_t *)pOutputCol->pData; - if (pLeft->numOfRows == pRight->numOfRows) { - for (; i < pRight->numOfRows && i >= 0; i += step, output += 1) { - if (IS_NULL) { - colDataSetNULL(pOutputCol, i); - continue; // TODO set null or ignore - } - int64_t leftRes = 0; - int64_t rightRes = 0; - SCL_ERR_JRET(getVectorBigintValueFnLeft(LEFT_COL, i, &leftRes)); - SCL_ERR_JRET(getVectorBigintValueFnRight(RIGHT_COL, i, &rightRes)); - *output = leftRes & rightRes; + int32_t numOfRows = TMAX(pLeft->numOfRows, pRight->numOfRows); + for (; i < numOfRows && i >= 0; i += step, output += 1) { + int32_t leftidx = pLeft->numOfRows == 1 ? 0 : i; + int32_t rightidx = pRight->numOfRows == 1 ? 0 : i; + if (IS_HELPER_NULL(pRightCol, rightidx) || IS_HELPER_NULL(pLeftCol, leftidx)) { + colDataSetNULL(pOutputCol, i); + continue; // TODO set null or ignore } - } else if (pLeft->numOfRows == 1) { - SCL_ERR_JRET(vectorBitAndHelper(pRightCol, pLeftCol, pOutputCol, pRight->numOfRows, step, i)); - } else if (pRight->numOfRows == 1) { - SCL_ERR_JRET(vectorBitAndHelper(pLeftCol, pRightCol, pOutputCol, pLeft->numOfRows, step, i)); + int64_t leftRes = 0; + int64_t rightRes = 0; + SCL_ERR_JRET(getVectorBigintValueFnLeft(LEFT_COL, leftidx, &leftRes)); + SCL_ERR_JRET(getVectorBigintValueFnRight(RIGHT_COL, rightidx, &rightRes)); + *output = leftRes & rightRes; } _return: @@ -1830,33 +1712,6 @@ _return: SCL_RET(code); } -static int32_t vectorBitOrHelper(SColumnInfoData *pLeftCol, SColumnInfoData *pRightCol, SColumnInfoData *pOutputCol, - int32_t numOfRows, int32_t step, int32_t i) { - _getBigintValue_fn_t getVectorBigintValueFnLeft; - _getBigintValue_fn_t getVectorBigintValueFnRight; - SCL_ERR_RET(getVectorBigintValueFn(pLeftCol->info.type, &getVectorBigintValueFnLeft)); - SCL_ERR_RET(getVectorBigintValueFn(pRightCol->info.type, &getVectorBigintValueFnRight)); - - int64_t *output = (int64_t 
*)pOutputCol->pData; - - if (IS_HELPER_NULL(pRightCol, 0)) { // Set pLeft->numOfRows NULL value - colDataSetNNULL(pOutputCol, 0, numOfRows); - } else { - int64_t rx = 0; - SCL_ERR_RET(getVectorBigintValueFnRight(RIGHT_COL, 0, &rx)); - for (; i >= 0 && i < numOfRows; i += step, output += 1) { - if (IS_HELPER_NULL(pLeftCol, i)) { - colDataSetNULL(pOutputCol, i); - continue; // TODO set null or ignore - } - int64_t lx = 0; - SCL_ERR_RET(getVectorBigintValueFnLeft(LEFT_COL, i, &lx)); - *output = lx | rx; - } - } - SCL_RET(TSDB_CODE_SUCCESS); -} - int32_t vectorBitOr(SScalarParam *pLeft, SScalarParam *pRight, SScalarParam *pOut, int32_t _ord) { SColumnInfoData *pOutputCol = pOut->columnData; pOut->numOfRows = TMAX(pLeft->numOfRows, pRight->numOfRows); @@ -1877,22 +1732,20 @@ int32_t vectorBitOr(SScalarParam *pLeft, SScalarParam *pRight, SScalarParam *pOu SCL_ERR_JRET(getVectorBigintValueFn(pRightCol->info.type, &getVectorBigintValueFnRight)); int64_t *output = (int64_t *)pOutputCol->pData; - if (pLeft->numOfRows == pRight->numOfRows) { - for (; i < pRight->numOfRows && i >= 0; i += step, output += 1) { - if (IS_NULL) { - colDataSetNULL(pOutputCol, i); - continue; // TODO set null or ignore - } - int64_t leftRes = 0; - int64_t rightRes = 0; - SCL_ERR_JRET(getVectorBigintValueFnLeft(LEFT_COL, i, &leftRes)); - SCL_ERR_JRET(getVectorBigintValueFnRight(RIGHT_COL, i, &rightRes)); - *output = leftRes | rightRes; + int32_t numOfRows = TMAX(pLeft->numOfRows, pRight->numOfRows); + for (; i < numOfRows && i >= 0; i += step, output += 1) { + int32_t leftidx = pLeft->numOfRows == 1 ? 0 : i; + int32_t rightidx = pRight->numOfRows == 1 ? 
0 : i; + if (IS_HELPER_NULL(pRightCol, leftidx) || IS_HELPER_NULL(pLeftCol, rightidx)) { + colDataSetNULL(pOutputCol, i); + continue; // TODO set null or ignore } - } else if (pLeft->numOfRows == 1) { - SCL_ERR_JRET(vectorBitOrHelper(pRightCol, pLeftCol, pOutputCol, pRight->numOfRows, step, i)); - } else if (pRight->numOfRows == 1) { - SCL_ERR_JRET(vectorBitOrHelper(pLeftCol, pRightCol, pOutputCol, pLeft->numOfRows, step, i)); + + int64_t leftRes = 0; + int64_t rightRes = 0; + SCL_ERR_JRET(getVectorBigintValueFnLeft(LEFT_COL, leftidx, &leftRes)); + SCL_ERR_JRET(getVectorBigintValueFnRight(RIGHT_COL, rightidx, &rightRes)); + *output = leftRes | rightRes; } _return: @@ -1992,13 +1845,14 @@ int32_t doVectorCompareImpl(SScalarParam *pLeft, SScalarParam *pRight, SScalarPa return code; } -int32_t doVectorCompare(SScalarParam *pLeft, SScalarParam *pRight, SScalarParam *pOut, int32_t startIndex, +int32_t doVectorCompare(SScalarParam *pLeft, SScalarParam *pLeftVar, SScalarParam *pRight, SScalarParam *pOut, int32_t startIndex, int32_t numOfRows, int32_t _ord, int32_t optr) { int32_t i = 0; int32_t step = ((_ord) == TSDB_ORDER_ASC) ? 1 : -1; int32_t lType = GET_PARAM_TYPE(pLeft); int32_t rType = GET_PARAM_TYPE(pRight); __compar_fn_t fp = NULL; + __compar_fn_t fpVar = NULL; int32_t compRows = 0; if (lType == rType) { SCL_ERR_RET(filterGetCompFunc(&fp, lType, optr)); @@ -2006,6 +1860,9 @@ int32_t doVectorCompare(SScalarParam *pLeft, SScalarParam *pRight, SScalarParam fp = filterGetCompFuncEx(lType, rType, optr); } + if (pLeftVar != NULL) { + SCL_ERR_RET(filterGetCompFunc(&fpVar, GET_PARAM_TYPE(pLeftVar), optr)); + } if (startIndex < 0) { i = ((_ord) == TSDB_ORDER_ASC) ? 
0 : TMAX(pLeft->numOfRows, pRight->numOfRows) - 1; pOut->numOfRows = TMAX(pLeft->numOfRows, pRight->numOfRows); @@ -2025,6 +1882,18 @@ int32_t doVectorCompare(SScalarParam *pLeft, SScalarParam *pRight, SScalarParam char *pLeftData = colDataGetData(pLeft->columnData, i); bool res = filterDoCompare(fp, optr, pLeftData, pRight->pHashFilter); + if (pLeftVar != NULL && taosHashGetSize(pRight->pHashFilterOthers) > 0){ + do{ + if (optr == OP_TYPE_IN && res){ + break; + } + if (optr == OP_TYPE_NOT_IN && !res){ + break; + } + pLeftData = colDataGetData(pLeftVar->columnData, i); + res = filterDoCompare(fpVar, optr, pLeftData, pRight->pHashFilterOthers); + }while(0); + } colDataSetInt8(pOut->columnData, i, (int8_t *)&res); if (res) { pOut->numOfQualified++; @@ -2042,6 +1911,7 @@ int32_t vectorCompareImpl(SScalarParam *pLeft, SScalarParam *pRight, SScalarPara SScalarParam pRightOut = {0}; SScalarParam *param1 = NULL; SScalarParam *param2 = NULL; + SScalarParam *param3 = NULL; int32_t code = TSDB_CODE_SUCCESS; setTzCharset(&pLeftOut, pLeft->tz, pLeft->charsetCxt); setTzCharset(&pRightOut, pLeft->tz, pLeft->charsetCxt); @@ -2052,9 +1922,12 @@ int32_t vectorCompareImpl(SScalarParam *pLeft, SScalarParam *pRight, SScalarPara SCL_ERR_JRET(vectorConvertCols(pLeft, pRight, &pLeftOut, &pRightOut, startIndex, numOfRows)); param1 = (pLeftOut.columnData != NULL) ? &pLeftOut : pLeft; param2 = (pRightOut.columnData != NULL) ? 
&pRightOut : pRight; + if (pRight->pHashFilterOthers != NULL){ + param3 = pLeft; + } } - SCL_ERR_JRET(doVectorCompare(param1, param2, pOut, startIndex, numOfRows, _ord, optr)); + SCL_ERR_JRET(doVectorCompare(param1, param3, param2, pOut, startIndex, numOfRows, _ord, optr)); _return: sclFreeParam(&pLeftOut); @@ -2306,3 +2179,16 @@ _bin_scalar_fn_t getBinScalarOperatorFn(int32_t binFunctionId) { return NULL; } } + +bool checkOperatorRestypeIsTimestamp(EOperatorType opType, int32_t lType, int32_t rType) { + if (opType != OP_TYPE_ADD && opType != OP_TYPE_SUB && opType != OP_TYPE_MINUS) { + return false; + } + if ((TSDB_DATA_TYPE_TIMESTAMP == lType && IS_INTEGER_TYPE(rType) && rType != TSDB_DATA_TYPE_UBIGINT) || + (TSDB_DATA_TYPE_TIMESTAMP == rType && IS_INTEGER_TYPE(lType) && lType != TSDB_DATA_TYPE_UBIGINT) || + (TSDB_DATA_TYPE_TIMESTAMP == lType && TSDB_DATA_TYPE_BOOL == rType) || + (TSDB_DATA_TYPE_TIMESTAMP == rType && TSDB_DATA_TYPE_BOOL == lType)) { + return true; + } + return false; +} diff --git a/source/libs/scalar/test/scalar/scalarTests.cpp b/source/libs/scalar/test/scalar/scalarTests.cpp index 3eae06d9bb..fec9d14ae0 100644 --- a/source/libs/scalar/test/scalar/scalarTests.cpp +++ b/source/libs/scalar/test/scalar/scalarTests.cpp @@ -391,6 +391,26 @@ TEST(constantTest, bigint_add_bigint) { nodesDestroyNode(res); } +TEST(constantTest, ubigint_add_ubigint) { + SNode *pLeft = NULL, *pRight = NULL, *opNode = NULL, *res = NULL; + int32_t code = TSDB_CODE_SUCCESS; + code = scltMakeValueNode(&pLeft, TSDB_DATA_TYPE_UBIGINT, &scltLeftV); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + code = scltMakeValueNode(&pRight, TSDB_DATA_TYPE_UBIGINT, &scltRightV); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + code = scltMakeOpNode(&opNode, OP_TYPE_ADD, TSDB_DATA_TYPE_UBIGINT, pLeft, pRight); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + + code = scalarCalculateConstants(opNode, &res); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + ASSERT_TRUE(res); + ASSERT_EQ(nodeType(res), QUERY_NODE_VALUE); + 
SValueNode *v = (SValueNode *)res; + ASSERT_EQ(v->node.resType.type, TSDB_DATA_TYPE_UBIGINT); + ASSERT_FLOAT_EQ(v->datum.d, (scltLeftV + scltRightV)); + nodesDestroyNode(res); +} + TEST(constantTest, double_sub_bigint) { SNode *pLeft = NULL, *pRight = NULL, *opNode = NULL, *res = NULL; int32_t code = TSDB_CODE_SUCCESS; @@ -431,6 +451,66 @@ TEST(constantTest, tinyint_and_smallint) { nodesDestroyNode(res); } +TEST(constantTest, utinyint_and_usmallint) { + SNode *pLeft = NULL, *pRight = NULL, *opNode = NULL, *res = NULL; + int32_t code = TSDB_CODE_SUCCESS; + code = scltMakeValueNode(&pLeft, TSDB_DATA_TYPE_UTINYINT, &scltLeftV); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + code = scltMakeValueNode(&pRight, TSDB_DATA_TYPE_USMALLINT, &scltRightV); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + code = scltMakeOpNode(&opNode, OP_TYPE_BIT_AND, TSDB_DATA_TYPE_BIGINT, pLeft, pRight); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + + code = scalarCalculateConstants(opNode, &res); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + ASSERT_TRUE(res); + ASSERT_EQ(nodeType(res), QUERY_NODE_VALUE); + SValueNode *v = (SValueNode *)res; + ASSERT_EQ(v->node.resType.type, TSDB_DATA_TYPE_BIGINT); + ASSERT_EQ(v->datum.i, (int64_t)scltLeftV & (int64_t)scltRightV); + nodesDestroyNode(res); +} + +TEST(constantTest, uint_and_usmallint) { + SNode *pLeft = NULL, *pRight = NULL, *opNode = NULL, *res = NULL; + int32_t code = TSDB_CODE_SUCCESS; + code = scltMakeValueNode(&pLeft, TSDB_DATA_TYPE_UINT, &scltLeftV); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + code = scltMakeValueNode(&pRight, TSDB_DATA_TYPE_USMALLINT, &scltRightV); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + code = scltMakeOpNode(&opNode, OP_TYPE_BIT_AND, TSDB_DATA_TYPE_BIGINT, pLeft, pRight); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + + code = scalarCalculateConstants(opNode, &res); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + ASSERT_TRUE(res); + ASSERT_EQ(nodeType(res), QUERY_NODE_VALUE); + SValueNode *v = (SValueNode *)res; + ASSERT_EQ(v->node.resType.type, 
TSDB_DATA_TYPE_BIGINT); + ASSERT_EQ(v->datum.i, (int64_t)scltLeftV & (int64_t)scltRightV); + nodesDestroyNode(res); +} + +TEST(constantTest, ubigint_and_uint) { + SNode *pLeft = NULL, *pRight = NULL, *opNode = NULL, *res = NULL; + int32_t code = TSDB_CODE_SUCCESS; + code = scltMakeValueNode(&pLeft, TSDB_DATA_TYPE_UBIGINT, &scltLeftV); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + code = scltMakeValueNode(&pRight, TSDB_DATA_TYPE_UINT, &scltRightV); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + code = scltMakeOpNode(&opNode, OP_TYPE_BIT_AND, TSDB_DATA_TYPE_BIGINT, pLeft, pRight); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + + code = scalarCalculateConstants(opNode, &res); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + ASSERT_TRUE(res); + ASSERT_EQ(nodeType(res), QUERY_NODE_VALUE); + SValueNode *v = (SValueNode *)res; + ASSERT_EQ(v->node.resType.type, TSDB_DATA_TYPE_BIGINT); + ASSERT_EQ(v->datum.i, (int64_t)scltLeftV & (int64_t)scltRightV); + nodesDestroyNode(res); +} + TEST(constantTest, bigint_or_double) { SNode *pLeft = NULL, *pRight = NULL, *opNode = NULL, *res = NULL; int32_t code = TSDB_CODE_SUCCESS; @@ -494,6 +574,53 @@ TEST(constantTest, int_greater_double) { nodesDestroyNode(res); } +TEST(constantTest, binary_greater_equal_varbinary) { + SNode *pLeft = NULL, *pRight = NULL, *opNode = NULL, *res = NULL; + char binaryStr[64] = {0}; + int32_t code = TSDB_CODE_SUCCESS; + (void)sprintf(&binaryStr[2], "%d", scltRightV); + varDataSetLen(binaryStr, strlen(&binaryStr[2])); + code = scltMakeValueNode(&pLeft, TSDB_DATA_TYPE_VARBINARY, binaryStr); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + code = scltMakeValueNode(&pRight, TSDB_DATA_TYPE_BINARY, binaryStr); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + code = scltMakeOpNode(&opNode, OP_TYPE_GREATER_THAN, TSDB_DATA_TYPE_BOOL, pLeft, pRight); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + + code = scalarCalculateConstants(opNode, &res); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + ASSERT_TRUE(res); + ASSERT_EQ(nodeType(res), QUERY_NODE_VALUE); + SValueNode *v = 
(SValueNode *)res; + ASSERT_EQ(v->node.resType.type, TSDB_DATA_TYPE_BOOL); + ASSERT_EQ(v->datum.b, scltLeftV < scltRightVd); + nodesDestroyNode(res); +} + +TEST(constantTest, binary_equal_geo) { + SNode *pLeft = NULL, *pRight = NULL, *opNode = NULL, *res = NULL; + char geoRawStr[64] = "POLYGON((30 10, 40 40, 20 40, 10 20, 30 10))"; + char geoStr[64] = {0}; + int32_t code = TSDB_CODE_SUCCESS; + (void)sprintf(&geoStr[2], "%s", geoRawStr); + varDataSetLen(geoStr, strlen(&geoStr[2])); + code = scltMakeValueNode(&pLeft, TSDB_DATA_TYPE_GEOMETRY, geoStr); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + code = scltMakeValueNode(&pRight, TSDB_DATA_TYPE_BINARY, geoStr); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + code = scltMakeOpNode(&opNode, OP_TYPE_EQUAL, TSDB_DATA_TYPE_BOOL, pLeft, pRight); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + + code = scalarCalculateConstants(opNode, &res); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + ASSERT_TRUE(res); + ASSERT_EQ(nodeType(res), QUERY_NODE_VALUE); + SValueNode *v = (SValueNode *)res; + ASSERT_EQ(v->node.resType.type, TSDB_DATA_TYPE_BOOL); + ASSERT_EQ(v->datum.b, scltLeftV < scltRightVd); + nodesDestroyNode(res); +} + TEST(constantTest, int_greater_equal_binary) { SNode *pLeft = NULL, *pRight = NULL, *opNode = NULL, *res = NULL; char binaryStr[64] = {0}; @@ -2106,7 +2233,7 @@ TEST(columnTest, int_column_in_double_list) { SNode *pLeft = NULL, *pRight = NULL, *listNode = NULL, *opNode = NULL; int32_t leftv[5] = {1, 2, 3, 4, 5}; double rightv1 = 1.1, rightv2 = 2.2, rightv3 = 3.3; - bool eRes[5] = {true, true, true, false, false}; + bool eRes[5] = {false, false, false, false, false}; SSDataBlock *src = NULL; int32_t rowNum = sizeof(leftv) / sizeof(leftv[0]); int32_t code = TSDB_CODE_SUCCESS; diff --git a/source/libs/scheduler/src/schRemote.c b/source/libs/scheduler/src/schRemote.c index d15ac7a791..a031bc08de 100644 --- a/source/libs/scheduler/src/schRemote.c +++ b/source/libs/scheduler/src/schRemote.c @@ -531,8 +531,8 @@ int32_t 
schHandleNotifyCallback(void *param, SDataBuf *pMsg, int32_t code) { qDebug("QID:0x%" PRIx64 ",SID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 " task notify rsp received, code:0x%x", pParam->queryId, pParam->seriousId, pParam->clientId, pParam->taskId, code); if (pMsg) { - taosMemoryFree(pMsg->pData); - taosMemoryFree(pMsg->pEpSet); + taosMemoryFreeClear(pMsg->pData); + taosMemoryFreeClear(pMsg->pEpSet); } return TSDB_CODE_SUCCESS; } @@ -545,8 +545,8 @@ int32_t schHandleLinkBrokenCallback(void *param, SDataBuf *pMsg, int32_t code) { qDebug("handle %p is broken", pMsg->handle); if (head->isHbParam) { - taosMemoryFree(pMsg->pData); - taosMemoryFree(pMsg->pEpSet); + taosMemoryFreeClear(pMsg->pData); + taosMemoryFreeClear(pMsg->pEpSet); SSchHbCallbackParam *hbParam = (SSchHbCallbackParam *)param; SSchTrans trans = {.pTrans = hbParam->pTrans, .pHandle = NULL, .pHandleId = 0}; @@ -1293,6 +1293,7 @@ int32_t schBuildAndSendMsg(SSchJob *pJob, SSchTask *pTask, SQueryNodeAddr *addr, } break; } +/* case TDMT_SCH_QUERY_HEARTBEAT: { SCH_ERR_RET(schMakeHbRpcCtx(pJob, pTask, &rpcCtx)); @@ -1320,6 +1321,7 @@ int32_t schBuildAndSendMsg(SSchJob *pJob, SSchTask *pTask, SQueryNodeAddr *addr, persistHandle = true; break; } +*/ case TDMT_SCH_TASK_NOTIFY: { ETaskNotifyType* pType = param; STaskNotifyReq qMsg; diff --git a/source/libs/scheduler/src/schTask.c b/source/libs/scheduler/src/schTask.c index cb8a68fe4f..b31353e97f 100644 --- a/source/libs/scheduler/src/schTask.c +++ b/source/libs/scheduler/src/schTask.c @@ -189,7 +189,6 @@ int32_t schProcessOnTaskFailure(SSchJob *pJob, SSchTask *pTask, int32_t errCode) } pTask->failedExecId = pTask->execId; - pTask->failedSeriousId = pTask->seriousId; int8_t jobStatus = 0; if (schJobNeedToStop(pJob, &jobStatus)) { @@ -438,7 +437,7 @@ void schResetTaskForRetry(SSchJob *pJob, SSchTask *pTask) { pTask->waitRetry = true; if (pTask->delayTimer) { - taosTmrStop(pTask->delayTimer); + UNUSED(taosTmrStop(pTask->delayTimer)); } 
schDropTaskOnExecNode(pJob, pTask); @@ -452,6 +451,8 @@ void schResetTaskForRetry(SSchJob *pJob, SSchTask *pTask) { TAOS_MEMSET(&pTask->succeedAddr, 0, sizeof(pTask->succeedAddr)); } +#if 0 + int32_t schDoTaskRedirect(SSchJob *pJob, SSchTask *pTask, SDataBuf *pData, int32_t rspCode) { int32_t code = 0; @@ -593,6 +594,7 @@ _return: SCH_RET(schProcessOnTaskFailure(pJob, pTask, code)); } +#endif int32_t schPushTaskToExecList(SSchJob *pJob, SSchTask *pTask) { int32_t code = taosHashPut(pJob->execTasks, &pTask->taskId, sizeof(pTask->taskId), &pTask, POINTER_BYTES); @@ -759,7 +761,7 @@ int32_t schHandleTaskRetry(SSchJob *pJob, SSchTask *pTask) { (void)atomic_sub_fetch_32(&pTask->level->taskLaunchedNum, 1); if (pTask->delayTimer) { - taosTmrStop(pTask->delayTimer); + UNUSED(taosTmrStop(pTask->delayTimer)); } (void)schRemoveTaskFromExecList(pJob, pTask); // ignore error @@ -869,6 +871,7 @@ int32_t schSetTaskCandidateAddrs(SSchJob *pJob, SSchTask *pTask) { return TSDB_CODE_SUCCESS; } +#if 0 int32_t schUpdateTaskCandidateAddr(SSchJob *pJob, SSchTask *pTask, SEpSet *pEpSet) { int32_t code = TSDB_CODE_SUCCESS; if (NULL == pTask->candidateAddrs || 1 != taosArrayGetSize(pTask->candidateAddrs)) { @@ -900,6 +903,7 @@ _return: return code; } +#endif int32_t schSwitchTaskCandidateAddr(SSchJob *pJob, SSchTask *pTask) { int32_t candidateNum = taosArrayGetSize(pTask->candidateAddrs); @@ -1376,6 +1380,7 @@ int32_t schLaunchLevelTasks(SSchJob *pJob, SSchLevel *level) { for (int32_t i = 0; i < level->taskNum; ++i) { SSchTask *pTask = taosArrayGet(level->subTasks, i); + pTask->failedSeriousId = pJob->seriousId - 1; pTask->seriousId = pJob->seriousId; SCH_TASK_DLOG("task seriousId set to 0x%" PRIx64, pTask->seriousId); diff --git a/source/libs/scheduler/test/schedulerTests.cpp b/source/libs/scheduler/test/schedulerTests.cpp index a9878ec9a9..c13ea913f5 100644 --- a/source/libs/scheduler/test/schedulerTests.cpp +++ b/source/libs/scheduler/test/schedulerTests.cpp @@ -57,6 +57,9 @@ namespace { 
extern "C" int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, uint64_t sId, int32_t execId, SDataBuf *pMsg, int32_t rspCode); extern "C" int32_t schHandleCallback(void *param, const SDataBuf *pMsg, int32_t rspCode); +extern "C" int32_t schHandleNotifyCallback(void *param, SDataBuf *pMsg, int32_t code); +extern "C" int32_t schHandleLinkBrokenCallback(void *param, SDataBuf *pMsg, int32_t code); +extern "C" int32_t schRescheduleTask(SSchJob *pJob, SSchTask *pTask); int64_t insertJobRefId = 0; int64_t queryJobRefId = 0; @@ -316,7 +319,7 @@ void schtBuildQueryFlowCtrlDag(SQueryPlan *dag) { scanPlan->execNode.nodeId = 1 + i; scanPlan->execNode.epSet.inUse = 0; - scanPlan->execNodeStat.tableNum = taosRand() % 30; + scanPlan->execNodeStat.tableNum = taosRand() % 100; addEpIntoEpSet(&scanPlan->execNode.epSet, "ep0", 6030); addEpIntoEpSet(&scanPlan->execNode.epSet, "ep1", 6030); addEpIntoEpSet(&scanPlan->execNode.epSet, "ep2", 6030); @@ -982,8 +985,159 @@ TEST(queryTest, normalCase) { schedulerFreeJob(&job, 0); (void)taosThreadJoin(thread1, NULL); + + schMgmt.jobRef = -1; } +TEST(queryTest, rescheduleCase) { + void *mockPointer = (void *)0x1; + char *clusterId = "cluster1"; + char *dbname = "1.db1"; + char *tablename = "table1"; + SVgroupInfo vgInfo = {0}; + int64_t job = 0; + SQueryPlan *dag = NULL; + int32_t code = nodesMakeNode(QUERY_NODE_PHYSICAL_PLAN, (SNode**)&dag); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + + SArray *qnodeList = taosArrayInit(1, sizeof(SQueryNodeLoad)); + + SQueryNodeLoad load = {0}; + load.addr.epSet.numOfEps = 1; + TAOS_STRCPY(load.addr.epSet.eps[0].fqdn, "qnode0.ep"); + load.addr.epSet.eps[0].port = 6031; + assert(taosArrayPush(qnodeList, &load) != NULL); + + TAOS_STRCPY(load.addr.epSet.eps[0].fqdn, "qnode1.ep"); + assert(taosArrayPush(qnodeList, &load) != NULL); + + code = schedulerInit(); + ASSERT_EQ(code, 0); + + schtBuildQueryDag(dag); + + schtSetPlanToString(); + schtSetExecNode(); + schtSetAsyncSendMsgToServer(); + + int32_t queryDone 
= 0; + + SRequestConnInfo conn = {0}; + conn.pTrans = mockPointer; + SSchedulerReq req = {0}; + req.pConn = &conn; + req.pNodeList = qnodeList; + req.pDag = dag; + req.sql = "select * from tb"; + req.execFp = schtQueryCb; + req.cbParam = &queryDone; + + code = schedulerExecJob(&req, &job); + ASSERT_EQ(code, 0); + + SSchJob *pJob = NULL; + code = schAcquireJob(job, &pJob); + ASSERT_EQ(code, 0); + + schedulerEnableReSchedule(true); + + void *pIter = taosHashIterate(pJob->execTasks, NULL); + while (pIter) { + SSchTask *task = *(SSchTask **)pIter; + task->timeoutUsec = -1; + + code = schRescheduleTask(pJob, task); + ASSERT_EQ(code, 0); + + task->timeoutUsec = SCH_DEFAULT_TASK_TIMEOUT_USEC; + pIter = taosHashIterate(pJob->execTasks, pIter); + } + + pIter = taosHashIterate(pJob->execTasks, NULL); + while (pIter) { + SSchTask *task = *(SSchTask **)pIter; + + SDataBuf msg = {0}; + void *rmsg = NULL; + assert(0 == schtBuildQueryRspMsg(&msg.len, &rmsg)); + msg.msgType = TDMT_SCH_QUERY_RSP; + msg.pData = rmsg; + + code = schHandleResponseMsg(pJob, task, task->seriousId, task->execId, &msg, 0); + + ASSERT_EQ(code, 0); + pIter = taosHashIterate(pJob->execTasks, pIter); + } + + + pIter = taosHashIterate(pJob->execTasks, NULL); + while (pIter) { + SSchTask *task = *(SSchTask **)pIter; + task->timeoutUsec = -1; + + code = schRescheduleTask(pJob, task); + ASSERT_EQ(code, 0); + + task->timeoutUsec = SCH_DEFAULT_TASK_TIMEOUT_USEC; + pIter = taosHashIterate(pJob->execTasks, pIter); + } + + pIter = taosHashIterate(pJob->execTasks, NULL); + while (pIter) { + SSchTask *task = *(SSchTask **)pIter; + if (JOB_TASK_STATUS_EXEC == task->status) { + SDataBuf msg = {0}; + void *rmsg = NULL; + assert(0 == schtBuildQueryRspMsg(&msg.len, &rmsg)); + msg.msgType = TDMT_SCH_QUERY_RSP; + msg.pData = rmsg; + + code = schHandleResponseMsg(pJob, task, task->seriousId, task->execId, &msg, 0); + + ASSERT_EQ(code, 0); + } + + pIter = taosHashIterate(pJob->execTasks, pIter); + } + + while (true) { + if 
(queryDone) { + break; + } + + taosUsleep(10000); + } + + TdThreadAttr thattr; + assert(0 == taosThreadAttrInit(&thattr)); + + TdThread thread1; + assert(0 == taosThreadCreate(&(thread1), &thattr, schtCreateFetchRspThread, &job)); + + void *data = NULL; + req.syncReq = true; + req.pFetchRes = &data; + + code = schedulerFetchRows(job, &req); + ASSERT_EQ(code, 0); + + SRetrieveTableRsp *pRsp = (SRetrieveTableRsp *)data; + ASSERT_EQ(pRsp->completed, 1); + ASSERT_EQ(pRsp->numOfRows, 10); + taosMemoryFreeClear(data); + + (void)schReleaseJob(job); + + schedulerDestroy(); + + schedulerFreeJob(&job, 0); + + (void)taosThreadJoin(thread1, NULL); + + schMgmt.jobRef = -1; +} + + TEST(queryTest, readyFirstCase) { void *mockPointer = (void *)0x1; char *clusterId = "cluster1"; @@ -1097,6 +1251,7 @@ TEST(queryTest, readyFirstCase) { schedulerFreeJob(&job, 0); (void)taosThreadJoin(thread1, NULL); + schMgmt.jobRef = -1; } TEST(queryTest, flowCtrlCase) { @@ -1196,6 +1351,9 @@ TEST(queryTest, flowCtrlCase) { schedulerFreeJob(&job, 0); (void)taosThreadJoin(thread1, NULL); + schMgmt.jobRef = -1; + + cleanupTaskQueue(); } TEST(insertTest, normalCase) { @@ -1260,6 +1418,7 @@ TEST(insertTest, normalCase) { schedulerDestroy(); (void)taosThreadJoin(thread1, NULL); + schMgmt.jobRef = -1; } TEST(multiThread, forceFree) { @@ -1282,9 +1441,11 @@ TEST(multiThread, forceFree) { schtTestStop = true; // taosSsleep(3); + + schMgmt.jobRef = -1; } -TEST(otherTest, otherCase) { +TEST(otherTest, function) { // excpet test (void)schReleaseJob(0); schFreeRpcCtx(NULL); @@ -1293,6 +1454,39 @@ TEST(otherTest, otherCase) { ASSERT_EQ(schDumpEpSet(NULL, &ep), TSDB_CODE_SUCCESS); ASSERT_EQ(strcmp(schGetOpStr(SCH_OP_NULL), "NULL"), 0); ASSERT_EQ(strcmp(schGetOpStr((SCH_OP_TYPE)100), "UNKNOWN"), 0); + + SSchTaskCallbackParam param = {0}; + SDataBuf dataBuf = {0}; + dataBuf.pData = taosMemoryMalloc(1); + dataBuf.pEpSet = (SEpSet*)taosMemoryMalloc(sizeof(*dataBuf.pEpSet)); + ASSERT_EQ(schHandleNotifyCallback(¶m, 
&dataBuf, TSDB_CODE_SUCCESS), TSDB_CODE_SUCCESS); + + SSchCallbackParamHeader param2 = {0}; + dataBuf.pData = taosMemoryMalloc(1); + dataBuf.pEpSet = (SEpSet*)taosMemoryMalloc(sizeof(*dataBuf.pEpSet)); + schHandleLinkBrokenCallback(¶m2, &dataBuf, TSDB_CODE_SUCCESS); + param2.isHbParam = true; + dataBuf.pData = taosMemoryMalloc(1); + dataBuf.pEpSet = (SEpSet*)taosMemoryMalloc(sizeof(*dataBuf.pEpSet)); + schHandleLinkBrokenCallback(¶m2, &dataBuf, TSDB_CODE_SUCCESS); + + schMgmt.jobRef = -1; +} + +void schtReset() { + insertJobRefId = 0; + queryJobRefId = 0; + + schtJobDone = false; + schtMergeTemplateId = 0x4; + schtFetchTaskId = 0; + schtQueryId = 1; + + schtTestStop = false; + schtTestDeadLoop = false; + schtTestMTRunSec = 1; + schtTestPrintNum = 1000; + schtStartFetch = 0; } int main(int argc, char **argv) { @@ -1302,7 +1496,17 @@ int main(int argc, char **argv) { } taosSeedRand(taosGetTimestampSec()); testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); + + int code = 0; + for (int32_t i = 0; i < 10; ++i) { + schtReset(); + code = RUN_ALL_TESTS(); + if (code) { + break; + } + } + + return code; } #pragma GCC diagnostic pop diff --git a/source/libs/stream/inc/streamInt.h b/source/libs/stream/inc/streamInt.h index e4f921f04a..41ac0117f3 100644 --- a/source/libs/stream/inc/streamInt.h +++ b/source/libs/stream/inc/streamInt.h @@ -200,6 +200,7 @@ void streamTaskInitForLaunchHTask(SHistoryTaskInfo* pInfo); void streamTaskSetRetryInfoForLaunch(SHistoryTaskInfo* pInfo); int32_t streamTaskResetTimewindowFilter(SStreamTask* pTask); void streamTaskClearActiveInfo(SActiveCheckpointInfo* pInfo); +int32_t streamTaskAddIntoNodeUpdateList(SStreamTask* pTask, int32_t nodeId); void streamClearChkptReadyMsg(SActiveCheckpointInfo* pActiveInfo); EExtractDataCode streamTaskGetDataFromInputQ(SStreamTask* pTask, SStreamQueueItem** pInput, int32_t* numOfBlocks, @@ -243,10 +244,15 @@ int32_t flushStateDataInExecutor(SStreamTask* pTask, SStreamQueueItem* pCheckpoi int32_t 
streamCreateSinkResTrigger(SStreamTrigger** pTrigger); int32_t streamCreateForcewindowTrigger(SStreamTrigger** pTrigger, int32_t trigger, SInterval* pInterval, STimeWindow* pLatestWindow, const char* id); +// inject stream errors +void chkptFailedByRetrieveReqToSource(SStreamTask* pTask, int64_t checkpointId); // inject stream errors void chkptFailedByRetrieveReqToSource(SStreamTask* pTask, int64_t checkpointId); +int32_t uploadCheckpointData(SStreamTask* pTask, int64_t checkpointId, int64_t dbRefId, ECHECKPOINT_BACKUP_TYPE type); +int32_t chkptTriggerRecvMonitorHelper(SStreamTask* pTask, void* param, SArray** ppNotSendList); + #ifdef __cplusplus } #endif diff --git a/source/libs/stream/src/streamCheckStatus.c b/source/libs/stream/src/streamCheckStatus.c index 64b19e4ed9..118cb1cfb6 100644 --- a/source/libs/stream/src/streamCheckStatus.c +++ b/source/libs/stream/src/streamCheckStatus.c @@ -21,7 +21,6 @@ #define CHECK_NOT_RSP_DURATION 10 * 1000 // 10 sec static void processDownstreamReadyRsp(SStreamTask* pTask); -static int32_t addIntoNodeUpdateList(SStreamTask* pTask, int32_t nodeId); static void rspMonitorFn(void* param, void* tmrId); static void streamTaskInitTaskCheckInfo(STaskCheckInfo* pInfo, STaskOutputInfo* pOutputInfo, int64_t startTs); static int32_t streamTaskStartCheckDownstream(STaskCheckInfo* pInfo, const char* id); @@ -226,13 +225,13 @@ int32_t streamTaskProcessCheckRsp(SStreamTask* pTask, const SStreamTaskCheckRsp* stError("s-task:%s vgId:%d self vnode-transfer/leader-change/restart detected, old stage:%" PRId64 ", current stage:%" PRId64 ", not check wait for downstream task nodeUpdate, and all tasks restart", id, pRsp->upstreamNodeId, pRsp->oldStage, pTask->pMeta->stage); - code = addIntoNodeUpdateList(pTask, pRsp->upstreamNodeId); + code = streamTaskAddIntoNodeUpdateList(pTask, pRsp->upstreamNodeId); } else { stError( "s-task:%s downstream taskId:0x%x (vgId:%d) not leader, self dispatch epset needs to be updated, not check " "downstream again, 
nodeUpdate needed", id, pRsp->downstreamTaskId, pRsp->downstreamNodeId); - code = addIntoNodeUpdateList(pTask, pRsp->downstreamNodeId); + code = streamTaskAddIntoNodeUpdateList(pTask, pRsp->downstreamNodeId); } streamMetaAddFailedTaskSelf(pTask, now); @@ -373,11 +372,10 @@ void processDownstreamReadyRsp(SStreamTask* pTask) { } } -int32_t addIntoNodeUpdateList(SStreamTask* pTask, int32_t nodeId) { +int32_t streamTaskAddIntoNodeUpdateList(SStreamTask* pTask, int32_t nodeId) { int32_t vgId = pTask->pMeta->vgId; int32_t code = 0; - ; - bool existed = false; + bool existed = false; streamMutexLock(&pTask->lock); @@ -675,8 +673,8 @@ void handleTimeoutDownstreamTasks(SStreamTask* pTask, SArray* pTimeoutList) { SDownstreamStatusInfo* p = NULL; findCheckRspStatus(pInfo, *pTaskId, &p); if (p != NULL) { - code = addIntoNodeUpdateList(pTask, p->vgId); - stDebug("s-task:%s vgId:%d downstream task:0x%x (vgId:%d) timeout more than 100sec, add into nodeUpate list", + code = streamTaskAddIntoNodeUpdateList(pTask, p->vgId); + stDebug("s-task:%s vgId:%d downstream task:0x%x (vgId:%d) timeout more than 100sec, add into nodeUpdate list", id, vgId, p->taskId, p->vgId); } } @@ -717,7 +715,7 @@ void handleNotReadyDownstreamTask(SStreamTask* pTask, SArray* pNotReadyList) { // the action of add status may incur the restart procedure, which should NEVER be executed in the timer thread. // The restart of all tasks requires that all tasks should not have active timer for now. Therefore, the execution -// of restart in timer thread will result in a dead lock. +// of restart in timer thread will result in a deadlock. 
int32_t addDownstreamFailedStatusResultAsync(SMsgCb* pMsgCb, int32_t vgId, int64_t streamId, int32_t taskId) { return streamTaskSchedTask(pMsgCb, vgId, streamId, taskId, STREAM_EXEC_T_ADD_FAILED_TASK); } diff --git a/source/libs/stream/src/streamCheckpoint.c b/source/libs/stream/src/streamCheckpoint.c index 876cd1b472..0ec66cd2ce 100644 --- a/source/libs/stream/src/streamCheckpoint.c +++ b/source/libs/stream/src/streamCheckpoint.c @@ -21,7 +21,9 @@ static int32_t downloadCheckpointDataByName(const char* id, const char* fname, const char* dstName); static int32_t deleteCheckpointFile(const char* id, const char* name); static int32_t streamTaskUploadCheckpoint(const char* id, const char* path, int64_t checkpointId); +#ifdef BUILD_NO_CALL static int32_t deleteCheckpoint(const char* id); +#endif static int32_t downloadCheckpointByNameS3(const char* id, const char* fname, const char* dstName); static int32_t continueDispatchCheckpointTriggerBlock(SStreamDataBlock* pBlock, SStreamTask* pTask); static int32_t appendCheckpointIntoInputQ(SStreamTask* pTask, int32_t checkpointType, int64_t checkpointId, @@ -998,7 +1000,7 @@ static int32_t doFindNotSendUpstream(SStreamTask* pTask, SArray* pList, SArray** return 0; } -static int32_t chkptTriggerRecvMonitorHelper(SStreamTask* pTask, void* param, SArray** ppNotSendList) { +int32_t chkptTriggerRecvMonitorHelper(SStreamTask* pTask, void* param, SArray** ppNotSendList) { const char* id = pTask->id.idStr; SArray* pList = pTask->upstreamInfo.pList; // send msg to retrieve checkpoint trigger msg SActiveCheckpointInfo* pActiveInfo = pTask->chkInfo.pActiveInfo; @@ -1492,6 +1494,7 @@ int32_t streamTaskDownloadCheckpointData(const char* id, char* path, int64_t che return 0; } +#ifdef BUILD_NO_CALL int32_t deleteCheckpoint(const char* id) { if (id == NULL || strlen(id) == 0) { stError("deleteCheckpoint parameters invalid"); @@ -1504,6 +1507,7 @@ int32_t deleteCheckpoint(const char* id) { } return 0; } +#endif int32_t 
deleteCheckpointFile(const char* id, const char* name) { char object[128] = {0}; diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index ad9be63674..fa16cace25 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -1552,7 +1552,6 @@ static bool setDispatchRspInfo(SDispatchMsgInfo* pMsgInfo, int32_t vgId, int32_t int32_t* pFailed, const char* id) { int32_t numOfRsp = 0; int32_t numOfFailed = 0; - bool allRsp = false; int32_t numOfDispatchBranch = taosArrayGetSize(pMsgInfo->pSendInfo); @@ -1639,6 +1638,7 @@ int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, i int32_t notRsp = 0; int32_t numOfFailed = 0; bool triggerDispatchRsp = false; + bool addFailure = false; SActiveCheckpointInfo* pInfo = pTask->chkInfo.pActiveInfo; int64_t tmpCheckpointId = -1; int32_t tmpTranId = -1; @@ -1698,6 +1698,7 @@ int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, i } else { if (pRsp->inputStatus == TASK_INPUT_STATUS__REFUSED) { // todo handle the role-changed during checkpoint generation, add test case + addFailure = true; stError( "s-task:%s downstream task:0x%x(vgId:%d) refused the dispatch msg, downstream may become follower or " "restart already, treat it as success", @@ -1745,6 +1746,11 @@ int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, i msgId, pRsp->downstreamTaskId, pRsp->downstreamNodeId, pRsp->inputStatus, tstrerror(code)); } + if (addFailure) { // add failure downstream node id, and start the nodeEp update procedure + // ignore the return error and continue + int32_t unused = streamTaskAddIntoNodeUpdateList(pTask, pRsp->downstreamNodeId); + } + // all msg rsp already, continue // we need to re-try send dispatch msg to downstream tasks if (allRsp && (numOfFailed == 0)) { @@ -1866,6 +1872,11 @@ int32_t streamProcessDispatchMsg(SStreamTask* pTask, SStreamDispatchReq* pReq, S } } +#if 0 + // inject 
errors, and always refuse the upstream dispatch msg and trigger the task nodeEpset update trans. + status = TASK_INPUT_STATUS__REFUSED; +#endif + { // do send response with the input status int32_t code = buildDispatchRsp(pTask, pReq, status, &pRsp->pCont); diff --git a/source/libs/stream/test/streamCheckPointTest.cpp b/source/libs/stream/test/streamCheckPointTest.cpp new file mode 100644 index 0000000000..80dd3ec142 --- /dev/null +++ b/source/libs/stream/test/streamCheckPointTest.cpp @@ -0,0 +1,270 @@ +#include +#include "tstream.h" +#include "streamInt.h" + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wwrite-strings" +#pragma GCC diagnostic ignored "-Wunused-function" +#pragma GCC diagnostic ignored "-Wunused-variable" +#pragma GCC diagnostic ignored "-Wsign-compare" +#pragma GCC diagnostic ignored "-Wsign-compare" +#pragma GCC diagnostic ignored "-Wformat" +#pragma GCC diagnostic ignored "-Wint-to-pointer-cast" +#pragma GCC diagnostic ignored "-Wpointer-arith" + +void initTaskLock(SStreamTask* pTask) { + TdThreadMutexAttr attr = {0}; + int32_t code = taosThreadMutexAttrInit(&attr); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + + code = taosThreadMutexAttrSetType(&attr, PTHREAD_MUTEX_RECURSIVE); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + + code = taosThreadMutexInit(&pTask->lock, &attr); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + + code = taosThreadMutexAttrDestroy(&attr); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); +} + +TEST(streamCheckpointTest, StreamTaskProcessCheckpointTriggerRsp) { + SStreamTask* pTask = NULL; + int64_t uid = 1111111111111111; + SArray* array = taosArrayInit(4, POINTER_BYTES); + int32_t code = tNewStreamTask(uid, TASK_LEVEL__SINK, NULL, false, 0, 0, array, + false, 1, &pTask); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + + initTaskLock(pTask); + + code = streamTaskCreateActiveChkptInfo(&pTask->chkInfo.pActiveInfo); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + + pTask->chkInfo.pActiveInfo->activeId = 123111; + pTask->chkInfo.pActiveInfo->transId = 
4561111; + + streamTaskSetStatusReady(pTask); + code = streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_GEN_CHECKPOINT); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + + SCheckpointTriggerRsp pRsp; + memset(&pRsp, 0, sizeof(SCheckpointTriggerRsp)); + pRsp.rspCode = TSDB_CODE_SUCCESS; + pRsp.checkpointId = 123; + pRsp.transId = 456; + pRsp.upstreamTaskId = 789; + + code = streamTaskProcessCheckpointTriggerRsp(pTask, &pRsp); + ASSERT_NE(code, TSDB_CODE_SUCCESS); + + pRsp.rspCode = TSDB_CODE_FAILED; + code = streamTaskProcessCheckpointTriggerRsp(pTask, &pRsp); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + + tFreeStreamTask(pTask); + taosArrayDestroy(array); +} + +TEST(streamCheckpointTest, StreamTaskSetFailedCheckpointId) { + SStreamTask* pTask = NULL; + int64_t uid = 1111111111111111; + SArray* array = taosArrayInit(4, POINTER_BYTES); + int32_t code = tNewStreamTask(uid, TASK_LEVEL__SINK, NULL, false, 0, 0, array, + false, 1, &pTask); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + + initTaskLock(pTask); + + code = streamTaskCreateActiveChkptInfo(&pTask->chkInfo.pActiveInfo); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + + SActiveCheckpointInfo* pInfo = pTask->chkInfo.pActiveInfo; + pInfo->failedId = 0; + + int64_t failedCheckpointId = 123; + + streamTaskSetFailedCheckpointId(pTask, failedCheckpointId); + ASSERT_EQ(pInfo->failedId, failedCheckpointId); + + streamTaskSetFailedCheckpointId(pTask, 0); + ASSERT_EQ(pInfo->failedId, failedCheckpointId); + + streamTaskSetFailedCheckpointId(pTask, pInfo->failedId - 1); + ASSERT_EQ(pInfo->failedId, failedCheckpointId); + tFreeStreamTask(pTask); + taosArrayDestroy(array); +} + +TEST(UploadCheckpointDataTest, UploadSuccess) { + streamMetaInit(); + SStreamTask* pTask = NULL; + int64_t uid = 1111111111111111; + SArray* array = taosArrayInit(4, POINTER_BYTES); + int32_t code = tNewStreamTask(uid, TASK_LEVEL__SINK, NULL, false, 0, 0, array, + false, 1, &pTask); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + + initTaskLock(pTask); + + code = 
streamTaskCreateActiveChkptInfo(&pTask->chkInfo.pActiveInfo); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + + int64_t checkpointId = 123; + int64_t dbRefId = 1; + ECHECKPOINT_BACKUP_TYPE type = DATA_UPLOAD_S3; + + STaskDbWrapper* pBackend = NULL; + int64_t processVer = -1; + const char *path = "/tmp/backend3/stream"; + code = streamMetaOpen((path), NULL, NULL, NULL, 0, 0, NULL, &pTask->pMeta); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + + SStreamState *pState = streamStateOpen((char *)path, pTask, 0, 0); + ASSERT(pState != NULL); + + pTask->pBackend = pState->pTdbState->pOwner->pBackend; + + code = taskDbDoCheckpoint(pTask->pBackend, checkpointId, 0); + ASSERT(code == 0); + + int32_t result = uploadCheckpointData(pTask, checkpointId, dbRefId, type); + + EXPECT_EQ(result, TSDB_CODE_SUCCESS) << "uploadCheckpointData should return 0 on success"; + tFreeStreamTask(pTask); + taosRemoveDir(path); + streamStateClose(pState, true); + taosArrayDestroy(array); +} + +TEST(UploadCheckpointDataTest, UploadDisabled) { + SStreamTask* pTask = NULL; + int64_t uid = 2222222222222; + SArray* array = taosArrayInit(4, POINTER_BYTES); + int32_t code = tNewStreamTask(uid, TASK_LEVEL__SINK, NULL, false, 0, 0, array, + false, 1, &pTask); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + + initTaskLock(pTask); + + code = streamTaskCreateActiveChkptInfo(&pTask->chkInfo.pActiveInfo); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + + int64_t checkpointId = 123; + int64_t dbRefId = 1; + + STaskDbWrapper* pBackend = NULL; + int64_t processVer = -1; + const char *path = "/tmp/backend4/stream"; + code = streamMetaOpen((path), NULL, NULL, NULL, 0, 0, NULL, &pTask->pMeta); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + + SStreamState *pState = streamStateOpen((char *)path, pTask, 0, 0); + ASSERT(pState != NULL); + + pTask->pBackend = pState->pTdbState->pOwner->pBackend; + + code = taskDbDoCheckpoint(pTask->pBackend, checkpointId, 0); + ASSERT(code == 0); + + ECHECKPOINT_BACKUP_TYPE type = DATA_UPLOAD_DISABLE; + + int32_t result = 
uploadCheckpointData(pTask, checkpointId, dbRefId, type); + + EXPECT_NE(result, TSDB_CODE_SUCCESS) << "uploadCheckpointData should return 0 when backup type is disabled"; + + streamStateClose(pState, true); + tFreeStreamTask(pTask); + taosArrayDestroy(array); +} + +TEST(StreamTaskAlreadySendTriggerTest, AlreadySendTrigger) { + SStreamTask* pTask = NULL; + int64_t uid = 2222222222222; + SArray* array = taosArrayInit(4, POINTER_BYTES); + int32_t code = tNewStreamTask(uid, TASK_LEVEL__SINK, NULL, false, 0, 0, array, + false, 1, &pTask); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + + initTaskLock(pTask); + + code = streamTaskCreateActiveChkptInfo(&pTask->chkInfo.pActiveInfo); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + + pTask->chkInfo.pActiveInfo->activeId = 123111; + pTask->chkInfo.pActiveInfo->transId = 4561111; + + streamTaskSetStatusReady(pTask); + code = streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_GEN_CHECKPOINT); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + + int32_t downstreamNodeId = 1; + int64_t sendingCheckpointId = 123; + TSKEY ts = taosGetTimestampMs(); + + STaskTriggerSendInfo triggerInfo; + triggerInfo.sendTs = ts; + triggerInfo.recved = false; + triggerInfo.nodeId = downstreamNodeId; + + taosArrayPush(pTask->chkInfo.pActiveInfo->pDispatchTriggerList, &triggerInfo); + + pTask->chkInfo.pActiveInfo->dispatchTrigger = true; + bool result = streamTaskAlreadySendTrigger(pTask, downstreamNodeId); + + EXPECT_TRUE(result) << "The trigger message should have been sent to the downstream node"; + + tFreeStreamTask(pTask); + taosArrayDestroy(array); +} + +TEST(ChkptTriggerRecvMonitorHelperTest, chkptTriggerRecvMonitorHelper) { + SStreamTask* pTask = NULL; + int64_t uid = 2222222222222; + SArray* array = taosArrayInit(4, POINTER_BYTES); + int32_t code = tNewStreamTask(uid, TASK_LEVEL__SINK, NULL, false, 0, 0, array, + false, 1, &pTask); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + + initTaskLock(pTask); + + const char *path = "/tmp/backend5/stream"; + code = 
streamMetaOpen((path), NULL, NULL, NULL, 0, 0, NULL, &pTask->pMeta); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + + code = streamTaskCreateActiveChkptInfo(&pTask->chkInfo.pActiveInfo); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + + pTask->chkInfo.pActiveInfo->activeId = 123111; + pTask->chkInfo.pActiveInfo->chkptTriggerMsgTmr.launchChkptId = pTask->chkInfo.pActiveInfo->activeId; + pTask->chkInfo.pActiveInfo->transId = 4561111; + pTask->chkInfo.startTs = 11111; + + streamTaskSetStatusReady(pTask); + code = streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_GEN_CHECKPOINT); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + + int32_t downstreamNodeId = 1; + int64_t sendingCheckpointId = 123; + TSKEY ts = taosGetTimestampMs(); + + STaskTriggerSendInfo triggerInfo; + triggerInfo.sendTs = ts; + triggerInfo.recved = false; + triggerInfo.nodeId = downstreamNodeId; + + taosArrayPush(pTask->chkInfo.pActiveInfo->pDispatchTriggerList, &triggerInfo); + + pTask->chkInfo.pActiveInfo->dispatchTrigger = true; + SArray* array1 = NULL; + code = chkptTriggerRecvMonitorHelper(pTask, NULL, &array1); + EXPECT_EQ(code, TSDB_CODE_SUCCESS); + + pTask->pMeta->fatalInfo.code = TSDB_CODE_SUCCESS; + streamSetFatalError(pTask->pMeta, code, __func__, __LINE__); + + pTask->pMeta->fatalInfo.code = TSDB_CODE_FAILED; + streamSetFatalError(pTask->pMeta, code, __func__, __LINE__); + tFreeStreamTask(pTask); + taosArrayDestroy(array); + taosArrayDestroy(array1); +} diff --git a/source/libs/tdb/src/db/tdbBtree.c b/source/libs/tdb/src/db/tdbBtree.c index 2333a4a6a2..19a3f211b1 100644 --- a/source/libs/tdb/src/db/tdbBtree.c +++ b/source/libs/tdb/src/db/tdbBtree.c @@ -1491,7 +1491,7 @@ static int tdbBtreeDecodePayload(SPage *pPage, const SCell *pCell, int nHeader, ofpCell = tdbPageGetCell(ofp, 0); int lastKeyPage = 0; - if (nLeftKey <= maxLocal - sizeof(SPgno)) { + if (nLeftKey <= ofp->maxLocal - sizeof(SPgno)) { bytes = nLeftKey; lastKeyPage = 1; lastKeyPageSpace = ofp->maxLocal - sizeof(SPgno) - nLeftKey; diff --git 
a/source/libs/tfs/src/tfs.c b/source/libs/tfs/src/tfs.c index 4ac72e8918..ecc55517b3 100644 --- a/source/libs/tfs/src/tfs.c +++ b/source/libs/tfs/src/tfs.c @@ -726,3 +726,22 @@ int32_t tfsGetMonitorInfo(STfs *pTfs, SMonDiskInfo *pInfo) { TAOS_RETURN(0); } + +int32_t tfsUpdateDiskDisable(STfs *pTfs, const char *dir, int8_t disable) { + TAOS_UNUSED(tfsLock(pTfs)); + for (int32_t level = 0; level < pTfs->nlevel; level++) { + STfsTier *pTier = &pTfs->tiers[level]; + for (int32_t disk = 0; disk < pTier->ndisk; ++disk) { + STfsDisk *pDisk = pTier->disks[disk]; + if (strcmp(pDisk->path, dir) == 0) { + pDisk->disable = disable; + TAOS_UNUSED(tfsUnLock(pTfs)); + fInfo("disk %s is %s", dir, disable ? "disabled" : "enabled"); + TAOS_RETURN(TSDB_CODE_SUCCESS); + } + } + } + TAOS_UNUSED(tfsUnLock(pTfs)); + fError("failed to update disk disable since %s not found", dir); + TAOS_RETURN(TSDB_CODE_FS_NO_VALID_DISK); +} \ No newline at end of file diff --git a/source/os/src/osAtomic.c b/source/os/src/osAtomic.c index 5da2307cb3..d82c56c99a 100644 --- a/source/os/src/osAtomic.c +++ b/source/os/src/osAtomic.c @@ -355,7 +355,7 @@ void atomic_store_double(double volatile* ptr, double val) { double_number ret_num = {0}; ret_num.i = atomic_val_compare_exchange_64((volatile int64_t*)ptr, old_num.i, new_num.i); - if (ret_num.i == old_num.i) return; + if (ret_num.i == old_num.i) break; } } @@ -414,6 +414,8 @@ int64_t atomic_exchange_64(int64_t volatile* ptr, int64_t val) { } double atomic_exchange_double(double volatile* ptr, double val) { + double ret = 0; + for (;;) { double_number old_num = {0}; old_num.d = *ptr; // current old value @@ -425,9 +427,11 @@ double atomic_exchange_double(double volatile* ptr, double val) { ret_num.i = atomic_val_compare_exchange_64((volatile int64_t*)ptr, old_num.i, new_num.i); if (ret_num.i == old_num.i) { - return ret_num.d; + ret = ret_num.d; + break; } } + return ret; } void* atomic_exchange_ptr(void* ptr, void* val) { @@ -589,6 +593,8 @@ int64_t 
atomic_fetch_add_64(int64_t volatile* ptr, int64_t val) { } double atomic_fetch_add_double(double volatile* ptr, double val) { + double ret = 0; + for (;;) { double_number old_num = {0}; old_num.d = *ptr; // current old value @@ -599,8 +605,13 @@ double atomic_fetch_add_double(double volatile* ptr, double val) { double_number ret_num = {0}; ret_num.i = atomic_val_compare_exchange_64((volatile int64_t*)ptr, old_num.i, new_num.i); - if (ret_num.i == old_num.i) return ret_num.d; + if (ret_num.i == old_num.i) { + ret = ret_num.d; + break; + } } + + return ret; } void* atomic_fetch_add_ptr(void* ptr, int64_t val) { @@ -710,6 +721,8 @@ int64_t atomic_fetch_sub_64(int64_t volatile* ptr, int64_t val) { } double atomic_fetch_sub_double(double volatile* ptr, double val) { + double ret = 0; + for (;;) { double_number old_num = {0}; old_num.d = *ptr; // current old value @@ -720,8 +733,13 @@ double atomic_fetch_sub_double(double volatile* ptr, double val) { double_number ret_num = {0}; ret_num.i = atomic_val_compare_exchange_64((volatile int64_t*)ptr, old_num.i, new_num.i); - if (ret_num.i == old_num.i) return ret_num.d; + if (ret_num.i == old_num.i) { + ret = ret_num.d; + break; + } } + + return ret; } void* atomic_fetch_sub_ptr(void* ptr, int64_t val) { diff --git a/source/os/src/osThread.c b/source/os/src/osThread.c index f888835d95..603f5da3f7 100644 --- a/source/os/src/osThread.c +++ b/source/os/src/osThread.c @@ -23,18 +23,16 @@ int32_t taosThreadCreate(TdThread *tid, const TdThreadAttr *attr, void *(*start) int32_t code = pthread_create(tid, attr, start, arg); if (code) { taosThreadClear(tid); - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; } -int32_t taosThreadAttrDestroy(TdThreadAttr *attr) { +int32_t taosThreadAttrDestroy(TdThreadAttr *attr) { OS_PARAM_CHECK(attr); - int32_t code = pthread_attr_destroy(attr); + int32_t code = pthread_attr_destroy(attr); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - 
return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; } @@ -44,8 +42,7 @@ int32_t taosThreadAttrGetDetachState(const TdThreadAttr *attr, int32_t *detachst OS_PARAM_CHECK(detachstate); int32_t code = pthread_attr_getdetachstate(attr, detachstate); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; } @@ -55,8 +52,7 @@ int32_t taosThreadAttrGetInheritSched(const TdThreadAttr *attr, int32_t *inherit OS_PARAM_CHECK(inheritsched); int32_t code = pthread_attr_getinheritsched(attr, inheritsched); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; } @@ -66,8 +62,7 @@ int32_t taosThreadAttrGetSchedParam(const TdThreadAttr *attr, struct sched_param OS_PARAM_CHECK(param); int32_t code = pthread_attr_getschedparam(attr, param); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; } @@ -77,8 +72,7 @@ int32_t taosThreadAttrGetSchedPolicy(const TdThreadAttr *attr, int32_t *policy) OS_PARAM_CHECK(policy); int32_t code = pthread_attr_getschedpolicy(attr, policy); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; } @@ -88,8 +82,7 @@ int32_t taosThreadAttrGetScope(const TdThreadAttr *attr, int32_t *contentionscop OS_PARAM_CHECK(contentionscope); int32_t code = pthread_attr_getscope(attr, contentionscope); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; } @@ -99,18 +92,16 @@ int32_t taosThreadAttrGetStackSize(const TdThreadAttr *attr, size_t *stacksize) OS_PARAM_CHECK(stacksize); int32_t code = pthread_attr_getstacksize(attr, stacksize); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; } int32_t 
taosThreadAttrInit(TdThreadAttr *attr) { OS_PARAM_CHECK(attr); - int32_t code = pthread_attr_init(attr); + int32_t code = pthread_attr_init(attr); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; } @@ -119,8 +110,7 @@ int32_t taosThreadAttrSetDetachState(TdThreadAttr *attr, int32_t detachstate) { OS_PARAM_CHECK(attr); int32_t code = pthread_attr_setdetachstate(attr, detachstate); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; } @@ -129,8 +119,7 @@ int32_t taosThreadAttrSetInheritSched(TdThreadAttr *attr, int32_t inheritsched) OS_PARAM_CHECK(attr); int32_t code = pthread_attr_setinheritsched(attr, inheritsched); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; } @@ -139,8 +128,7 @@ int32_t taosThreadAttrSetSchedParam(TdThreadAttr *attr, const struct sched_param OS_PARAM_CHECK(attr); int32_t code = pthread_attr_setschedparam(attr, param); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; } @@ -149,8 +137,7 @@ int32_t taosThreadAttrSetSchedPolicy(TdThreadAttr *attr, int32_t policy) { OS_PARAM_CHECK(attr); int32_t code = pthread_attr_setschedpolicy(attr, policy); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; } @@ -159,8 +146,7 @@ int32_t taosThreadAttrSetScope(TdThreadAttr *attr, int32_t contentionscope) { OS_PARAM_CHECK(attr); int32_t code = pthread_attr_setscope(attr, contentionscope); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; } @@ -169,17 +155,15 @@ int32_t taosThreadAttrSetStackSize(TdThreadAttr *attr, size_t stacksize) { OS_PARAM_CHECK(attr); int32_t code = pthread_attr_setstacksize(attr, stacksize); if 
(code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; } -int32_t taosThreadCancel(TdThread thread) { - int32_t code = pthread_cancel(thread); +int32_t taosThreadCancel(TdThread thread) { + int32_t code = pthread_cancel(thread); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; } @@ -191,8 +175,7 @@ int32_t taosThreadCondDestroy(TdThreadCond *cond) { #else int32_t code = pthread_cond_destroy(cond); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; #endif @@ -206,8 +189,7 @@ int32_t taosThreadCondInit(TdThreadCond *cond, const TdThreadCondAttr *attr) { #else int32_t code = pthread_cond_init(cond, attr); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; #endif @@ -221,8 +203,7 @@ int32_t taosThreadCondSignal(TdThreadCond *cond) { #else int32_t code = pthread_cond_signal(cond); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; #endif @@ -236,8 +217,7 @@ int32_t taosThreadCondBroadcast(TdThreadCond *cond) { #else int32_t code = pthread_cond_broadcast(cond); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; #endif @@ -254,8 +234,7 @@ int32_t taosThreadCondWait(TdThreadCond *cond, TdThreadMutex *mutex) { #else int32_t code = pthread_cond_wait(cond, mutex); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; #endif @@ -274,7 +253,7 @@ int32_t taosThreadCondTimedWait(TdThreadCond *cond, TdThreadMutex *mutex, const return TAOS_SYSTEM_WINAPI_ERROR(error); #else int32_t code = pthread_cond_timedwait(cond, mutex, abstime); - if(code == ETIMEDOUT) { + if (code == 
ETIMEDOUT) { return TSDB_CODE_TIMEOUT_ERROR; } else if (code) { return TAOS_SYSTEM_ERROR(code); @@ -291,14 +270,14 @@ int32_t taosThreadCondAttrDestroy(TdThreadCondAttr *attr) { OS_PARAM_CHECK(attr); int32_t code = pthread_condattr_destroy(attr); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; #endif } int32_t taosThreadCondAttrGetPshared(const TdThreadCondAttr *attr, int32_t *pshared) { + OS_PARAM_CHECK(attr); OS_PARAM_CHECK(pshared); #ifdef __USE_WIN_THREAD if (pshared) *pshared = PTHREAD_PROCESS_PRIVATE; @@ -307,8 +286,7 @@ int32_t taosThreadCondAttrGetPshared(const TdThreadCondAttr *attr, int32_t *psha OS_PARAM_CHECK(attr); int32_t code = pthread_condattr_getpshared(attr, pshared); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; #endif @@ -321,8 +299,7 @@ int32_t taosThreadCondAttrInit(TdThreadCondAttr *attr) { OS_PARAM_CHECK(attr); int32_t code = pthread_condattr_init(attr); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; #endif @@ -337,8 +314,7 @@ int32_t taosThreadCondAttrSetclock(TdThreadCondAttr *attr, int clockId) { OS_PARAM_CHECK(attr); int32_t code = pthread_condattr_setclock(attr, clockId); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; #endif @@ -351,28 +327,24 @@ int32_t taosThreadCondAttrSetPshared(TdThreadCondAttr *attr, int32_t pshared) { #else int32_t code = pthread_condattr_setpshared(attr, pshared); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; #endif } -int32_t taosThreadDetach(TdThread thread) { - int32_t code = pthread_detach(thread); +int32_t taosThreadDetach(TdThread thread) { + int32_t code = pthread_detach(thread); if (code) { - terrno = 
TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; } -int32_t taosThreadEqual(TdThread t1, TdThread t2) { - return pthread_equal(t1, t2); -} +int32_t taosThreadEqual(TdThread t1, TdThread t2) { return pthread_equal(t1, t2); } -void taosThreadExit(void *valuePtr) { - if(valuePtr) return pthread_exit(valuePtr); +void taosThreadExit(void *valuePtr) { + if (valuePtr) return pthread_exit(valuePtr); } int32_t taosThreadGetSchedParam(TdThread thread, int32_t *policy, struct sched_param *param) { @@ -380,21 +352,17 @@ int32_t taosThreadGetSchedParam(TdThread thread, int32_t *policy, struct sched_p OS_PARAM_CHECK(param); int32_t code = pthread_getschedparam(thread, policy, param); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; } -void *taosThreadGetSpecific(TdThreadKey key) { - return pthread_getspecific(key); -} +void *taosThreadGetSpecific(TdThreadKey key) { return pthread_getspecific(key); } -int32_t taosThreadJoin(TdThread thread, void **valuePtr) { - int32_t code = pthread_join(thread, valuePtr); +int32_t taosThreadJoin(TdThread thread, void **valuePtr) { + int32_t code = pthread_join(thread, valuePtr); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; } @@ -403,26 +371,23 @@ int32_t taosThreadKeyCreate(TdThreadKey *key, void (*destructor)(void *)) { OS_PARAM_CHECK(key); int32_t code = pthread_key_create(key, destructor); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; } -int32_t taosThreadKeyDelete(TdThreadKey key) { - int32_t code = pthread_key_delete(key); +int32_t taosThreadKeyDelete(TdThreadKey key) { + int32_t code = pthread_key_delete(key); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; } -int32_t 
taosThreadKill(TdThread thread, int32_t sig) { - int32_t code = pthread_kill(thread, sig); +int32_t taosThreadKill(TdThread thread, int32_t sig) { + int32_t code = pthread_kill(thread, sig); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; } @@ -439,8 +404,7 @@ int32_t taosThreadMutexDestroy(TdThreadMutex *mutex) { #else int32_t code = pthread_mutex_destroy(mutex); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; #endif @@ -459,8 +423,7 @@ int32_t taosThreadMutexInit(TdThreadMutex *mutex, const TdThreadMutexAttr *attr) #else int32_t code = pthread_mutex_init(mutex, attr); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; #endif @@ -474,8 +437,7 @@ int32_t taosThreadMutexLock(TdThreadMutex *mutex) { #else int32_t code = pthread_mutex_lock(mutex); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; #endif @@ -507,8 +469,7 @@ int32_t taosThreadMutexUnlock(TdThreadMutex *mutex) { #else int32_t code = pthread_mutex_unlock(mutex); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; #endif @@ -521,8 +482,7 @@ int32_t taosThreadMutexAttrDestroy(TdThreadMutexAttr *attr) { OS_PARAM_CHECK(attr); int32_t code = pthread_mutexattr_destroy(attr); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; #endif @@ -537,8 +497,7 @@ int32_t taosThreadMutexAttrGetPshared(const TdThreadMutexAttr *attr, int32_t *ps OS_PARAM_CHECK(attr); int32_t code = pthread_mutexattr_getpshared(attr, pshared); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; #endif @@ -557,8 +516,7 @@ int32_t 
taosThreadMutexAttrGetType(const TdThreadMutexAttr *attr, int32_t *kind) OS_PARAM_CHECK(attr); int32_t code = pthread_mutexattr_gettype(attr, kind); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; #endif @@ -571,8 +529,7 @@ int32_t taosThreadMutexAttrInit(TdThreadMutexAttr *attr) { OS_PARAM_CHECK(attr); int32_t code = pthread_mutexattr_init(attr); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; #endif @@ -585,8 +542,7 @@ int32_t taosThreadMutexAttrSetPshared(TdThreadMutexAttr *attr, int32_t pshared) OS_PARAM_CHECK(attr); int32_t code = pthread_mutexattr_setpshared(attr, pshared); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; #endif @@ -603,8 +559,7 @@ int32_t taosThreadMutexAttrSetType(TdThreadMutexAttr *attr, int32_t kind) { OS_PARAM_CHECK(attr); int32_t code = pthread_mutexattr_settype(attr, kind); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; #endif @@ -613,8 +568,7 @@ int32_t taosThreadMutexAttrSetType(TdThreadMutexAttr *attr, int32_t kind) { int32_t taosThreadOnce(TdThreadOnce *onceControl, void (*initRoutine)(void)) { int32_t code = pthread_once(onceControl, initRoutine); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; } @@ -626,11 +580,10 @@ int32_t taosThreadRwlockDestroy(TdThreadRwlock *rwlock) { */ return 0; #else - OS_PARAM_CHECK(rwlock); + OS_PARAM_CHECK(rwlock); int32_t code = pthread_rwlock_destroy(rwlock); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; #endif @@ -645,8 +598,7 @@ int32_t taosThreadRwlockInit(TdThreadRwlock *rwlock, const TdThreadRwlockAttr *a #else int32_t code = 
pthread_rwlock_init(rwlock, attr); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; #endif @@ -660,8 +612,7 @@ int32_t taosThreadRwlockRdlock(TdThreadRwlock *rwlock) { #else int32_t code = pthread_rwlock_rdlock(rwlock); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; #endif @@ -683,8 +634,7 @@ int32_t taosThreadRwlockTryRdlock(TdThreadRwlock *rwlock) { #else int32_t code = pthread_rwlock_tryrdlock(rwlock); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; #endif @@ -699,8 +649,7 @@ int32_t taosThreadRwlockTryWrlock(TdThreadRwlock *rwlock) { #else int32_t code = pthread_rwlock_trywrlock(rwlock); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; #endif @@ -718,8 +667,7 @@ int32_t taosThreadRwlockUnlock(TdThreadRwlock *rwlock) { #else int32_t code = pthread_rwlock_unlock(rwlock); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; #endif @@ -734,8 +682,7 @@ int32_t taosThreadRwlockWrlock(TdThreadRwlock *rwlock) { #else int32_t code = pthread_rwlock_wrlock(rwlock); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; #endif @@ -748,14 +695,14 @@ int32_t taosThreadRwlockAttrDestroy(TdThreadRwlockAttr *attr) { OS_PARAM_CHECK(attr); int32_t code = pthread_rwlockattr_destroy(attr); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; #endif } int32_t taosThreadRwlockAttrGetPshared(const TdThreadRwlockAttr *attr, int32_t *pshared) { + OS_PARAM_CHECK(attr); OS_PARAM_CHECK(pshared); #ifdef __USE_WIN_THREAD if (pshared) *pshared = PTHREAD_PROCESS_PRIVATE; @@ -763,8 
+710,7 @@ int32_t taosThreadRwlockAttrGetPshared(const TdThreadRwlockAttr *attr, int32_t * #else int32_t code = pthread_rwlockattr_getpshared(attr, pshared); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; #endif @@ -777,8 +723,7 @@ int32_t taosThreadRwlockAttrInit(TdThreadRwlockAttr *attr) { OS_PARAM_CHECK(attr); int32_t code = pthread_rwlockattr_init(attr); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; #endif @@ -791,8 +736,7 @@ int32_t taosThreadRwlockAttrSetPshared(TdThreadRwlockAttr *attr, int32_t pshared OS_PARAM_CHECK(attr); int32_t code = pthread_rwlockattr_setpshared(attr, pshared); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; #endif @@ -800,20 +744,18 @@ int32_t taosThreadRwlockAttrSetPshared(TdThreadRwlockAttr *attr, int32_t pshared TdThread taosThreadSelf(void) { return pthread_self(); } -int32_t taosThreadSetCancelState(int32_t state, int32_t *oldstate) { - int32_t code = pthread_setcancelstate(state, oldstate); +int32_t taosThreadSetCancelState(int32_t state, int32_t *oldstate) { + int32_t code = pthread_setcancelstate(state, oldstate); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; } -int32_t taosThreadSetCancelType(int32_t type, int32_t *oldtype) { - int32_t code = pthread_setcanceltype(type, oldtype); +int32_t taosThreadSetCancelType(int32_t type, int32_t *oldtype) { + int32_t code = pthread_setcanceltype(type, oldtype); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; } @@ -822,18 +764,16 @@ int32_t taosThreadSetSchedParam(TdThread thread, int32_t policy, const struct sc OS_PARAM_CHECK(param); int32_t code = pthread_setschedparam(thread, policy, param); if (code) { 
- terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; } -int32_t taosThreadSetSpecific(TdThreadKey key, const void *value) { +int32_t taosThreadSetSpecific(TdThreadKey key, const void *value) { OS_PARAM_CHECK(value); - int32_t code = pthread_setspecific(key, value); + int32_t code = pthread_setspecific(key, value); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; } @@ -845,8 +785,7 @@ int32_t taosThreadSpinDestroy(TdThreadSpinlock *lock) { #else int32_t code = pthread_spin_destroy((pthread_spinlock_t *)lock); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; #endif @@ -860,8 +799,7 @@ int32_t taosThreadSpinInit(TdThreadSpinlock *lock, int32_t pshared) { #else int32_t code = pthread_spin_init((pthread_spinlock_t *)lock, pshared); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; #endif @@ -874,8 +812,7 @@ int32_t taosThreadSpinLock(TdThreadSpinlock *lock) { #else int32_t code = pthread_spin_lock((pthread_spinlock_t *)lock); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; #endif @@ -901,20 +838,17 @@ int32_t taosThreadSpinUnlock(TdThreadSpinlock *lock) { #else int32_t code = pthread_spin_unlock((pthread_spinlock_t *)lock); if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - return terrno; + return (terrno = TAOS_SYSTEM_ERROR(code)); } return code; #endif } -void taosThreadTestCancel(void) { - return pthread_testcancel(); -} +void taosThreadTestCancel(void) { return pthread_testcancel(); } -void taosThreadClear(TdThread *thread) { +void taosThreadClear(TdThread *thread) { if (!thread) return; - (void)memset(thread, 0, sizeof(TdThread)); + (void)memset(thread, 0, sizeof(TdThread)); } #ifdef WINDOWS diff --git 
a/source/os/src/osTime.c b/source/os/src/osTime.c index 29cbcaeb2c..75cb2b91a2 100644 --- a/source/os/src/osTime.c +++ b/source/os/src/osTime.c @@ -31,7 +31,7 @@ #include #include #include -//#define TM_YEAR_BASE 1970 //origin +// #define TM_YEAR_BASE 1970 //origin #define TM_YEAR_BASE 1900 // slguan // This magic number is the number of 100 nanosecond intervals since January 1, 1601 (UTC) @@ -345,8 +345,7 @@ char *taosStrpTime(const char *buf, const char *fmt, struct tm *tm) { #endif } -size_t -taosStrfTime(char *s, size_t maxsize, char const *format, struct tm const *t){ +size_t taosStrfTime(char *s, size_t maxsize, char const *format, struct tm const *t) { if (!s || !format || !t) return 0; return strftime(s, maxsize, format, t); } @@ -379,7 +378,7 @@ int32_t taosTime(time_t *t) { if (t == NULL) { return TSDB_CODE_INVALID_PARA; } - time_t r = time(t); + time_t r = time(t); if (r == (time_t)-1) { return TAOS_SYSTEM_ERROR(errno); } @@ -433,15 +432,15 @@ time_t taosMktime(struct tm *timep, timezone_t tz) { return result; } int64_t tzw = 0; - #ifdef _MSC_VER - #if _MSC_VER >= 1900 - tzw = _timezone; - #endif - #endif +#ifdef _MSC_VER +#if _MSC_VER >= 1900 + tzw = _timezone; +#endif +#endif return user_mktime64(timep->tm_year + 1900, timep->tm_mon + 1, timep->tm_mday, timep->tm_hour, timep->tm_min, timep->tm_sec, tzw); #else - time_t r = tz != NULL ? mktime_z(tz, timep) : mktime(timep); + time_t r = (tz != NULL ? 
mktime_z(tz, timep) : mktime(timep)); if (r == (time_t)-1) { terrno = TAOS_SYSTEM_ERROR(errno); } @@ -450,7 +449,7 @@ time_t taosMktime(struct tm *timep, timezone_t tz) { #endif } -struct tm *taosGmTimeR(const time_t *timep, struct tm *result){ +struct tm *taosGmTimeR(const time_t *timep, struct tm *result) { if (timep == NULL || result == NULL) { return NULL; } @@ -461,7 +460,7 @@ struct tm *taosGmTimeR(const time_t *timep, struct tm *result){ #endif } -time_t taosTimeGm(struct tm *tmp){ +time_t taosTimeGm(struct tm *tmp) { if (tmp == NULL) { return -1; } @@ -530,7 +529,7 @@ struct tm *taosLocalTime(const time_t *timep, struct tm *result, char *buf, int3 } return result; #else - res = tz != NULL ? localtime_rz(tz, timep, result): localtime_r(timep, result); + res = (tz != NULL ? localtime_rz(tz, timep, result) : localtime_r(timep, result)); if (res == NULL && buf != NULL) { (void)snprintf(buf, bufSize, "NaN"); } @@ -544,8 +543,8 @@ int32_t taosGetTimestampSec() { return (int32_t)time(NULL); } int32_t taosClockGetTime(int clock_id, struct timespec *pTS) { int32_t code = 0; #ifdef WINDOWS - LARGE_INTEGER t; - FILETIME f; + LARGE_INTEGER t; + FILETIME f; GetSystemTimeAsFileTime(&f); t.QuadPart = f.dwHighDateTime; diff --git a/source/os/src/osTimezone.c b/source/os/src/osTimezone.c index 395c1cbb82..cad3f426f2 100644 --- a/source/os/src/osTimezone.c +++ b/source/os/src/osTimezone.c @@ -750,13 +750,14 @@ int32_t taosSetGlobalTimezone(const char *tz) { int32_t code = TSDB_CODE_SUCCESS; uDebug("[tz]set timezone to %s", tz) #ifdef WINDOWS - char winStr[TD_TIMEZONE_LEN * 2] = {0}; + char winStr[TD_TIMEZONE_LEN * 2] = {0}; for (size_t i = 0; i < W_TZ_CITY_NUM; i++) { if (strcmp(tz_win[i][0], tz) == 0) { char keyPath[256] = {0}; char keyValue[100] = {0}; DWORD keyValueSize = sizeof(keyValue); - snprintf(keyPath, sizeof(keyPath), "SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Time Zones\\%s", tz_win[i][1]); + snprintf(keyPath, sizeof(keyPath), "SOFTWARE\\Microsoft\\Windows 
NT\\CurrentVersion\\Time Zones\\%s", + tz_win[i][1]); RegGetValue(HKEY_LOCAL_MACHINE, keyPath, "Display", RRF_RT_ANY, NULL, (PVOID)&keyValue, &keyValueSize); if (keyValueSize > 0) { keyValue[4] = (keyValue[4] == '+' ? '-' : '+'); @@ -770,7 +771,7 @@ int32_t taosSetGlobalTimezone(const char *tz) { _putenv(winStr); _tzset(); #else - code = setenv("TZ", tz, 1); + code = setenv("TZ", tz, 1); if (-1 == code) { terrno = TAOS_SYSTEM_ERROR(errno); return terrno; @@ -779,7 +780,7 @@ int32_t taosSetGlobalTimezone(const char *tz) { tzset(); #endif - time_t tx1 = taosGetTimestampSec(); + time_t tx1 = taosGetTimestampSec(); return taosFormatTimezoneStr(tx1, tz, NULL, tsTimezoneStr); } @@ -797,7 +798,7 @@ int32_t taosGetLocalTimezoneOffset() { #endif } -int32_t taosFormatTimezoneStr(time_t t, const char* tz, timezone_t sp, char *outTimezoneStr){ +int32_t taosFormatTimezoneStr(time_t t, const char *tz, timezone_t sp, char *outTimezoneStr) { struct tm tm1; if (taosLocalTime(&t, &tm1, NULL, 0, sp) == NULL) { uError("%s failed to get local time: code:%d", __FUNCTION__, errno); @@ -813,16 +814,17 @@ int32_t taosFormatTimezoneStr(time_t t, const char* tz, timezone_t sp, char *out */ char str1[TD_TIMEZONE_LEN] = {0}; - if (taosStrfTime(str1, sizeof(str1), "%Z", &tm1) == 0){ + if (taosStrfTime(str1, sizeof(str1), "%Z", &tm1) == 0) { uError("failed to get timezone name"); return TSDB_CODE_TIME_ERROR; } char str2[TD_TIMEZONE_LEN] = {0}; - if (taosStrfTime(str2, sizeof(str2), "%z", &tm1) == 0){ + if (taosStrfTime(str2, sizeof(str2), "%z", &tm1) == 0) { uError("failed to get timezone offset"); return TSDB_CODE_TIME_ERROR; } + (void)snprintf(outTimezoneStr, TD_TIMEZONE_LEN, "%s (%s, %s)", tz, str1, str2); uDebug("[tz] system timezone:%s", outTimezoneStr); return 0; @@ -847,7 +849,6 @@ void getTimezoneStr(char *tz) { goto END; } while (0); - TdFilePtr pFile = taosOpenFile("/etc/timezone", TD_FILE_READ); if (pFile == NULL) { uWarn("[tz] failed to open /etc/timezone, reason:%s", 
strerror(errno)); @@ -876,8 +877,8 @@ int32_t taosGetSystemTimezone(char *outTimezoneStr) { char value[100] = {0}; char keyPath[100] = {0}; DWORD bufferSize = sizeof(value); - LONG result = RegGetValue(HKEY_LOCAL_MACHINE, "SYSTEM\\CurrentControlSet\\Control\\TimeZoneInformation", "TimeZoneKeyName", - RRF_RT_ANY, NULL, (PVOID)&value, &bufferSize); + LONG result = RegGetValue(HKEY_LOCAL_MACHINE, "SYSTEM\\CurrentControlSet\\Control\\TimeZoneInformation", + "TimeZoneKeyName", RRF_RT_ANY, NULL, (PVOID)&value, &bufferSize); if (result != ERROR_SUCCESS) { return TAOS_SYSTEM_WINAPI_ERROR(result); } @@ -891,9 +892,9 @@ int32_t taosGetSystemTimezone(char *outTimezoneStr) { if (result != ERROR_SUCCESS) { return TAOS_SYSTEM_WINAPI_ERROR(result); } - if (bufferSize > 0) { // value like (UTC+05:30) Chennai, Kolkata, Mumbai, New Delhi - snprintf(outTimezoneStr, TD_TIMEZONE_LEN, "%s (UTC, %c%c%c%c%c)", outTimezoneStr, - value[4], value[5], value[6], value[8], value[9]); + if (bufferSize > 0) { // value like (UTC+05:30) Chennai, Kolkata, Mumbai, New Delhi + snprintf(outTimezoneStr, TD_TIMEZONE_LEN, "%s (UTC, %c%c%c%c%c)", outTimezoneStr, value[4], value[5], + value[6], value[8], value[9]); } break; } @@ -903,7 +904,7 @@ int32_t taosGetSystemTimezone(char *outTimezoneStr) { #else char tz[TD_TIMEZONE_LEN] = {0}; getTimezoneStr(tz); - time_t tx1 = taosGetTimestampSec(); + time_t tx1 = taosGetTimestampSec(); return taosFormatTimezoneStr(tx1, tz, NULL, outTimezoneStr); #endif } \ No newline at end of file diff --git a/source/os/test/CMakeLists.txt b/source/os/test/CMakeLists.txt index 13fea463f7..d592168166 100644 --- a/source/os/test/CMakeLists.txt +++ b/source/os/test/CMakeLists.txt @@ -14,20 +14,32 @@ ENDIF() INCLUDE_DIRECTORIES(${TD_SOURCE_DIR}/src/util/inc) -# osTests -add_executable(osTests "osTests.cpp") -target_link_libraries(osTests os util gtest_main) +if(TD_LINUX) +add_executable(osAtomicTests "osAtomicTests.cpp") +target_link_libraries(osAtomicTests os util gtest_main) 
add_test( - NAME osTests - COMMAND osTests + NAME osAtomicTests + COMMAND osAtomicTests ) +endif() -add_executable(osSystemTests "osSystemTests.cpp") -target_link_libraries(osSystemTests os util gtest_main) +if(TD_LINUX) +add_executable(osDirTests "osDirTests.cpp") +target_link_libraries(osDirTests os util gtest_main) add_test( - NAME osSystemTests - COMMAND osSystemTests + NAME osDirTests + COMMAND osDirTests ) +endif() + +if(TD_LINUX) +add_executable(osEnvTests "osEnvTests.cpp") +target_link_libraries(osEnvTests os util gtest_main) +add_test( + NAME osEnvTests + COMMAND osEnvTests +) +endif() add_executable(osMathTests "osMathTests.cpp") target_link_libraries(osMathTests os util gtest_main) @@ -36,6 +48,13 @@ add_test( COMMAND osMathTests ) +add_executable(osSemaphoreTests "osSemaphoreTests.cpp") +target_link_libraries(osSemaphoreTests os util gtest_main) +add_test( + NAME osSemaphoreTests + COMMAND osSemaphoreTests +) + add_executable(osSignalTests "osSignalTests.cpp") target_link_libraries(osSignalTests os util gtest_main) add_test( @@ -57,12 +76,28 @@ add_test( COMMAND osStringTests ) +add_executable(osTests "osTests.cpp") +target_link_libraries(osTests os util gtest_main) +add_test( + NAME osTests + COMMAND osTests +) + +add_executable(osSystemTests "osSystemTests.cpp") +target_link_libraries(osSystemTests os util gtest_main) +add_test( + NAME osSystemTests + COMMAND osSystemTests +) + +if(TD_LINUX) add_executable(osThreadTests "osThreadTests.cpp") target_link_libraries(osThreadTests os util gtest_main) add_test( NAME osThreadTests COMMAND osThreadTests ) +endif() add_executable(osTimeTests "osTimeTests.cpp") target_link_libraries(osTimeTests os util gtest_main) @@ -71,35 +106,3 @@ add_test( COMMAND osTimeTests ) - -if(TD_LINUX) - -add_executable(osAtomicTests "osAtomicTests.cpp") -target_link_libraries(osAtomicTests os util gtest_main) -add_test( - NAME osAtomicTests - COMMAND osAtomicTests -) - -add_executable(osDirTests "osDirTests.cpp") 
-target_link_libraries(osDirTests os util gtest_main) -add_test( - NAME osDirTests - COMMAND osDirTests -) - -add_executable(osEnvTests "osEnvTests.cpp") -target_link_libraries(osEnvTests os util gtest_main) -add_test( - NAME osEnvTests - COMMAND osEnvTests -) - -endif() - -add_executable(osSemaphoreTests "osSemaphoreTests.cpp") -target_link_libraries(osSemaphoreTests os util gtest_main) -add_test( - NAME osSemaphoreTests - COMMAND osSemaphoreTests -) diff --git a/source/os/test/osThreadTests.cpp b/source/os/test/osThreadTests.cpp index e7fc4f1356..20964c86bc 100644 --- a/source/os/test/osThreadTests.cpp +++ b/source/os/test/osThreadTests.cpp @@ -29,6 +29,473 @@ #include "os.h" #include "tlog.h" -TEST(osThreadTests, osThreadTests1) { +static int32_t globalVar = 0; +static void funcPtrKey(void *param) { taosMsleep(100); } + +static void *funcPtr200(void *param) { + TdThread thread = taosThreadSelf(); + + TdThreadKey key = {0}; + taosThreadKeyCreate(&key, funcPtrKey); + void *oldVal = taosThreadGetSpecific(key); + taosThreadSetSpecific(key, oldVal); + taosThreadKeyDelete(key); + + int32_t oldType = 0; + taosThreadSetCancelType(-1, &oldType); + taosThreadSetCancelType(0, &oldType); + + int32_t oldState = 0; + taosThreadSetCancelState(-1, &oldState); + taosThreadSetCancelState(0, &oldState); + + int32_t policy; + struct sched_param para; + taosThreadGetSchedParam(thread, &policy, ¶); + taosThreadGetSchedParam(thread, NULL, ¶); + taosThreadGetSchedParam(thread, &policy, NULL); + // taosThreadSetSchedParam(NULL, 0, ¶); + taosThreadSetSchedParam(thread, 0, ¶); + taosMsleep(200); + + return NULL; } + +static void *funcPtr501(void *param) { + taosMsleep(500); + TdThread thread = taosThreadSelf(); + return NULL; +} + +static void *funcPtr502(void *param) { + taosMsleep(500); + TdThread thread = taosThreadSelf(); + return NULL; +} + +static void *funcPtr503(void *param) { + taosMsleep(500); + TdThread thread = taosThreadSelf(); + return NULL; +} + +static void 
*funcPtr504(void *param) { + taosMsleep(500); + TdThread thread = taosThreadSelf(); + return NULL; +} + +static void *funcPtrExit1(void *param) { + taosThreadExit(NULL); + return NULL; +} + +static void *funcPtrExit2(void *param) { + taosThreadExit(&globalVar); + return NULL; +} + +TEST(osThreadTests, thread) { + TdThread tid1 = {0}; + TdThread tid2 = {0}; + int32_t reti = 0; + + reti = taosThreadCreate(NULL, NULL, funcPtr200, NULL); + EXPECT_NE(reti, 0); + reti = taosThreadCreate(&tid1, NULL, NULL, NULL); + EXPECT_NE(reti, 0); + reti = taosThreadCreate(&tid1, NULL, funcPtr200, NULL); + EXPECT_EQ(reti, 0); + taosMsleep(300); + + (void)taosThreadCancel(tid1); + + reti = taosThreadCreate(&tid2, NULL, funcPtr501, NULL); + EXPECT_EQ(reti, 0); + taosMsleep(1000); + (void)taosThreadCancel(tid2); + + taosThreadDetach(tid1); + reti = taosThreadCreate(&tid2, NULL, funcPtr502, NULL); + EXPECT_EQ(reti, 0); + reti = taosThreadDetach(tid2); + + reti = taosThreadEqual(tid1, tid2); + EXPECT_NE(reti, 0); + + reti = taosThreadCreate(&tid2, NULL, funcPtrExit1, NULL); + EXPECT_EQ(reti, 0); + reti = taosThreadCreate(&tid2, NULL, funcPtrExit2, NULL); + EXPECT_EQ(reti, 0); + + taosMsleep(1000); + + // reti = taosThreadCreate(&tid2, NULL, funcPtr503, NULL); + // EXPECT_EQ(reti, 0); + // taosThreadKill(tid2, SIGINT); + + int32_t policy; + struct sched_param para; + taosThreadGetSchedParam(tid2, &policy, ¶); + taosThreadGetSchedParam(tid2, NULL, ¶); + taosThreadGetSchedParam(tid2, &policy, NULL); + // taosThreadSetSchedParam(NULL, 0, ¶); + taosThreadSetSchedParam(tid2, 0, ¶); + + TdThreadKey key = {0}; + taosThreadKeyCreate(&key, funcPtrKey); + void *oldVal = taosThreadGetSpecific(key); + taosThreadSetSpecific(key, oldVal); + taosThreadKeyDelete(key); +} + +TEST(osThreadTests, attr) { + int32_t reti = 0; + TdThreadAttr attr = {0}; + int32_t param = 0; + + reti = taosThreadAttrInit(NULL); + EXPECT_NE(reti, 0); + reti = taosThreadAttrDestroy(NULL); + EXPECT_NE(reti, 0); + + 
(void)taosThreadAttrInit(&attr); + + reti = taosThreadAttrSetDetachState(&attr, PTHREAD_CREATE_JOINABLE); + EXPECT_EQ(reti, 0); + reti = taosThreadAttrSetDetachState(&attr, -1); + EXPECT_NE(reti, 0); + reti = taosThreadAttrSetDetachState(NULL, -1); + EXPECT_NE(reti, 0); + reti = taosThreadAttrGetDetachState(NULL, ¶m); + EXPECT_NE(reti, 0); + reti = taosThreadAttrGetDetachState(&attr, NULL); + EXPECT_NE(reti, 0); + reti = taosThreadAttrGetDetachState(&attr, ¶m); + EXPECT_EQ(reti, 0); + + reti = taosThreadAttrSetInheritSched(&attr, PTHREAD_INHERIT_SCHED); + EXPECT_EQ(reti, 0); + reti = taosThreadAttrSetInheritSched(&attr, -1); + EXPECT_NE(reti, 0); + reti = taosThreadAttrSetInheritSched(NULL, -1); + EXPECT_NE(reti, 0); + reti = taosThreadAttrGetInheritSched(NULL, ¶m); + EXPECT_NE(reti, 0); + reti = taosThreadAttrGetInheritSched(&attr, NULL); + EXPECT_NE(reti, 0); + reti = taosThreadAttrGetInheritSched(&attr, ¶m); + EXPECT_EQ(reti, 0); + + struct sched_param schedparam = {0}; + reti = taosThreadAttrGetSchedParam(&attr, &schedparam); + EXPECT_EQ(reti, 0); + reti = taosThreadAttrSetSchedParam(&attr, &schedparam); + EXPECT_EQ(reti, 0); + schedparam.sched_priority = -1; + reti = taosThreadAttrSetSchedParam(&attr, &schedparam); + EXPECT_NE(reti, 0); + reti = taosThreadAttrSetSchedParam(NULL, &schedparam); + EXPECT_NE(reti, 0); + reti = taosThreadAttrGetSchedParam(NULL, &schedparam); + EXPECT_NE(reti, 0); + reti = taosThreadAttrGetSchedParam(&attr, NULL); + EXPECT_NE(reti, 0); + + reti = taosThreadAttrSetSchedPolicy(&attr, SCHED_FIFO); + EXPECT_EQ(reti, 0); + reti = taosThreadAttrSetSchedPolicy(&attr, -1); + EXPECT_NE(reti, 0); + reti = taosThreadAttrSetSchedPolicy(NULL, -1); + EXPECT_NE(reti, 0); + reti = taosThreadAttrGetSchedPolicy(NULL, ¶m); + EXPECT_NE(reti, 0); + reti = taosThreadAttrGetSchedPolicy(&attr, NULL); + EXPECT_NE(reti, 0); + reti = taosThreadAttrGetSchedPolicy(&attr, ¶m); + EXPECT_EQ(reti, 0); + + reti = taosThreadAttrSetScope(&attr, PTHREAD_SCOPE_SYSTEM); 
+ EXPECT_EQ(reti, 0); + reti = taosThreadAttrSetScope(&attr, -1); + EXPECT_NE(reti, 0); + reti = taosThreadAttrSetScope(NULL, -1); + EXPECT_NE(reti, 0); + reti = taosThreadAttrGetScope(NULL, ¶m); + EXPECT_NE(reti, 0); + reti = taosThreadAttrGetScope(&attr, NULL); + EXPECT_NE(reti, 0); + reti = taosThreadAttrGetScope(&attr, ¶m); + EXPECT_EQ(reti, 0); + + size_t stacksize; + reti = taosThreadAttrGetStackSize(&attr, &stacksize); + EXPECT_EQ(reti, 0); + reti = taosThreadAttrSetStackSize(&attr, stacksize); + EXPECT_EQ(reti, 0); + reti = taosThreadAttrSetStackSize(&attr, 2048); + EXPECT_NE(reti, 0); + reti = taosThreadAttrSetStackSize(NULL, stacksize); + EXPECT_NE(reti, 0); + reti = taosThreadAttrGetStackSize(NULL, &stacksize); + EXPECT_NE(reti, 0); + reti = taosThreadAttrGetStackSize(&attr, NULL); + EXPECT_NE(reti, 0); +} + +TEST(osThreadTests, cond) { + int32_t reti = 0; + + reti = taosThreadCondInit(NULL, NULL); + EXPECT_NE(reti, 0); + reti = taosThreadCondDestroy(NULL); + EXPECT_NE(reti, 0); + reti = taosThreadCondSignal(NULL); + EXPECT_NE(reti, 0); + reti = taosThreadCondBroadcast(NULL); + EXPECT_NE(reti, 0); + + TdThreadCond cond{0}; + TdThreadMutex mutex = {0}; + reti = taosThreadCondWait(&cond, NULL); + EXPECT_NE(reti, 0); + reti = taosThreadCondWait(NULL, &mutex); + EXPECT_NE(reti, 0); + + struct timespec abstime = {0}; + reti = taosThreadCondTimedWait(&cond, NULL, &abstime); + EXPECT_NE(reti, 0); + reti = taosThreadCondTimedWait(NULL, &mutex, &abstime); + EXPECT_NE(reti, 0); + reti = taosThreadCondTimedWait(&cond, &mutex, NULL); + EXPECT_EQ(reti, 0); + + TdThreadCondAttr condattr = {0}; + (void)taosThreadCondAttrInit(&condattr); + reti = taosThreadCondAttrInit(NULL); + EXPECT_NE(reti, 0); + int32_t pshared; + reti = taosThreadCondAttrGetPshared(&condattr, &pshared); + EXPECT_EQ(reti, 0); + reti = taosThreadCondAttrSetPshared(&condattr, pshared); + EXPECT_EQ(reti, 0); + reti = taosThreadCondAttrSetPshared(&condattr, -1); + EXPECT_NE(reti, 0); + reti = 
taosThreadCondAttrSetPshared(NULL, pshared); + EXPECT_NE(reti, 0); + reti = taosThreadCondAttrGetPshared(NULL, &pshared); + EXPECT_NE(reti, 0); + reti = taosThreadCondAttrGetPshared(&condattr, NULL); + EXPECT_NE(reti, 0); + + reti = taosThreadCondAttrSetclock(NULL, -1); + EXPECT_NE(reti, 0); + + reti = taosThreadCondAttrDestroy(NULL); + EXPECT_NE(reti, 0); + reti = taosThreadCondAttrDestroy(&condattr); + EXPECT_EQ(reti, 0); +} + +TEST(osThreadTests, mutex) { + int32_t reti = 0; + TdThreadMutex mutex; + reti = taosThreadMutexInit(NULL, 0); + EXPECT_NE(reti, 0); + reti = taosThreadMutexInit(&mutex, 0); + EXPECT_EQ(reti, 0); + + reti = taosThreadMutexTryLock(NULL); + EXPECT_NE(reti, 0); + reti = taosThreadMutexTryLock(&mutex); + EXPECT_EQ(reti, 0); + reti = taosThreadMutexTryLock(&mutex); + EXPECT_NE(reti, 0); + + reti = taosThreadMutexUnlock(NULL); + EXPECT_NE(reti, 0); + reti = taosThreadMutexUnlock(&mutex); + EXPECT_EQ(reti, 0); + + reti = taosThreadMutexLock(NULL); + EXPECT_NE(reti, 0); + reti = taosThreadMutexLock(&mutex); + EXPECT_EQ(reti, 0); + reti = taosThreadMutexUnlock(&mutex); + EXPECT_EQ(reti, 0); + + reti = taosThreadMutexDestroy(NULL); + EXPECT_NE(reti, 0); + reti = taosThreadMutexDestroy(&mutex); + EXPECT_EQ(reti, 0); +} + +TEST(osThreadTests, mutexAttr) { + int32_t reti = 0; + TdThreadMutexAttr mutexAttr; + reti = taosThreadMutexAttrInit(NULL); + EXPECT_NE(reti, 0); + reti = taosThreadMutexAttrInit(&mutexAttr); + EXPECT_EQ(reti, 0); + + int32_t pshared; + reti = taosThreadMutexAttrGetPshared(&mutexAttr, &pshared); + EXPECT_EQ(reti, 0); + reti = taosThreadMutexAttrSetPshared(&mutexAttr, pshared); + EXPECT_EQ(reti, 0); + reti = taosThreadMutexAttrSetPshared(&mutexAttr, -1); + EXPECT_NE(reti, 0); + reti = taosThreadMutexAttrSetPshared(NULL, pshared); + EXPECT_NE(reti, 0); + reti = taosThreadMutexAttrGetPshared(NULL, &pshared); + EXPECT_NE(reti, 0); + reti = taosThreadMutexAttrGetPshared(&mutexAttr, NULL); + EXPECT_NE(reti, 0); + + int32_t kind; + reti = 
taosThreadMutexAttrGetType(&mutexAttr, &kind); + EXPECT_EQ(reti, 0); + reti = taosThreadMutexAttrSetType(&mutexAttr, kind); + EXPECT_EQ(reti, 0); + reti = taosThreadMutexAttrSetType(&mutexAttr, -1); + EXPECT_NE(reti, 0); + reti = taosThreadMutexAttrSetType(NULL, kind); + EXPECT_NE(reti, 0); + reti = taosThreadMutexAttrGetType(NULL, &kind); + EXPECT_NE(reti, 0); + reti = taosThreadMutexAttrGetType(&mutexAttr, NULL); + EXPECT_NE(reti, 0); + + reti = taosThreadMutexAttrDestroy(NULL); + EXPECT_NE(reti, 0); + reti = taosThreadMutexAttrDestroy(&mutexAttr); + EXPECT_EQ(reti, 0); +} + +TEST(osThreadTests, rwlock) { + int32_t reti = 0; + TdThreadRwlock rwlock; + reti = taosThreadRwlockInit(NULL, 0); + EXPECT_NE(reti, 0); + reti = taosThreadRwlockInit(&rwlock, 0); + EXPECT_EQ(reti, 0); + + reti = taosThreadRwlockTryRdlock(NULL); + EXPECT_NE(reti, 0); + reti = taosThreadRwlockTryRdlock(&rwlock); + EXPECT_EQ(reti, 0); + + reti = taosThreadRwlockUnlock(NULL); + EXPECT_NE(reti, 0); + reti = taosThreadRwlockUnlock(&rwlock); + EXPECT_EQ(reti, 0); + + reti = taosThreadRwlockRdlock(NULL); + EXPECT_NE(reti, 0); + reti = taosThreadRwlockRdlock(&rwlock); + EXPECT_EQ(reti, 0); + reti = taosThreadRwlockUnlock(&rwlock); + EXPECT_EQ(reti, 0); + + reti = taosThreadRwlockDestroy(NULL); + EXPECT_NE(reti, 0); + reti = taosThreadRwlockDestroy(&rwlock); + EXPECT_EQ(reti, 0); + + reti = taosThreadRwlockInit(NULL, 0); + EXPECT_NE(reti, 0); + reti = taosThreadRwlockInit(&rwlock, 0); + EXPECT_EQ(reti, 0); + + reti = taosThreadRwlockTryWrlock(NULL); + EXPECT_NE(reti, 0); + reti = taosThreadRwlockTryWrlock(&rwlock); + EXPECT_EQ(reti, 0); + + reti = taosThreadRwlockUnlock(NULL); + EXPECT_NE(reti, 0); + reti = taosThreadRwlockUnlock(&rwlock); + EXPECT_EQ(reti, 0); + + reti = taosThreadRwlockWrlock(NULL); + EXPECT_NE(reti, 0); + reti = taosThreadRwlockWrlock(&rwlock); + EXPECT_EQ(reti, 0); + reti = taosThreadRwlockUnlock(&rwlock); + EXPECT_EQ(reti, 0); + + reti = taosThreadRwlockDestroy(NULL); + 
EXPECT_NE(reti, 0); + reti = taosThreadRwlockDestroy(&rwlock); + EXPECT_EQ(reti, 0); +} + +TEST(osThreadTests, rdlockAttr) { + int32_t reti = 0; + TdThreadRwlockAttr rdlockAttr; + reti = taosThreadRwlockAttrInit(NULL); + EXPECT_NE(reti, 0); + reti = taosThreadRwlockAttrInit(&rdlockAttr); + EXPECT_EQ(reti, 0); + + int32_t pshared; + reti = taosThreadRwlockAttrGetPshared(&rdlockAttr, &pshared); + EXPECT_EQ(reti, 0); + reti = taosThreadRwlockAttrSetPshared(&rdlockAttr, pshared); + EXPECT_EQ(reti, 0); + reti = taosThreadRwlockAttrSetPshared(&rdlockAttr, -1); + EXPECT_NE(reti, 0); + reti = taosThreadRwlockAttrSetPshared(NULL, pshared); + EXPECT_NE(reti, 0); + reti = taosThreadRwlockAttrGetPshared(NULL, &pshared); + EXPECT_NE(reti, 0); + reti = taosThreadRwlockAttrGetPshared(&rdlockAttr, NULL); + EXPECT_NE(reti, 0); + + reti = taosThreadRwlockAttrDestroy(NULL); + EXPECT_NE(reti, 0); + reti = taosThreadRwlockAttrDestroy(&rdlockAttr); + EXPECT_EQ(reti, 0); +} + +TEST(osThreadTests, spinlock) { + int32_t reti = 0; + + TdThreadSpinlock lock = {0}; + reti = taosThreadSpinInit(&lock, -1); + EXPECT_EQ(reti, 0); + reti = taosThreadSpinLock(&lock); + EXPECT_EQ(reti, 0); + reti = taosThreadSpinTrylock(&lock); + EXPECT_NE(reti, 0); + reti = taosThreadSpinUnlock(&lock); + EXPECT_EQ(reti, 0); + reti = taosThreadSpinDestroy(&lock); + EXPECT_EQ(reti, 0); + + reti = taosThreadSpinInit(&lock, -1); + EXPECT_EQ(reti, 0); + reti = taosThreadSpinTrylock(&lock); + EXPECT_EQ(reti, 0); + reti = taosThreadSpinUnlock(&lock); + EXPECT_EQ(reti, 0); + reti = taosThreadSpinDestroy(&lock); + EXPECT_EQ(reti, 0); + + reti = taosThreadSpinInit(NULL, 0); + EXPECT_NE(reti, 0); + reti = taosThreadSpinLock(NULL); + EXPECT_NE(reti, 0); + reti = taosThreadSpinTrylock(NULL); + EXPECT_NE(reti, 0); + reti = taosThreadSpinUnlock(NULL); + EXPECT_NE(reti, 0); + reti = taosThreadSpinDestroy(NULL); + EXPECT_NE(reti, 0); +} + +TEST(osThreadTests, others) { + taosThreadTestCancel(); + taosThreadClear(NULL); +} \ No 
newline at end of file diff --git a/source/os/test/osTimeTests.cpp b/source/os/test/osTimeTests.cpp index 5c8c837dca..1d34587ad8 100644 --- a/source/os/test/osTimeTests.cpp +++ b/source/os/test/osTimeTests.cpp @@ -33,7 +33,7 @@ TEST(osTimeTests, taosLocalTime) { // Test 1: Test when both timep and result are not NULL time_t timep = 1617531000; // 2021-04-04 18:10:00 struct tm result; - struct tm* local_time = taosLocalTime(&timep, &result, NULL, 0, NULL); + struct tm *local_time = taosLocalTime(&timep, &result, NULL, 0, NULL); ASSERT_NE(local_time, nullptr); ASSERT_EQ(local_time->tm_year, 121); ASSERT_EQ(local_time->tm_mon, 3); @@ -92,4 +92,55 @@ TEST(osTimeTests, taosLocalTime) { local_time = taosLocalTime(&neg_timep3, &result, NULL, 0, NULL); ASSERT_EQ(local_time, nullptr); #endif +} + +TEST(osTimeTests, invalidParameter) { + void *retp = NULL; + int32_t reti = 0; + char buf[1024] = {0}; + char fmt[1024] = {0}; + struct tm tm = {0}; + struct timeval tv = {0}; + + retp = taosStrpTime(buf, fmt, NULL); + EXPECT_EQ(retp, nullptr); + retp = taosStrpTime(NULL, fmt, &tm); + EXPECT_EQ(retp, nullptr); + retp = taosStrpTime(buf, NULL, &tm); + EXPECT_EQ(retp, nullptr); + + reti = taosGetTimeOfDay(NULL); + EXPECT_NE(reti, 0); + + reti = taosTime(NULL); + EXPECT_NE(reti, 0); + + tm.tm_year = 2024; + tm.tm_mon = 10; + tm.tm_mday = 23; + tm.tm_hour = 12; + tm.tm_min = 1; + tm.tm_sec = 0; + tm.tm_isdst = -1; + time_t rett = taosMktime(&tm, NULL); + EXPECT_NE(rett, 0); + + retp = taosLocalTime(NULL, &tm, NULL, 0, NULL); + EXPECT_EQ(retp, nullptr); + + retp = taosLocalTime(&rett, NULL, NULL, 0, NULL); + EXPECT_EQ(retp, nullptr); + + reti = taosSetGlobalTimezone(NULL); + EXPECT_NE(reti, 0); +} + +TEST(osTimeTests, user_mktime64) { + int64_t reti = 0; + + reti = user_mktime64(2024, 10, 23, 12, 3, 2, 1); + EXPECT_NE(reti, 0); + + reti = user_mktime64(2024, 1, 23, 12, 3, 2, 1); + EXPECT_NE(reti, 0); } \ No newline at end of file diff --git a/source/util/src/tconfig.c 
b/source/util/src/tconfig.c index ee88996c29..52794af4dd 100644 --- a/source/util/src/tconfig.c +++ b/source/util/src/tconfig.c @@ -501,11 +501,13 @@ int32_t cfgGetAndSetItem(SConfig *pCfg, SConfigItem **pItem, const char *name, c *pItem = cfgGetItem(pCfg, name); if (*pItem == NULL) { - (void)taosThreadMutexUnlock(&pCfg->lock); - TAOS_RETURN(TSDB_CODE_CFG_NOT_FOUND); + code = TSDB_CODE_CFG_NOT_FOUND; + goto _exit; } - TAOS_CHECK_RETURN(cfgSetItemVal(*pItem, name, value, stype)); + TAOS_CHECK_GOTO(cfgSetItemVal(*pItem, name, value, stype), NULL, _exit); + +_exit: if (lock) { (void)taosThreadMutexUnlock(&pCfg->lock); } diff --git a/source/util/src/tdecompressavx.c b/source/util/src/tdecompressavx.c index 143867b783..5077950c5d 100644 --- a/source/util/src/tdecompressavx.c +++ b/source/util/src/tdecompressavx.c @@ -22,7 +22,7 @@ char tsSIMDEnable = 0; #endif int32_t tsDecompressIntImpl_Hw(const char *const input, const int32_t nelements, char *const output, const char type) { -#ifdef __AVX2__ +#ifdef __AVX512F__ int32_t word_length = getWordLength(type); // Selector value: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 @@ -53,183 +53,79 @@ int32_t tsDecompressIntImpl_Hw(const char *const input, const int32_t nelements, int32_t gRemainder = (nelements - _pos); int32_t num = (gRemainder > elems) ? 
elems : gRemainder; - int32_t batch = 0; - int32_t remain = 0; - if (tsSIMDEnable && tsAVX512Supported && tsAVX512Enable) { -#ifdef __AVX512F__ - batch = num >> 3; - remain = num & 0x07; -#endif - } else if (tsSIMDEnable && tsAVX2Supported) { -#ifdef __AVX2__ - batch = num >> 2; - remain = num & 0x03; -#endif - } + int32_t batch = num >> 3; + int32_t remain = num & 0x07; if (selector == 0 || selector == 1) { - if (tsSIMDEnable && tsAVX512Supported && tsAVX512Enable) { -#ifdef __AVX512F__ - for (int32_t i = 0; i < batch; ++i) { - __m512i prev = _mm512_set1_epi64(prevValue); - _mm512_storeu_si512((__m512i *)&p[_pos], prev); - _pos += 8; // handle 64bit x 8 = 512bit - } - for (int32_t i = 0; i < remain; ++i) { - p[_pos++] = prevValue; - } -#endif - } else if (tsSIMDEnable && tsAVX2Supported) { - for (int32_t i = 0; i < batch; ++i) { - __m256i prev = _mm256_set1_epi64x(prevValue); - _mm256_storeu_si256((__m256i *)&p[_pos], prev); - _pos += 4; - } - - for (int32_t i = 0; i < remain; ++i) { - p[_pos++] = prevValue; - } - - } else { // alternative implementation without SIMD instructions. 
- for (int32_t i = 0; i < elems && count < nelements; i++, count++) { - p[_pos++] = prevValue; - v += bit; - } + for (int32_t i = 0; i < batch; ++i) { + __m512i prev = _mm512_set1_epi64(prevValue); + _mm512_storeu_si512((__m512i *)&p[_pos], prev); + _pos += 8; // handle 64bit x 8 = 512bit + } + for (int32_t i = 0; i < remain; ++i) { + p[_pos++] = prevValue; } } else { - if (tsSIMDEnable && tsAVX512Supported && tsAVX512Enable) { -#ifdef __AVX512F__ - __m512i sum_mask1 = _mm512_set_epi64(6, 6, 4, 4, 2, 2, 0, 0); - __m512i sum_mask2 = _mm512_set_epi64(5, 5, 5, 5, 1, 1, 1, 1); - __m512i sum_mask3 = _mm512_set_epi64(3, 3, 3, 3, 3, 3, 3, 3); - __m512i base = _mm512_set1_epi64(w); - __m512i maskVal = _mm512_set1_epi64(mask); - __m512i shiftBits = _mm512_set_epi64(bit * 7 + 4, bit * 6 + 4, bit * 5 + 4, bit * 4 + 4, bit * 3 + 4, - bit * 2 + 4, bit + 4, 4); - __m512i inc = _mm512_set1_epi64(bit << 3); + __m512i sum_mask1 = _mm512_set_epi64(6, 6, 4, 4, 2, 2, 0, 0); + __m512i sum_mask2 = _mm512_set_epi64(5, 5, 5, 5, 1, 1, 1, 1); + __m512i sum_mask3 = _mm512_set_epi64(3, 3, 3, 3, 3, 3, 3, 3); + __m512i base = _mm512_set1_epi64(w); + __m512i maskVal = _mm512_set1_epi64(mask); + __m512i shiftBits = _mm512_set_epi64(bit * 7 + 4, bit * 6 + 4, bit * 5 + 4, bit * 4 + 4, bit * 3 + 4, + bit * 2 + 4, bit + 4, 4); + __m512i inc = _mm512_set1_epi64(bit << 3); - for (int32_t i = 0; i < batch; ++i) { - __m512i after = _mm512_srlv_epi64(base, shiftBits); - __m512i zigzagVal = _mm512_and_si512(after, maskVal); + for (int32_t i = 0; i < batch; ++i) { + __m512i after = _mm512_srlv_epi64(base, shiftBits); + __m512i zigzagVal = _mm512_and_si512(after, maskVal); - // ZIGZAG_DECODE(T, v) (((v) >> 1) ^ -((T)((v)&1))) - __m512i signmask = _mm512_and_si512(_mm512_set1_epi64(1), zigzagVal); - signmask = _mm512_sub_epi64(_mm512_setzero_si512(), signmask); - __m512i delta = _mm512_xor_si512(_mm512_srli_epi64(zigzagVal, 1), signmask); + // ZIGZAG_DECODE(T, v) (((v) >> 1) ^ -((T)((v)&1))) + __m512i 
signmask = _mm512_and_si512(_mm512_set1_epi64(1), zigzagVal); + signmask = _mm512_sub_epi64(_mm512_setzero_si512(), signmask); + __m512i delta = _mm512_xor_si512(_mm512_srli_epi64(zigzagVal, 1), signmask); - // calculate the cumulative sum (prefix sum) for each number - // decode[0] = prevValue + final[0] - // decode[1] = decode[0] + final[1] -----> prevValue + final[0] + final[1] - // decode[2] = decode[1] + final[2] -----> prevValue + final[0] + final[1] + final[2] - // decode[3] = decode[2] + final[3] -----> prevValue + final[0] + final[1] + final[2] + final[3] + // calculate the cumulative sum (prefix sum) for each number + // decode[0] = prevValue + final[0] + // decode[1] = decode[0] + final[1] -----> prevValue + final[0] + final[1] + // decode[2] = decode[1] + final[2] -----> prevValue + final[0] + final[1] + final[2] + // decode[3] = decode[2] + final[3] -----> prevValue + final[0] + final[1] + final[2] + final[3] - // 7 6 5 4 3 2 1 - // 0 D7 D6 D5 D4 D3 D2 D1 - // D0 D6 0 D4 0 D2 0 D0 - // 0 D7+D6 D6 D5+D4 D4 D3+D2 D2 - // D1+D0 D0 13 6 9 4 5 2 - // 1 0 - __m512i prev = _mm512_set1_epi64(prevValue); - __m512i cum_sum = _mm512_add_epi64(delta, _mm512_maskz_permutexvar_epi64(0xaa, sum_mask1, delta)); - cum_sum = _mm512_add_epi64(cum_sum, _mm512_maskz_permutexvar_epi64(0xcc, sum_mask2, cum_sum)); - cum_sum = _mm512_add_epi64(cum_sum, _mm512_maskz_permutexvar_epi64(0xf0, sum_mask3, cum_sum)); + // 7 6 5 4 3 2 1 + // 0 D7 D6 D5 D4 D3 D2 D1 + // D0 D6 0 D4 0 D2 0 D0 + // 0 D7+D6 D6 D5+D4 D4 D3+D2 D2 + // D1+D0 D0 13 6 9 4 5 2 + // 1 0 + __m512i prev = _mm512_set1_epi64(prevValue); + __m512i cum_sum = _mm512_add_epi64(delta, _mm512_maskz_permutexvar_epi64(0xaa, sum_mask1, delta)); + cum_sum = _mm512_add_epi64(cum_sum, _mm512_maskz_permutexvar_epi64(0xcc, sum_mask2, cum_sum)); + cum_sum = _mm512_add_epi64(cum_sum, _mm512_maskz_permutexvar_epi64(0xf0, sum_mask3, cum_sum)); - // 13 6 9 4 5 2 1 - // 0 D7,D6 D6 D5,D4 D4 D3,D2 D2 - // D1,D0 D0 +D5,D4 D5,D4, 0 0 D1,D0 
D1,D0 - // 0 0 D7~D4 D6~D4 D5~D4 D4 D3~D0 D2~D0 - // D1~D0 D0 22 15 9 4 6 3 - // 1 0 - // - // D3~D0 D3~D0 D3~D0 D3~D0 0 0 0 - // 0 28 21 15 10 6 3 1 - // 0 + // 13 6 9 4 5 2 1 + // 0 D7,D6 D6 D5,D4 D4 D3,D2 D2 + // D1,D0 D0 +D5,D4 D5,D4, 0 0 D1,D0 D1,D0 + // 0 0 D7~D4 D6~D4 D5~D4 D4 D3~D0 D2~D0 + // D1~D0 D0 22 15 9 4 6 3 + // 1 0 + // + // D3~D0 D3~D0 D3~D0 D3~D0 0 0 0 + // 0 28 21 15 10 6 3 1 + // 0 - cum_sum = _mm512_add_epi64(cum_sum, prev); - _mm512_storeu_si512((__m512i *)&p[_pos], cum_sum); + cum_sum = _mm512_add_epi64(cum_sum, prev); + _mm512_storeu_si512((__m512i *)&p[_pos], cum_sum); - shiftBits = _mm512_add_epi64(shiftBits, inc); - prevValue = p[_pos + 7]; - _pos += 8; - } - // handle the remain value - for (int32_t i = 0; i < remain; i++) { - zigzag_value = ((w >> (v + (batch * bit * 8))) & mask); - prevValue += ZIGZAG_DECODE(int64_t, zigzag_value); + shiftBits = _mm512_add_epi64(shiftBits, inc); + prevValue = p[_pos + 7]; + _pos += 8; + } + // handle the remain value + for (int32_t i = 0; i < remain; i++) { + zigzag_value = ((w >> (v + (batch * bit * 8))) & mask); + prevValue += ZIGZAG_DECODE(int64_t, zigzag_value); - p[_pos++] = prevValue; - v += bit; - } -#endif - } else if (tsSIMDEnable && tsAVX2Supported) { - __m256i base = _mm256_set1_epi64x(w); - __m256i maskVal = _mm256_set1_epi64x(mask); - - __m256i shiftBits = _mm256_set_epi64x(bit * 3 + 4, bit * 2 + 4, bit + 4, 4); - __m256i inc = _mm256_set1_epi64x(bit << 2); - - for (int32_t i = 0; i < batch; ++i) { - __m256i after = _mm256_srlv_epi64(base, shiftBits); - __m256i zigzagVal = _mm256_and_si256(after, maskVal); - - // ZIGZAG_DECODE(T, v) (((v) >> 1) ^ -((T)((v)&1))) - __m256i signmask = _mm256_and_si256(_mm256_set1_epi64x(1), zigzagVal); - signmask = _mm256_sub_epi64(_mm256_setzero_si256(), signmask); - - // get four zigzag values here - __m256i delta = _mm256_xor_si256(_mm256_srli_epi64(zigzagVal, 1), signmask); - - // calculate the cumulative sum (prefix sum) for each number - // decode[0] = 
prevValue + final[0] - // decode[1] = decode[0] + final[1] -----> prevValue + final[0] + final[1] - // decode[2] = decode[1] + final[2] -----> prevValue + final[0] + final[1] + final[2] - // decode[3] = decode[2] + final[3] -----> prevValue + final[0] + final[1] + final[2] + final[3] - - // 1, 2, 3, 4 - //+ 0, 1, 0, 3 - // 1, 3, 3, 7 - // shift and add for the first round - __m128i prev = _mm_set1_epi64x(prevValue); - __m256i x = _mm256_slli_si256(delta, 8); - - delta = _mm256_add_epi64(delta, x); - _mm256_storeu_si256((__m256i *)&p[_pos], delta); - - // 1, 3, 3, 7 - //+ 0, 0, 3, 3 - // 1, 3, 6, 10 - // shift and add operation for the second round - __m128i firstPart = _mm_loadu_si128((__m128i *)&p[_pos]); - __m128i secondItem = _mm_set1_epi64x(p[_pos + 1]); - __m128i secPart = _mm_add_epi64(_mm_loadu_si128((__m128i *)&p[_pos + 2]), secondItem); - firstPart = _mm_add_epi64(firstPart, prev); - secPart = _mm_add_epi64(secPart, prev); - - // save it in the memory - _mm_storeu_si128((__m128i *)&p[_pos], firstPart); - _mm_storeu_si128((__m128i *)&p[_pos + 2], secPart); - - shiftBits = _mm256_add_epi64(shiftBits, inc); - prevValue = p[_pos + 3]; - _pos += 4; - } - - // handle the remain value - for (int32_t i = 0; i < remain; i++) { - zigzag_value = ((w >> (v + (batch * bit * 4))) & mask); - prevValue += ZIGZAG_DECODE(int64_t, zigzag_value); - - p[_pos++] = prevValue; - v += bit; - } - } else { // alternative implementation without SIMD instructions. 
- for (int32_t i = 0; i < elems && count < nelements; i++, count++) { - zigzag_value = ((w >> v) & mask); - prevValue += ZIGZAG_DECODE(int64_t, zigzag_value); - - p[_pos++] = prevValue; - v += bit; - } + p[_pos++] = prevValue; + v += bit; } } } break; @@ -292,7 +188,7 @@ int32_t tsDecompressIntImpl_Hw(const char *const input, const int32_t nelements, return nelements * word_length; #else - uError("unable run %s without avx2 instructions", __func__); + uError("unable run %s without avx512 instructions", __func__); return -1; #endif } diff --git a/source/util/src/terror.c b/source/util/src/terror.c index c9c0b7a971..855ac35c6c 100644 --- a/source/util/src/terror.c +++ b/source/util/src/terror.c @@ -649,7 +649,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_PAR_NOT_SINGLE_GROUP, "Not a single-group g TAOS_DEFINE_ERROR(TSDB_CODE_PAR_TAGS_NOT_MATCHED, "Tags number not matched") TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_TAG_NAME, "Invalid tag name") TAOS_DEFINE_ERROR(TSDB_CODE_PAR_NAME_OR_PASSWD_TOO_LONG, "Name or password too long") -TAOS_DEFINE_ERROR(TSDB_CODE_PAR_PASSWD_EMPTY, "Password can not be empty") +TAOS_DEFINE_ERROR(TSDB_CODE_PAR_PASSWD_TOO_SHORT_OR_EMPTY, "Password too short or empty") TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_PORT, "Port should be an integer that is less than 65535 and greater than 0") TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_ENDPOINT, "Endpoint should be in the format of 'fqdn:port'") TAOS_DEFINE_ERROR(TSDB_CODE_PAR_EXPRIE_STATEMENT, "This statement is no longer supported") diff --git a/source/util/src/tworker.c b/source/util/src/tworker.c index 6370e6ca50..dbd8cb159e 100644 --- a/source/util/src/tworker.c +++ b/source/util/src/tworker.c @@ -823,6 +823,8 @@ bool tQueryAutoQWorkerTryRecycleWorker(SQueryAutoQWorkerPool *pPool, SQueryAutoQ int32_t tQueryAutoQWorkerInit(SQueryAutoQWorkerPool *pool) { int32_t code; + pool->exit = false; + (void)taosThreadMutexInit(&pool->poolLock, NULL); (void)taosThreadMutexInit(&pool->backupLock, NULL); 
(void)taosThreadMutexInit(&pool->waitingAfterBlockLock, NULL); diff --git a/source/util/test/CMakeLists.txt b/source/util/test/CMakeLists.txt index 655557b180..cde1392216 100644 --- a/source/util/test/CMakeLists.txt +++ b/source/util/test/CMakeLists.txt @@ -138,6 +138,10 @@ add_test( COMMAND logTest ) +IF(COMPILER_SUPPORT_AVX2) + MESSAGE(STATUS "AVX2 instructions is ACTIVATED") + set_source_files_properties(decompressTest.cpp PROPERTIES COMPILE_FLAGS -mavx2) +ENDIF() add_executable(decompressTest "decompressTest.cpp") target_link_libraries(decompressTest os util common gtest_main) add_test( @@ -145,6 +149,16 @@ add_test( COMMAND decompressTest ) + +IF($TD_LINUX) + add_executable(utilTests "utilTests.cpp") + target_link_libraries(utilTests os util common gtest_main) + add_test( + NAME utilTests + COMMAND utilTests + ) +ENDIF() + if(${TD_LINUX}) # terrorTest add_executable(terrorTest "terrorTest.cpp") diff --git a/source/util/test/errorCodeTable.ini b/source/util/test/errorCodeTable.ini index e837954a0b..f67c8ab834 100644 --- a/source/util/test/errorCodeTable.ini +++ b/source/util/test/errorCodeTable.ini @@ -463,7 +463,7 @@ TSDB_CODE_PAR_NOT_SINGLE_GROUP = 0x8000260C TSDB_CODE_PAR_TAGS_NOT_MATCHED = 0x8000260D TSDB_CODE_PAR_INVALID_TAG_NAME = 0x8000260E TSDB_CODE_PAR_NAME_OR_PASSWD_TOO_LONG = 0x80002610 -TSDB_CODE_PAR_PASSWD_EMPTY = 0x80002611 +TSDB_CODE_PAR_PASSWD_TOO_SHORT_OR_EMPTY = 0x80002611 TSDB_CODE_PAR_INVALID_PORT = 0x80002612 TSDB_CODE_PAR_INVALID_ENDPOINT = 0x80002613 TSDB_CODE_PAR_EXPRIE_STATEMENT = 0x80002614 diff --git a/source/util/test/utilTests.cpp b/source/util/test/utilTests.cpp index cb78743018..9a42213929 100644 --- a/source/util/test/utilTests.cpp +++ b/source/util/test/utilTests.cpp @@ -6,6 +6,7 @@ #include "tarray.h" #include "tcompare.h" +#include "tdatablock.h" namespace { } // namespace @@ -474,3 +475,67 @@ TEST(tsma, reverse_unit) { ASSERT_FALSE(tsmaIntervalCheck(12, 'n', 1, 'y', TSDB_TIME_PRECISION_NANO)); ASSERT_TRUE(tsmaIntervalCheck(3, 
'n', 1, 'y', TSDB_TIME_PRECISION_NANO)); } + +template +void dataBlockNullTest(const F& setValFunc) { + int32_t totalRows = 16; + SColumnInfoData columnInfoData = createColumnInfoData(type, tDataTypes[type].bytes, 0); + SColumnDataAgg columnDataAgg = {.numOfNull = 0}; + + auto checkNull = [totalRows, &columnInfoData, &columnDataAgg](uint32_t row, bool expected) { + EXPECT_EQ(colDataIsNull_s(&columnInfoData, row), expected); + EXPECT_EQ(colDataIsNull_t(&columnInfoData, row, IS_VAR_DATA_TYPE(columnInfoData.info.type)), expected); + EXPECT_EQ(colDataIsNull(&columnInfoData, totalRows, row, NULL), expected); + columnDataAgg.numOfNull = totalRows; + EXPECT_EQ(colDataIsNull(&columnInfoData, totalRows, row, &columnDataAgg), columnInfoData.hasNull); + columnDataAgg.numOfNull = 0; + EXPECT_EQ(colDataIsNull(&columnInfoData, totalRows, row, &columnDataAgg), false); + }; + + columnInfoData.hasNull = false; + checkNull(0, false); + checkNull(1, false); + checkNull(2, false); + checkNull(totalRows - 2, false); + checkNull(totalRows - 1, false); + + if (IS_VAR_DATA_TYPE(type)) { + columnInfoData.varmeta.offset = (int32_t*)taosMemoryCalloc(totalRows, sizeof(int32_t)); + } else { + columnInfoData.pData = (char*)taosMemoryCalloc(totalRows, tDataTypes[type].bytes); + columnInfoData.nullbitmap = (char*)taosMemoryCalloc(((totalRows - 1) >> NBIT) + 1, 1); + ValType val = 1; + setValFunc(&columnInfoData, 1, &val); + val = 2; + setValFunc(&columnInfoData, 2, &val); + } + colDataSetNULL(&columnInfoData, 0); + colDataSetNNULL(&columnInfoData, 3, totalRows - 3); + checkNull(0, true); + checkNull(1, false); + checkNull(2, false); + checkNull(totalRows - 2, true); + checkNull(totalRows - 1, true); + + if (IS_VAR_DATA_TYPE(type)) { + taosMemoryFreeClear(columnInfoData.varmeta.offset); + } else { + taosMemoryFreeClear(columnInfoData.pData); + taosMemoryFreeClear(columnInfoData.nullbitmap); + checkNull(0, false); + checkNull(1, false); + checkNull(2, false); + checkNull(totalRows - 2, false); + 
checkNull(totalRows - 1, false); + } +} + +TEST(utilTest, tdatablockTestNull) { + dataBlockNullTest(colDataSetInt8); + dataBlockNullTest(colDataSetInt16); + dataBlockNullTest(colDataSetInt32); + dataBlockNullTest(colDataSetInt64); + dataBlockNullTest(colDataSetFloat); + dataBlockNullTest(colDataSetDouble); + dataBlockNullTest(colDataSetInt64); +} diff --git a/tests/army/query/function/ans/leastsquares.csv b/tests/army/query/function/ans/leastsquares.csv new file mode 100644 index 0000000000..3d5cd33336 --- /dev/null +++ b/tests/army/query/function/ans/leastsquares.csv @@ -0,0 +1,56 @@ + +taos> select leastsquares(1, 1, 1) + leastsquares(1, 1, 1) | +================================= + {slop:-nan, intercept:-nan} | + +taos> select leastsquares(cast(1.1 as float), 1, 1) + leastsquares(cast(1.1 as float), 1, 1) | +========================================= + {slop:-nan, intercept:-nan} | + +taos> select leastsquares(cast(1.1 as double), 1, 1) + leastsquares(cast(1.1 as double), 1, 1) | +========================================== + {slop:-nan, intercept:-nan} | + +taos> select leastsquares(cast(1 as tinyint), 1, 1) + leastsquares(cast(1 as tinyint), 1, 1) | +========================================= + {slop:-nan, intercept:-nan} | + +taos> select leastsquares(cast(100 as smallint), 1, 1) + leastsquares(cast(100 as smallint), 1, 1) | +============================================ + {slop:-nan, intercept:-nan} | + +taos> select leastsquares(cast(100000 as int), 1, 1) + leastsquares(cast(100000 as int), 1, 1) | +========================================== + {slop:-nan, intercept:-nan} | + +taos> select leastsquares(cast(10000000000 as bigint), 1, 1) + leastsquares(cast(10000000000 as bigint), 1, 1) | +================================================== + {slop:-nan, intercept:-nan} | + +taos> select leastsquares(cast(1 as tinyint unsigned), 1, 1) + leastsquares(cast(1 as tinyint unsigned), 1, 1) | +================================================== + {slop:-nan, 
intercept:-nan} | + +taos> select leastsquares(cast(100 as smallint unsigned), 1, 1) + leastsquares(cast(100 as smallint unsigned), 1, 1) | +===================================================== + {slop:-nan, intercept:-nan} | + +taos> select leastsquares(cast(100000 as int unsigned), 1, 1) + leastsquares(cast(100000 as int unsigned), 1, 1) | +=================================================== + {slop:-nan, intercept:-nan} | + +taos> select leastsquares(cast(10000000000 as bigint unsigned), 1, 1) + leastsquares(cast(10000000000 as bigint unsigned), 1, 1) | +=========================================================== + {slop:-nan, intercept:-nan} | + diff --git a/tests/army/query/function/ans/max.csv b/tests/army/query/function/ans/max.csv index f150ad1208..1570e1ebc9 100644 --- a/tests/army/query/function/ans/max.csv +++ b/tests/army/query/function/ans/max.csv @@ -603,3 +603,58 @@ taos> select location, max(current) from ts_4893.meters group by location order ============================================ beijing | 11.9989996 | +taos> select max(1) + max(1) | +======================== + 1 | + +taos> select max(cast(1 as tinyint)) + max(cast(1 as tinyint)) | +========================== + 1 | + +taos> select max(cast(100 as smallint)) + max(cast(100 as smallint)) | +============================= + 100 | + +taos> select max(cast(100000 as int)) + max(cast(100000 as int)) | +=========================== + 100000 | + +taos> select max(cast(10000000000 as bigint)) + max(cast(10000000000 as bigint)) | +=================================== + 10000000000 | + +taos> select max(cast(1 as tinyint unsigned)) + max(cast(1 as tinyint unsigned)) | +=================================== + 1 | + +taos> select max(cast(100 as smallint unsigned)) + max(cast(100 as smallint unsigned)) | +====================================== + 100 | + +taos> select max(cast(100000 as int unsigned)) + max(cast(100000 as int unsigned)) | +==================================== + 100000 | + +taos> select 
max(cast(10000000000 as bigint unsigned)) + max(cast(10000000000 as bigint unsigned)) | +============================================ + 10000000000 | + +taos> select max(cast(1.1 as float)) + max(cast(1.1 as float)) | +========================== + 1.1000000e+00 | + +taos> select max(cast(1.1 as double)) + max(cast(1.1 as double)) | +============================ + 1.100000000000000 | + diff --git a/tests/army/query/function/ans/min.csv b/tests/army/query/function/ans/min.csv index 9a8ba15287..1ea0c47e81 100644 --- a/tests/army/query/function/ans/min.csv +++ b/tests/army/query/function/ans/min.csv @@ -603,3 +603,58 @@ taos> select location, min(id) from ts_4893.meters group by location order by lo =================================== beijing | 0 | +taos> select min(1) + min(1) | +======================== + 1 | + +taos> select min(cast(1 as tinyint)) + min(cast(1 as tinyint)) | +========================== + 1 | + +taos> select min(cast(100 as smallint)) + min(cast(100 as smallint)) | +============================= + 100 | + +taos> select min(cast(100000 as int)) + min(cast(100000 as int)) | +=========================== + 100000 | + +taos> select min(cast(10000000000 as bigint)) + min(cast(10000000000 as bigint)) | +=================================== + 10000000000 | + +taos> select min(cast(1 as tinyint unsigned)) + min(cast(1 as tinyint unsigned)) | +=================================== + 1 | + +taos> select min(cast(100 as smallint unsigned)) + min(cast(100 as smallint unsigned)) | +====================================== + 100 | + +taos> select min(cast(100000 as int unsigned)) + min(cast(100000 as int unsigned)) | +==================================== + 100000 | + +taos> select min(cast(10000000000 as bigint unsigned)) + min(cast(10000000000 as bigint unsigned)) | +============================================ + 10000000000 | + +taos> select min(cast(1.1 as float)) + min(cast(1.1 as float)) | +========================== + 1.1000000e+00 | + +taos> select min(cast(1.1 
as double)) + min(cast(1.1 as double)) | +============================ + 1.100000000000000 | + diff --git a/tests/army/query/function/ans/round.csv b/tests/army/query/function/ans/round.csv index 1b6ed548e7..4f9151c1ad 100644 --- a/tests/army/query/function/ans/round.csv +++ b/tests/army/query/function/ans/round.csv @@ -308,3 +308,53 @@ taos> select round(log(current), 2) from ts_4893.meters limit 1 ============================ 2.370000000000000 | +taos> select round(cast(1.0e+400 as float), 0); + round(cast(1.0e+400 as float), 0) | +==================================== + NULL | + +taos> select round(cast(1.0e+400 as double), 0); + round(cast(1.0e+400 as double), 0) | +===================================== + NULL | + +taos> select round(cast(5 as tinyint), 1); + round(cast(5 as tinyint), 1) | +=============================== + 5 | + +taos> select round(cast(50 as smallint), 1); + round(cast(50 as smallint), 1) | +================================= + 50 | + +taos> select round(cast(500 as int), 1); + round(cast(500 as int), 1) | +============================= + 500 | + +taos> select round(cast(50000 as bigint), 1); + round(cast(50000 as bigint), 1) | +================================== + 50000 | + +taos> select round(cast(5 as TINYINT UNSIGNED), 1); + round(cast(5 as tinyint unsigned), 1) | +======================================== + 5 | + +taos> select round(cast(50 as smallint unsigned), 1); + round(cast(50 as smallint unsigned), 1) | +========================================== + 50 | + +taos> select round(cast(500 as int unsigned), 1); + round(cast(500 as int unsigned), 1) | +====================================== + 500 | + +taos> select round(cast(50000 as bigint unsigned), 1) + round(cast(50000 as bigint unsigned), 1) | +=========================================== + 50000 | + diff --git a/tests/army/query/function/ans/sign.csv b/tests/army/query/function/ans/sign.csv index e15b4a74c7..45679af07f 100644 --- a/tests/army/query/function/ans/sign.csv +++ 
b/tests/army/query/function/ans/sign.csv @@ -121,6 +121,106 @@ taos> select SIGN(id) + id from ts_4893.meters order by ts limit 5 4.000000000000000 | 5.000000000000000 | +taos> select sign(cast(1 as tinyint)) + sign(cast(1 as tinyint)) | +=========================== + 1 | + +taos> select sign(cast(1 as smallint)) + sign(cast(1 as smallint)) | +============================ + 1 | + +taos> select sign(cast(1 as int)) + sign(cast(1 as int)) | +======================= + 1 | + +taos> select sign(cast(1 as bigint)) + sign(cast(1 as bigint)) | +========================== + 1 | + +taos> select sign(cast(1 as tinyint unsigned)) + sign(cast(1 as tinyint unsigned)) | +==================================== + 1 | + +taos> select sign(cast(1 as smallint unsigned)) + sign(cast(1 as smallint unsigned)) | +===================================== + 1 | + +taos> select sign(cast(1 as int unsigned)) + sign(cast(1 as int unsigned)) | +================================ + 1 | + +taos> select sign(cast(1 as bigint unsigned)) + sign(cast(1 as bigint unsigned)) | +=================================== + 1 | + +taos> select sign(cast(1 as float)) + sign(cast(1 as float)) | +========================= + 1.0000000e+00 | + +taos> select sign(cast(1 as double)) + sign(cast(1 as double)) | +============================ + 1.000000000000000 | + +taos> select sign(cast(NULL as tinyint)) + sign(cast(null as tinyint)) | +============================== + NULL | + +taos> select sign(cast(NULL as smallint)) + sign(cast(null as smallint)) | +=============================== + NULL | + +taos> select sign(cast(NULL as int)) + sign(cast(null as int)) | +========================== + NULL | + +taos> select sign(cast(NULL as bigint)) + sign(cast(null as bigint)) | +============================= + NULL | + +taos> select sign(cast(NULL as tinyint unsigned)) + sign(cast(null as tinyint unsigned)) | +======================================= + NULL | + +taos> select sign(cast(NULL as smallint unsigned)) + sign(cast(null as 
smallint unsigned)) | +======================================== + NULL | + +taos> select sign(cast(NULL as int unsigned)) + sign(cast(null as int unsigned)) | +=================================== + NULL | + +taos> select sign(cast(NULL as bigint unsigned)) + sign(cast(null as bigint unsigned)) | +====================================== + NULL | + +taos> select sign(cast(NULL as float)) + sign(cast(null as float)) | +============================ + NULL | + +taos> select sign(cast(NULL as double)) + sign(cast(null as double)) | +============================= + NULL | + taos> select SIGN(abs(10)) sign(abs(10)) | ======================== @@ -213,6 +313,34 @@ taos> select sign(current) from ts_4893.meters order by ts limit 10 1.0000000 | 1.0000000 | +taos> select sign(cast(current as float)) from ts_4893.d0 order by ts limit 10 + sign(cast(current as float)) | +=============================== + 1.0000000e+00 | + 1.0000000e+00 | + 1.0000000e+00 | + 1.0000000e+00 | + 1.0000000e+00 | + 1.0000000e+00 | + 1.0000000e+00 | + 1.0000000e+00 | + 1.0000000e+00 | + 1.0000000e+00 | + +taos> select sign(cast(current as float)) from ts_4893.meters order by ts limit 10 + sign(cast(current as float)) | +=============================== + 1.0000000e+00 | + 1.0000000e+00 | + 1.0000000e+00 | + 1.0000000e+00 | + 1.0000000e+00 | + 1.0000000e+00 | + 1.0000000e+00 | + 1.0000000e+00 | + 1.0000000e+00 | + 1.0000000e+00 | + taos> select sign(null) sign(null) | ======================== diff --git a/tests/army/query/function/ans/statecount.csv b/tests/army/query/function/ans/statecount.csv new file mode 100644 index 0000000000..d16b8443e2 --- /dev/null +++ b/tests/army/query/function/ans/statecount.csv @@ -0,0 +1,56 @@ + +taos> select statecount(1, 'GT', 1) + statecount(1, 'GT', 1) | +========================= + -1 | + +taos> select statecount(cast(1 as tinyint), 'GT', 1) + statecount(cast(1 as tinyint), 'GT', 1) | +========================================== + -1 | + +taos> select statecount(cast(100 
as smallint), 'GT', 1) + statecount(cast(100 as smallint), 'GT', 1) | +============================================= + 1 | + +taos> select statecount(cast(100000 as int), 'GT', 1) + statecount(cast(100000 as int), 'GT', 1) | +=========================================== + 1 | + +taos> select statecount(cast(10000000000 as bigint), 'GT', 1) + statecount(cast(10000000000 as bigint), 'GT', 1) | +=================================================== + 1 | + +taos> select statecount(cast(1 as tinyint unsigned), 'GT', 1) + statecount(cast(1 as tinyint unsigned), 'GT', 1) | +=================================================== + -1 | + +taos> select statecount(cast(100 as smallint unsigned), 'GT', 1) + statecount(cast(100 as smallint unsigned), 'GT', 1) | +====================================================== + 1 | + +taos> select statecount(cast(100000 as int unsigned), 'GT', 1) + statecount(cast(100000 as int unsigned), 'GT', 1) | +==================================================== + 1 | + +taos> select statecount(cast(10000000000 as bigint unsigned), 'GT', 1) + statecount(cast(10000000000 as bigint unsigned), 'GT', 1) | +============================================================ + 1 | + +taos> select statecount(cast(1.1 as float), 'GT', 1) + statecount(cast(1.1 as float), 'GT', 1) | +========================================== + 1 | + +taos> select statecount(cast(1.1 as double), 'GT', 1) + statecount(cast(1.1 as double), 'GT', 1) | +=========================================== + 1 | + diff --git a/tests/army/query/function/ans/sum.csv b/tests/army/query/function/ans/sum.csv new file mode 100644 index 0000000000..3444b3f710 --- /dev/null +++ b/tests/army/query/function/ans/sum.csv @@ -0,0 +1,56 @@ + +taos> select sum(1) + sum(1) | +======================== + 1 | + +taos> select sum(cast(1 as tinyint)) + sum(cast(1 as tinyint)) | +========================== + 1 | + +taos> select sum(cast(100 as smallint)) + sum(cast(100 as smallint)) | +============================= + 
100 | + +taos> select sum(cast(100000 as int)) + sum(cast(100000 as int)) | +=========================== + 100000 | + +taos> select sum(cast(10000000000 as bigint)) + sum(cast(10000000000 as bigint)) | +=================================== + 10000000000 | + +taos> select sum(cast(1 as tinyint unsigned)) + sum(cast(1 as tinyint unsigned)) | +=================================== + 1 | + +taos> select sum(cast(100 as smallint unsigned)) + sum(cast(100 as smallint unsigned)) | +====================================== + 100 | + +taos> select sum(cast(100000 as int unsigned)) + sum(cast(100000 as int unsigned)) | +==================================== + 100000 | + +taos> select sum(cast(10000000000 as bigint unsigned)) + sum(cast(10000000000 as bigint unsigned)) | +============================================ + 10000000000 | + +taos> select sum(cast(1.1 as float)) + sum(cast(1.1 as float)) | +============================ + 1.100000023841858 | + +taos> select sum(cast(1.1 as double)) + sum(cast(1.1 as double)) | +============================ + 1.100000000000000 | + diff --git a/tests/army/query/function/ans/trim.csv b/tests/army/query/function/ans/trim.csv index 6e2efbda51..eb821ffc91 100644 --- a/tests/army/query/function/ans/trim.csv +++ b/tests/army/query/function/ans/trim.csv @@ -179,6 +179,33 @@ taos> select trim(trailing '空格blank' from '空格blank空格中Tes空格blan =================================================================== 空格blank空格中Tes空格blank空 | +taos> select trim(both from nch1) from ts_4893.meters order by ts limit 5 + trim(both from nch1) | +================================= + novel | + 一二三四五六七八九十 | + update | + prision | + novel | + +taos> select trim(leading from nch1) from ts_4893.meters order by ts limit 5 + trim(leading from nch1) | +================================= + novel | + 一二三四五六七八九十 | + update | + prision | + novel | + +taos> select trim(trailing from nch1) from ts_4893.meters order by ts limit 5 + trim(trailing from nch1) | 
+================================= + novel | + 一二三四五六七八九十 | + update | + prision | + novel | + taos> select trim(nch2 from nch1) from ts_4893.meters where position(nch2 in nch1) != 0 order by ts limit 5 trim(nch2 from nch1) | ================================= diff --git a/tests/army/query/function/in/avg.in b/tests/army/query/function/in/avg.in new file mode 100644 index 0000000000..9284e8b1b7 --- /dev/null +++ b/tests/army/query/function/in/avg.in @@ -0,0 +1,11 @@ +select avg(1) +select avg(cast(1 as tinyint)) +select avg(cast(100 as smallint)) +select avg(cast(100000 as int)) +select avg(cast(10000000000 as bigint)) +select avg(cast(1 as tinyint unsigned)) +select avg(cast(100 as smallint unsigned)) +select avg(cast(100000 as int unsigned)) +select avg(cast(10000000000 as bigint unsigned)) +select avg(cast(1.1 as float)) +select avg(cast(1.1 as double)) \ No newline at end of file diff --git a/tests/army/query/function/in/leastsquares.in b/tests/army/query/function/in/leastsquares.in new file mode 100644 index 0000000000..2783a2a0c5 --- /dev/null +++ b/tests/army/query/function/in/leastsquares.in @@ -0,0 +1,11 @@ +select leastsquares(1, 1, 1) +select leastsquares(cast(1.1 as float), 1, 1) +select leastsquares(cast(1.1 as double), 1, 1) +select leastsquares(cast(1 as tinyint), 1, 1) +select leastsquares(cast(100 as smallint), 1, 1) +select leastsquares(cast(100000 as int), 1, 1) +select leastsquares(cast(10000000000 as bigint), 1, 1) +select leastsquares(cast(1 as tinyint unsigned), 1, 1) +select leastsquares(cast(100 as smallint unsigned), 1, 1) +select leastsquares(cast(100000 as int unsigned), 1, 1) +select leastsquares(cast(10000000000 as bigint unsigned), 1, 1) diff --git a/tests/army/query/function/in/max.in b/tests/army/query/function/in/max.in index efd4620f7b..336045afb5 100644 --- a/tests/army/query/function/in/max.in +++ b/tests/army/query/function/in/max.in @@ -26,3 +26,14 @@ select log(max(voltage) + 1) from ts_4893.meters select groupid, max(voltage) 
from ts_4893.meters group by groupid order by groupid select location, max(id) from ts_4893.meters group by location order by location select location, max(current) from ts_4893.meters group by location order by location +select max(1) +select max(cast(1 as tinyint)) +select max(cast(100 as smallint)) +select max(cast(100000 as int)) +select max(cast(10000000000 as bigint)) +select max(cast(1 as tinyint unsigned)) +select max(cast(100 as smallint unsigned)) +select max(cast(100000 as int unsigned)) +select max(cast(10000000000 as bigint unsigned)) +select max(cast(1.1 as float)) +select max(cast(1.1 as double)) diff --git a/tests/army/query/function/in/min.in b/tests/army/query/function/in/min.in index 910b8cc7bd..55d6853446 100644 --- a/tests/army/query/function/in/min.in +++ b/tests/army/query/function/in/min.in @@ -26,3 +26,14 @@ select log(min(voltage) + 1) from ts_4893.meters select groupid, min(voltage) from ts_4893.meters group by groupid order by groupid select location, min(current) from ts_4893.meters group by location order by location select location, min(id) from ts_4893.meters group by location order by location +select min(1) +select min(cast(1 as tinyint)) +select min(cast(100 as smallint)) +select min(cast(100000 as int)) +select min(cast(10000000000 as bigint)) +select min(cast(1 as tinyint unsigned)) +select min(cast(100 as smallint unsigned)) +select min(cast(100000 as int unsigned)) +select min(cast(10000000000 as bigint unsigned)) +select min(cast(1.1 as float)) +select min(cast(1.1 as double)) diff --git a/tests/army/query/function/in/round.in b/tests/army/query/function/in/round.in index bca293fc72..13dcb57117 100644 --- a/tests/army/query/function/in/round.in +++ b/tests/army/query/function/in/round.in @@ -47,3 +47,13 @@ select round(abs(voltage), 2) from ts_4893.meters limit 1 select round(pi() * phase, 3) from ts_4893.meters limit 1 select round(sqrt(voltage), 2) from ts_4893.meters limit 1 select round(log(current), 2) from 
ts_4893.meters limit 1 +select round(cast(1.0e+400 as float), 0); +select round(cast(1.0e+400 as double), 0); +select round(cast(5 as tinyint), 1); +select round(cast(50 as smallint), 1); +select round(cast(500 as int), 1); +select round(cast(50000 as bigint), 1); +select round(cast(5 as TINYINT UNSIGNED), 1); +select round(cast(50 as smallint unsigned), 1); +select round(cast(500 as int unsigned), 1); +select round(cast(50000 as bigint unsigned), 1); \ No newline at end of file diff --git a/tests/army/query/function/in/sign.in b/tests/army/query/function/in/sign.in index 436c884d36..25780eb31c 100644 --- a/tests/army/query/function/in/sign.in +++ b/tests/army/query/function/in/sign.in @@ -20,6 +20,26 @@ select SIGN(2) * SIGN(1) from ts_4893.meters limit 1 select SIGN(2) / SIGN(1) from ts_4893.meters limit 1 select SIGN(1) + id from ts_4893.meters order by ts limit 5 select SIGN(id) + id from ts_4893.meters order by ts limit 5 +select sign(cast(1 as tinyint)) +select sign(cast(1 as smallint)) +select sign(cast(1 as int)) +select sign(cast(1 as bigint)) +select sign(cast(1 as tinyint unsigned)) +select sign(cast(1 as smallint unsigned)) +select sign(cast(1 as int unsigned)) +select sign(cast(1 as bigint unsigned)) +select sign(cast(1 as float)) +select sign(cast(1 as double)) +select sign(cast(NULL as tinyint)) +select sign(cast(NULL as smallint)) +select sign(cast(NULL as int)) +select sign(cast(NULL as bigint)) +select sign(cast(NULL as tinyint unsigned)) +select sign(cast(NULL as smallint unsigned)) +select sign(cast(NULL as int unsigned)) +select sign(cast(NULL as bigint unsigned)) +select sign(cast(NULL as float)) +select sign(cast(NULL as double)) select SIGN(abs(10)) select SIGN(abs(-10)) select abs(SIGN(10)) @@ -34,6 +54,8 @@ select sign(-1) select sign(-10) select sign(current) from ts_4893.d0 order by ts limit 10 select sign(current) from ts_4893.meters order by ts limit 10 +select sign(cast(current as float)) from ts_4893.d0 order by ts limit 10 +select 
sign(cast(current as float)) from ts_4893.meters order by ts limit 10 select sign(null) select sign(25) select sign(-10) diff --git a/tests/army/query/function/in/statecount.in b/tests/army/query/function/in/statecount.in new file mode 100644 index 0000000000..df64918d61 --- /dev/null +++ b/tests/army/query/function/in/statecount.in @@ -0,0 +1,11 @@ +select statecount(1, 'GT', 1) +select statecount(cast(1 as tinyint), 'GT', 1) +select statecount(cast(100 as smallint), 'GT', 1) +select statecount(cast(100000 as int), 'GT', 1) +select statecount(cast(10000000000 as bigint), 'GT', 1) +select statecount(cast(1 as tinyint unsigned), 'GT', 1) +select statecount(cast(100 as smallint unsigned), 'GT', 1) +select statecount(cast(100000 as int unsigned), 'GT', 1) +select statecount(cast(10000000000 as bigint unsigned), 'GT', 1) +select statecount(cast(1.1 as float), 'GT', 1) +select statecount(cast(1.1 as double), 'GT', 1) diff --git a/tests/army/query/function/in/sum.in b/tests/army/query/function/in/sum.in new file mode 100644 index 0000000000..4caf5ecdbf --- /dev/null +++ b/tests/army/query/function/in/sum.in @@ -0,0 +1,11 @@ +select sum(1) +select sum(cast(1 as tinyint)) +select sum(cast(100 as smallint)) +select sum(cast(100000 as int)) +select sum(cast(10000000000 as bigint)) +select sum(cast(1 as tinyint unsigned)) +select sum(cast(100 as smallint unsigned)) +select sum(cast(100000 as int unsigned)) +select sum(cast(10000000000 as bigint unsigned)) +select sum(cast(1.1 as float)) +select sum(cast(1.1 as double)) diff --git a/tests/army/query/function/in/trim.in b/tests/army/query/function/in/trim.in index a0ad54dd7c..e96fb08675 100644 --- a/tests/army/query/function/in/trim.in +++ b/tests/army/query/function/in/trim.in @@ -34,6 +34,9 @@ select trim('空格blank' from '空格blank空格中Tes空格blank空') select trim(both '空格blank' from '空格blank空格中Tes空格blank空') select trim(leading '空格blank' from '空格blank空格中Tes空格blank空') select trim(trailing '空格blank' from '空格blank空格中Tes空格blank空') 
+select trim(both from nch1) from ts_4893.meters order by ts limit 5 +select trim(leading from nch1) from ts_4893.meters order by ts limit 5 +select trim(trailing from nch1) from ts_4893.meters order by ts limit 5 select trim(nch2 from nch1) from ts_4893.meters where position(nch2 in nch1) != 0 order by ts limit 5 select trim(both nch2 from nch1) from ts_4893.meters where position(nch2 in nch1) != 0 order by ts limit 5 select trim(leading nch2 from nch1) from ts_4893.meters where position(nch2 in nch1) != 0 order by ts limit 5 diff --git a/tests/army/query/function/test_function.py b/tests/army/query/function/test_function.py index d54460804a..c583d08cec 100644 --- a/tests/army/query/function/test_function.py +++ b/tests/army/query/function/test_function.py @@ -294,6 +294,18 @@ class TDTestCase(TBase): tdSql.error("select min(nonexistent_column) from ts_4893.meters;") + def test_sum(self): + self.test_normal_query_new("sum") + + def test_statecount(self): + self.test_normal_query_new("statecount") + + def test_avg(self): + self.test_normal_query_new("avg") + + def test_leastsquares(self): + self.test_normal_query_new("leastsquares") + def test_error(self): tdSql.error("select * from (select to_iso8601(ts, timezone()), timezone() from ts_4893.meters \ order by ts desc) limit 1000;", expectErrInfo="Invalid parameter data type : to_iso8601") # TS-5340 @@ -336,6 +348,10 @@ class TDTestCase(TBase): # agg function self.test_stddev_pop() self.test_varpop() + self.test_avg() + self.test_sum() + self.test_leastsquares() + self.test_statecount() # select function self.test_max() diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index a0046f9834..0ad26cbdf4 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -218,6 +218,8 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/slimit.py -Q 2 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/slimit.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 
2-query/slimit.py -Q 4 +,,n,system-test,./pytest.sh python3 ./test.py -f 2-query/ts-5761.py +,,n,system-test,./pytest.sh python3 ./test.py -f 2-query/ts-5761-scalemode.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/ts-5712.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/ts-4233.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/ts-4233.py -Q 2 @@ -315,6 +317,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmq_ts5466.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmq_ts-5473.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/td-32187.py +,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/td-33225.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmq_ts4563.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmq_td32526.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmq_replay.py @@ -1090,6 +1093,10 @@ ,,n,system-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/insertMix.py -N 3 ,,n,system-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/stt.py -N 3 ,,n,system-test,python3 ./test.py -f eco-system/meta/database/keep_time_offset.py +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/operator.py +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/operator.py -Q 2 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/operator.py -Q 3 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/operator.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f eco-system/manager/schema_change.py -N 3 -M 3 #tsim test @@ -1416,34 +1423,36 @@ ,,y,script,./test.sh -f tsim/stream/sliding.sim ,,y,script,./test.sh -f tsim/stream/state0.sim ,,y,script,./test.sh -f tsim/stream/state1.sim -#,,y,script,./test.sh -f tsim/stream/streamInterpDelete0.sim -#,,y,script,./test.sh -f tsim/stream/streamInterpDelete1.sim -#,,y,script,./test.sh -f tsim/stream/streamInterpDelete2.sim -#,,y,script,./test.sh -f tsim/stream/streamInterpError.sim +,,y,script,./test.sh -f 
tsim/stream/streamInterpDelete0.sim +,,y,script,./test.sh -f tsim/stream/streamInterpDelete1.sim +,,y,script,./test.sh -f tsim/stream/streamInterpDelete2.sim +,,y,script,./test.sh -f tsim/stream/streamInterpError.sim ,,y,script,./test.sh -f tsim/stream/streamInterpForceWindowClose.sim ,,y,script,./test.sh -f tsim/stream/streamInterpForceWindowClose1.sim ,,y,script,./test.sh -f tsim/stream/streamInterpFwcError.sim -#,,y,script,./test.sh -f tsim/stream/streamInterpHistory.sim +,,y,script,./test.sh -f tsim/stream/streamInterpHistory.sim #,,y,script,./test.sh -f tsim/stream/streamInterpHistory1.sim -#,,y,script,./test.sh -f tsim/stream/streamInterpLarge.sim -#,,y,script,./test.sh -f tsim/stream/streamInterpLinear0.sim -#,,y,script,./test.sh -f tsim/stream/streamInterpNext0.sim -#,,y,script,./test.sh -f tsim/stream/streamInterpOther.sim +,,y,script,./test.sh -f tsim/stream/streamInterpLarge.sim +,,y,script,./test.sh -f tsim/stream/streamInterpLinear0.sim +,,y,script,./test.sh -f tsim/stream/streamInterpNext0.sim +,,y,script,./test.sh -f tsim/stream/streamInterpOther.sim #,,y,script,./test.sh -f tsim/stream/streamInterpOther1.sim -#,,y,script,./test.sh -f tsim/stream/streamInterpPartitionBy0.sim +,,y,script,./test.sh -f tsim/stream/streamInterpPartitionBy0.sim +,,y,script,./test.sh -f tsim/stream/streamInterpPartitionBy1.sim #,,y,script,./test.sh -f tsim/stream/streamInterpPrev0.sim #,,y,script,./test.sh -f tsim/stream/streamInterpPrev1.sim -#,,y,script,./test.sh -f tsim/stream/streamInterpPrimaryKey0.sim -#,,y,script,./test.sh -f tsim/stream/streamInterpPrimaryKey1.sim -#,,y,script,./test.sh -f tsim/stream/streamInterpPrimaryKey2.sim -#,,y,script,./test.sh -f tsim/stream/streamInterpPrimaryKey3.sim -#,,y,script,./test.sh -f tsim/stream/streamInterpUpdate.sim -#,,y,script,./test.sh -f tsim/stream/streamInterpUpdate1.sim -#,,y,script,./test.sh -f tsim/stream/streamInterpValue0.sim -#,,y,script,./test.sh -f tsim/stream/streamPrimaryKey0.sim -#,,y,script,./test.sh -f 
tsim/stream/streamPrimaryKey1.sim -#,,y,script,./test.sh -f tsim/stream/streamPrimaryKey2.sim -#,,y,script,./test.sh -f tsim/stream/streamPrimaryKey3.sim +,,y,script,./test.sh -f tsim/stream/streamInterpPrimaryKey0.sim +,,y,script,./test.sh -f tsim/stream/streamInterpPrimaryKey1.sim +,,y,script,./test.sh -f tsim/stream/streamInterpPrimaryKey2.sim +,,y,script,./test.sh -f tsim/stream/streamInterpPrimaryKey3.sim +,,y,script,./test.sh -f tsim/stream/streamInterpUpdate.sim +,,y,script,./test.sh -f tsim/stream/streamInterpUpdate1.sim +,,y,script,./test.sh -f tsim/stream/streamInterpUpdate2.sim +,,y,script,./test.sh -f tsim/stream/streamInterpValue0.sim +,,y,script,./test.sh -f tsim/stream/streamPrimaryKey0.sim +,,y,script,./test.sh -f tsim/stream/streamPrimaryKey1.sim +,,y,script,./test.sh -f tsim/stream/streamPrimaryKey2.sim +,,y,script,./test.sh -f tsim/stream/streamPrimaryKey3.sim ,,y,script,./test.sh -f tsim/stream/streamTwaError.sim ,,y,script,./test.sh -f tsim/stream/streamTwaFwcFill.sim ,,y,script,./test.sh -f tsim/stream/streamTwaFwcFillPrimaryKey.sim diff --git a/tests/pytest/auto_crash_gen.py b/tests/pytest/auto_crash_gen.py index f6b31b4691..316f2ead0f 100755 --- a/tests/pytest/auto_crash_gen.py +++ b/tests/pytest/auto_crash_gen.py @@ -384,7 +384,8 @@ Core dir: {core_dir} if text_result == "success": send_msg(notification_robot_url, get_msg(text)) else: - send_msg(alert_robot_url, get_msg(text)) + send_msg(alert_robot_url, get_msg(text)) + send_msg(notification_robot_url, get_msg(text)) #send_msg(get_msg(text)) except Exception as e: diff --git a/tests/pytest/auto_crash_gen_valgrind.py b/tests/pytest/auto_crash_gen_valgrind.py index b346aca308..b7af68cd2f 100755 --- a/tests/pytest/auto_crash_gen_valgrind.py +++ b/tests/pytest/auto_crash_gen_valgrind.py @@ -419,6 +419,7 @@ Core dir: {core_dir} send_msg(notification_robot_url, get_msg(text)) else: send_msg(alert_robot_url, get_msg(text)) + send_msg(notification_robot_url, get_msg(text)) #send_msg(get_msg(text)) 
except Exception as e: diff --git a/tests/pytest/auto_crash_gen_valgrind_cluster.py b/tests/pytest/auto_crash_gen_valgrind_cluster.py index 522ad48640..df40b60967 100755 --- a/tests/pytest/auto_crash_gen_valgrind_cluster.py +++ b/tests/pytest/auto_crash_gen_valgrind_cluster.py @@ -406,7 +406,8 @@ Core dir: {core_dir} if text_result == "success": send_msg(notification_robot_url, get_msg(text)) else: - send_msg(alert_robot_url, get_msg(text)) + send_msg(alert_robot_url, get_msg(text)) + send_msg(notification_robot_url, get_msg(text)) #send_msg(get_msg(text)) except Exception as e: diff --git a/tests/run_all_ci_cases.sh b/tests/run_all_ci_cases.sh index 959af66d19..11670800b8 100644 --- a/tests/run_all_ci_cases.sh +++ b/tests/run_all_ci_cases.sh @@ -7,12 +7,120 @@ GREEN_DARK='\033[0;32m' GREEN_UNDERLINE='\033[4;32m' NC='\033[0m' -TDENGINE_DIR=/root/TDinternal/community +function print_color() { + local color="$1" + local message="$2" + echo -e "${color}${message}${NC}" +} + +# 初始化参数 +TDENGINE_DIR="/root/TDinternal/community" +BRANCH="" +SAVE_LOG="notsave" + +# 解析命令行参数 +while getopts "hd:b:t:s:" arg; do + case $arg in + d) + TDENGINE_DIR=$OPTARG + ;; + b) + BRANCH=$OPTARG + ;; + s) + SAVE_LOG=$OPTARG + ;; + h) + echo "Usage: $(basename $0) -d [TDengine_dir] -b [branch] -s [save ci case log]" + echo " -d [TDengine_dir] [default /root/TDinternal/community] " + echo " -b [branch] [default local branch] " + echo " -s [save/notsave] [default save ci case log in TDengine_dir/tests/ci_bak] " + exit 0 + ;; + ?) + echo "Usage: ./$(basename $0) -h" + exit 1 + ;; + esac +done + +# 检查是否提供了命令名称 +if [ -z "$TDENGINE_DIR" ]; then + echo "Error: TDengine dir is required." 
+ echo "Usage: $(basename $0) -d [TDengine_dir] -b [branch] -s [save ci case log] " + echo " -d [TDengine_dir] [default /root/TDinternal/community] " + echo " -b [branch] [default local branch] " + echo " -s [save/notsave] [default save ci case log in TDengine_dir/tests/ci_bak] " + exit 1 +fi -#echo "TDENGINE_DIR = $TDENGINE_DIR" +echo "TDENGINE_DIR = $TDENGINE_DIR" today=`date +"%Y%m%d"` -TDENGINE_ALLCI_REPORT=$TDENGINE_DIR/tests/all-ci-report-$today.log +TDENGINE_ALLCI_REPORT="$TDENGINE_DIR/tests/all-ci-report-$today.log" +BACKUP_DIR="$TDENGINE_DIR/tests/ci_bak" +mkdir -p "$BACKUP_DIR" +#cd $BACKUP_DIR && rm -rf * + + +function buildTDengine() { + print_color "$GREEN" "TDengine build start" + + # pull parent code + cd "$TDENGINE_DIR/../" + print_color "$GREEN" "git pull parent code..." + git remote prune origin > /dev/null + git remote update > /dev/null + + # pull tdengine code + cd $TDENGINE_DIR + print_color "$GREEN" "git pull tdengine code..." + git remote prune origin > /dev/null + git remote update > /dev/null + REMOTE_COMMIT=`git rev-parse --short remotes/origin/$branch` + LOCAL_COMMIT=`git rev-parse --short @` + print_color "$GREEN" " LOCAL: $LOCAL_COMMIT" + print_color "$GREEN" "REMOTE: $REMOTE_COMMIT" + + if [ "$LOCAL_COMMIT" == "$REMOTE_COMMIT" ]; then + print_color "$GREEN" "repo up-to-date" + else + print_color "$GREEN" "repo need to pull" + fi + + git reset --hard + git checkout -- . + git checkout $branch + git checkout -- . + git clean -f + git pull + + [ -d $TDENGINE_DIR/debug ] || mkdir $TDENGINE_DIR/debug + cd $TDENGINE_DIR/debug + + print_color "$GREEN" "rebuild.." 
+ LOCAL_COMMIT=`git rev-parse --short @` + + rm -rf * + makecmd="cmake -DBUILD_TEST=false -DBUILD_HTTP=false -DBUILD_DEPENDENCY_TESTS=0 -DBUILD_TOOLS=true -DBUILD_GEOS=true -DBUILD_TEST=true -DBUILD_CONTRIB=false ../../" + print_color "$GREEN" "$makecmd" + $makecmd + + make -j 8 install + + print_color "$GREEN" "TDengine build end" +} + + +# 检查并获取分支名称 +if [ -n "$BRANCH" ]; then + branch="$BRANCH" + print_color "$GREEN" "Testing branch: $branch " + print_color "$GREEN" "Build is required for this test!" + buildTDengine +else + print_color "$GREEN" "Build is not required for this test!" +fi function runCasesOneByOne () { @@ -20,23 +128,50 @@ function runCasesOneByOne () { if [[ "$line" != "#"* ]]; then cmd=`echo $line | cut -d',' -f 5` if [[ "$2" == "sim" ]] && [[ $line == *"script"* ]]; then + echo $cmd case=`echo $cmd | cut -d' ' -f 3` + case_file=`echo $case | tr -d ' /' ` start_time=`date +%s` - date +%F\ %T | tee -a $TDENGINE_ALLCI_REPORT && timeout 20m $cmd > /dev/null 2>&1 && \ - echo -e "${GREEN}$case success${NC}" | tee -a $TDENGINE_ALLCI_REPORT \ - || echo -e "${RED}$case failed${NC}" | tee -a $TDENGINE_ALLCI_REPORT + date +%F\ %T | tee -a $TDENGINE_ALLCI_REPORT && timeout 20m $cmd > $TDENGINE_DIR/tests/$case_file.log 2>&1 && \ + echo -e "${GREEN}$case success${NC}" | tee -a $TDENGINE_ALLCI_REPORT || \ + echo -e "${RED}$case failed${NC}" | tee -a $TDENGINE_ALLCI_REPORT + + # # 记录日志和备份 + # mkdir -p "$BACKUP_DIR/$case_file" + # tar --exclude='*.sock*' -czf "$BACKUP_DIR/$case_file/sim.tar.gz" -C "$TDENGINE_DIR/.." sim + # mv "$TDENGINE_DIR/tests/$case_file.log" "$BACKUP_DIR/$case_file" + + if [ "$SAVE_LOG" == "save" ]; then + mkdir -p "$BACKUP_DIR/$case_file" + tar --exclude='*.sock*' -czf "$BACKUP_DIR/$case_file/sim.tar.gz" -C "$TDENGINE_DIR/.." sim + mv "$TDENGINE_DIR/tests/$case_file.log" "$BACKUP_DIR/$case_file" + else + echo "This case not save log!" + fi + end_time=`date +%s` echo execution time of $case was `expr $end_time - $start_time`s. 
| tee -a $TDENGINE_ALLCI_REPORT - + elif [[ "$line" == *"$2"* ]]; then + echo $cmd if [[ "$cmd" == *"pytest.sh"* ]]; then cmd=`echo $cmd | cut -d' ' -f 2-20` fi - case=`echo $cmd | cut -d' ' -f 4-20` + case=`echo $cmd | cut -d' ' -f 4-20` + case_file=`echo $case | tr -d ' /' ` start_time=`date +%s` - date +%F\ %T | tee -a $TDENGINE_ALLCI_REPORT && timeout 20m $cmd > /dev/null 2>&1 && \ + date +%F\ %T | tee -a $TDENGINE_ALLCI_REPORT && timeout 20m $cmd > $TDENGINE_DIR/tests/$case_file.log 2>&1 && \ echo -e "${GREEN}$case success${NC}" | tee -a $TDENGINE_ALLCI_REPORT || \ echo -e "${RED}$case failed${NC}" | tee -a $TDENGINE_ALLCI_REPORT + + if [ "$SAVE_LOG" == "save" ]; then + mkdir -p "$BACKUP_DIR/$case_file" + tar --exclude='*.sock*' -czf "$BACKUP_DIR/$case_file/sim.tar.gz" -C "$TDENGINE_DIR/.." sim + mv "$TDENGINE_DIR/tests/$case_file.log" "$BACKUP_DIR/$case_file" + else + echo "This case not save log!" + fi + end_time=`date +%s` echo execution time of $case was `expr $end_time - $start_time`s. | tee -a $TDENGINE_ALLCI_REPORT fi @@ -45,62 +180,62 @@ function runCasesOneByOne () { } function runUnitTest() { - echo "=== Run unit test case ===" - echo " $TDENGINE_DIR/debug" - cd $TDENGINE_DIR/debug + print_color "$GREEN" "=== Run unit test case ===" + print_color "$GREEN" " $TDENGINE_DIR/../debug" + cd $TDENGINE_DIR/../debug ctest -j12 - echo "3.0 unit test done" + print_color "$GREEN" "3.0 unit test done" } function runSimCases() { - echo "=== Run sim cases ===" + print_color "$GREEN" "=== Run sim cases ===" cd $TDENGINE_DIR/tests/script - runCasesOneByOne $TDENGINE_DIR/tests/parallel_test/cases-test.task sim + runCasesOneByOne $TDENGINE_DIR/tests/parallel_test/cases.task sim totalSuccess=`grep 'sim success' $TDENGINE_ALLCI_REPORT | wc -l` if [ "$totalSuccess" -gt "0" ]; then - echo "### Total $totalSuccess SIM test case(s) succeed! ###" | tee -a $TDENGINE_ALLCI_REPORT + print_color "$GREEN" "### Total $totalSuccess SIM test case(s) succeed! 
###" | tee -a $TDENGINE_ALLCI_REPORT fi totalFailed=`grep 'sim failed\|fault' $TDENGINE_ALLCI_REPORT | wc -l` if [ "$totalFailed" -ne "0" ]; then - echo "### Total $totalFailed SIM test case(s) failed! ###" | tee -a $TDENGINE_ALLCI_REPORT + print_color "$RED" "### Total $totalFailed SIM test case(s) failed! ###" | tee -a $TDENGINE_ALLCI_REPORT fi } function runPythonCases() { - echo "=== Run python cases ===" + print_color "$GREEN" "=== Run python cases ===" cd $TDENGINE_DIR/tests/parallel_test - sed -i '/compatibility.py/d' cases-test.task + sed -i '/compatibility.py/d' cases.task # army cd $TDENGINE_DIR/tests/army - runCasesOneByOne ../parallel_test/cases-test.task army + runCasesOneByOne ../parallel_test/cases.task army # system-test cd $TDENGINE_DIR/tests/system-test - runCasesOneByOne ../parallel_test/cases-test.task system-test + runCasesOneByOne ../parallel_test/cases.task system-test # develop-test cd $TDENGINE_DIR/tests/develop-test - runCasesOneByOne ../parallel_test/cases-test.task develop-test + runCasesOneByOne ../parallel_test/cases.task develop-test totalSuccess=`grep 'py success' $TDENGINE_ALLCI_REPORT | wc -l` if [ "$totalSuccess" -gt "0" ]; then - echo "### Total $totalSuccess python test case(s) succeed! ###" | tee -a $TDENGINE_ALLCI_REPORT + print_color "$GREEN" "### Total $totalSuccess python test case(s) succeed! ###" | tee -a $TDENGINE_ALLCI_REPORT fi totalFailed=`grep 'py failed\|fault' $TDENGINE_ALLCI_REPORT | wc -l` if [ "$totalFailed" -ne "0" ]; then - echo "### Total $totalFailed python test case(s) failed! ###" | tee -a $TDENGINE_ALLCI_REPORT + print_color "$RED" "### Total $totalFailed python test case(s) failed! 
###" | tee -a $TDENGINE_ALLCI_REPORT fi } function runTest() { - echo "run Test" + print_color "$GREEN" "run Test" cd $TDENGINE_DIR [ -d sim ] && rm -rf sim @@ -119,20 +254,20 @@ function runTest() { } function stopTaosd { - echo "Stop taosd start" - systemctl stop taosd - PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` + print_color "$GREEN" "Stop taosd start" + systemctl stop taosd + PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` while [ -n "$PID" ] do - pkill -TERM -x taosd - sleep 1 - PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` + pkill -TERM -x taosd + sleep 1 + PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` done - echo "Stop tasod end" + print_color "$GREEN" "Stop tasod end" } function stopTaosadapter { - echo "Stop taosadapter" + print_color "$GREEN" "Stop taosadapter" systemctl stop taosadapter.service PID=`ps -ef|grep -w taosadapter | grep -v grep | awk '{print $2}'` while [ -n "$PID" ] @@ -141,18 +276,18 @@ function stopTaosadapter { sleep 1 PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` done - echo "Stop tasoadapter end" + print_color "$GREEN" "Stop tasoadapter end" } WORK_DIR=/root/ date >> $WORK_DIR/date.log -echo "Run ALL CI Test Cases" | tee -a $WORK_DIR/date.log +print_color "$GREEN" "Run all ci test cases" | tee -a $WORK_DIR/date.log stopTaosd runTest date >> $WORK_DIR/date.log -echo "End of CI Test Cases" | tee -a $WORK_DIR/date.log \ No newline at end of file +print_color "$GREEN" "End of ci test cases" | tee -a $WORK_DIR/date.log \ No newline at end of file diff --git a/tests/run_local_coverage.sh b/tests/run_local_coverage.sh new file mode 100755 index 0000000000..dfb0e8f9b7 --- /dev/null +++ b/tests/run_local_coverage.sh @@ -0,0 +1,407 @@ +#!/bin/bash + +# Color setting +RED='\033[0;31m' +GREEN='\033[1;32m' +GREEN_DARK='\033[0;32m' +GREEN_UNDERLINE='\033[4;32m' +NC='\033[0m' + +function print_color() { + local color="$1" + local message="$2" + echo -e 
"${color}${message}${NC}" +} + +# Initialization parameter +TDENGINE_DIR="/root/TDinternal/community" +BRANCH="" +TDENGINE_GCDA_DIR="/root/TDinternal/community/debug/" +LCOV_DIR="/usr/local/bin" + +# Parse command line parameters +while getopts "hd:b:f:c:u:i:l:" arg; do + case $arg in + d) + TDENGINE_DIR=$OPTARG + ;; + b) + BRANCH=$OPTARG + ;; + f) + TDENGINE_GCDA_DIR=$OPTARG + ;; + c) + TEST_CASE=$OPTARG + ;; + u) + UNIT_TEST_CASE=$OPTARG + ;; + i) + BRANCH_BUILD=$OPTARG + ;; + l) + LCOV_DIR=$OPTARG + ;; + h) + echo "Usage: $(basename $0) -d [TDengine dir] -b [Test branch] -i [Build test branch] -f [TDengine gcda dir] -c [Test single case/all cases] -u [Unit test case] -l [Lcov dir]" + echo " -d [TDengine dir] [default /root/TDinternal/community; eg: /home/TDinternal/community] " + echo " -b [Test branch] [default local branch; eg:cover/3.0] " + echo " -i [Build test branch] [default no:not build, but still install ;yes:will build and install ] " + echo " -f [TDengine gcda dir] [default /root/TDinternal/community/debug; eg:/root/TDinternal/community/debug/community/source/dnode/vnode/CMakeFiles/vnode.dir/src/tq/] " + echo " -c [Test single case/all cases] [default null; -c all : include parallel_test/longtimeruning_cases.task and all unit cases; -c task : include parallel_test/longtimeruning_cases.task; single case: eg: -c './test.sh -f tsim/stream/streamFwcIntervalFill.sim' ] " + echo " -u [Unit test case] [default null; eg: './schedulerTest' ] " + echo " -l [Lcov bin dir] [default /usr/local/bin; eg: '/root/TDinternal/community/tests/lcov-1.14/bin' ] " + exit 0 + ;; + ?) + echo "Usage: ./$(basename $0) -h" + exit 1 + ;; + esac +done + +# Check if the command name is provided +if [ -z "$TDENGINE_DIR" ]; then + echo "Error: TDengine dir is required." 
+ echo "Usage: $(basename $0) -d [TDengine dir] -b [Test branch] -i [Build test branch] -f [TDengine gcda dir] -c [Test single case/all cases] -u [Unit test case] -l [Lcov dir] " + echo " -d [TDengine dir] [default /root/TDinternal/community; eg: /home/TDinternal/community] " + echo " -b [Test branch] [default local branch; eg:cover/3.0] " + echo " -i [Build test branch] [default no:not build, but still install ;yes:will build and install ] " + echo " -f [TDengine gcda dir] [default /root/TDinternal/community/debug; eg:/root/TDinternal/community/debug/community/source/dnode/vnode/CMakeFiles/vnode.dir/src/tq/] " + echo " -c [Test casingle case/all casesse] [default null; -c all : include parallel_test/longtimeruning_cases.task and all unit cases; -c task : include parallel_test/longtimeruning_cases.task; single case: eg: -c './test.sh -f tsim/stream/streamFwcIntervalFill.sim' ] " + echo " -u [Unit test case] [default null; eg: './schedulerTest' ] " + echo " -l [Lcov bin dir] [default /usr/local/bin; eg: '/root/TDinternal/community/tests/lcov-1.14/bin' ] " + exit 1 +fi + + +echo "TDENGINE_DIR = $TDENGINE_DIR" +today=`date +"%Y%m%d"` +TDENGINE_ALLCI_REPORT="$TDENGINE_DIR/tests/all-ci-report-$today.log" + +function pullTDengine() { + print_color "$GREEN" "TDengine pull start" + + # pull parent code + cd "$TDENGINE_DIR/../" + print_color "$GREEN" "git pull parent code..." + + git reset --hard + git checkout -- . + git checkout $branch + git checkout -- . + git clean -f + git pull + + # pull tdengine code + cd $TDENGINE_DIR + print_color "$GREEN" "git pull tdengine code..." + + git reset --hard + git checkout -- . + git checkout $branch + git checkout -- . + git clean -f + git pull + + print_color "$GREEN" "TDengine pull end" +} + +function buildTDengine() { + print_color "$GREEN" "TDengine build start" + + [ -d $TDENGINE_DIR/debug ] || mkdir $TDENGINE_DIR/debug + cd $TDENGINE_DIR/debug + + print_color "$GREEN" "rebuild.." 
+ rm -rf * + makecmd="cmake -DCOVER=true -DBUILD_TEST=false -DBUILD_HTTP=false -DBUILD_DEPENDENCY_TESTS=0 -DBUILD_TOOLS=true -DBUILD_GEOS=true -DBUILD_TEST=true -DBUILD_CONTRIB=false ../../" + print_color "$GREEN" "$makecmd" + $makecmd + make -j 8 install +} + +# Check and get the branch name and build branch +if [ -n "$BRANCH" ] && [ -z "$BRANCH_BUILD" ] ; then + branch="$BRANCH" + print_color "$GREEN" "Testing branch: $branch " + print_color "$GREEN" "Build is required for this test!" + pullTDengine + buildTDengine +elif [ -n "$BRANCH_BUILD" ] && [ "$BRANCH_BUILD" == "yes" ] ; then + branch="$BRANCH" + print_color "$GREEN" "Testing branch: $branch " + print_color "$GREEN" "Build is required for this test!" + pullTDengine + buildTDengine +elif [ -n "$BRANCH_BUILD" ] && [ "$BRANCH_BUILD" == "no" ] ; then + branch="$BRANCH" + print_color "$GREEN" "Testing branch: $branch " + print_color "$GREEN" "not build,only install!" + cd "$TDENGINE_DIR/../" + git pull + cd "$TDENGINE_DIR/" + git pull + cd $TDENGINE_DIR/debug + make -j 8 install +else + print_color "$GREEN" "Build is not required for this test!" +fi + +function runCasesOneByOne () { + while read -r line; do + if [[ "$line" != "#"* ]]; then + cmd=`echo $line | cut -d',' -f 5` + if [[ "$2" == "sim" ]] && [[ $line == *"script"* ]]; then + echo $cmd + case=`echo $cmd | cut -d' ' -f 3` + case_file=`echo $case | tr -d ' /' ` + start_time=`date +%s` + date +%F\ %T | tee -a $TDENGINE_ALLCI_REPORT && timeout 20m $cmd > $TDENGINE_DIR/tests/$case_file.log 2>&1 && \ + echo -e "${GREEN}$case success${NC}" | tee -a $TDENGINE_ALLCI_REPORT || \ + echo -e "${RED}$case failed${NC}" | tee -a $TDENGINE_ALLCI_REPORT + end_time=`date +%s` + echo execution time of $case was `expr $end_time - $start_time`s. 
| tee -a $TDENGINE_ALLCI_REPORT + + elif [[ "$line" == *"$2"* ]]; then + echo $cmd + if [[ "$cmd" == *"pytest.sh"* ]]; then + cmd=`echo $cmd | cut -d' ' -f 2-20` + fi + case=`echo $cmd | cut -d' ' -f 4-20` + case_file=`echo $case | tr -d ' /' ` + start_time=`date +%s` + date +%F\ %T | tee -a $TDENGINE_ALLCI_REPORT && timeout 20m $cmd > $TDENGINE_DIR/tests/$case_file.log 2>&1 && \ + echo -e "${GREEN}$case success${NC}" | tee -a $TDENGINE_ALLCI_REPORT || \ + echo -e "${RED}$case failed${NC}" | tee -a $TDENGINE_ALLCI_REPORT + end_time=`date +%s` + echo execution time of $case was `expr $end_time - $start_time`s. | tee -a $TDENGINE_ALLCI_REPORT + fi + fi + done < $1 +} + +function runUnitTest() { + print_color "$GREEN" "=== Run unit test case ===" + print_color "$GREEN" " $TDENGINE_DIR/debug" + cd $TDENGINE_DIR/debug + ctest -j12 + print_color "$GREEN" "3.0 unit test done" +} + +function runSimCases() { + print_color "$GREEN" "=== Run sim cases ===" + + cd $TDENGINE_DIR/tests/script + runCasesOneByOne $TDENGINE_DIR/tests/parallel_test/longtimeruning_cases.task sim + + totalSuccess=`grep 'sim success' $TDENGINE_ALLCI_REPORT | wc -l` + if [ "$totalSuccess" -gt "0" ]; then + print_color "$GREEN" "### Total $totalSuccess SIM test case(s) succeed! ###" | tee -a $TDENGINE_ALLCI_REPORT + fi + + totalFailed=`grep 'sim failed\|fault' $TDENGINE_ALLCI_REPORT | wc -l` + if [ "$totalFailed" -ne "0" ]; then + print_color "$RED" "### Total $totalFailed SIM test case(s) failed! 
###" | tee -a $TDENGINE_ALLCI_REPORT + fi +} + +function runPythonCases() { + print_color "$GREEN" "=== Run python cases ===" + + cd $TDENGINE_DIR/tests/parallel_test + sed -i '/compatibility.py/d' longtimeruning_cases.task + + # army + cd $TDENGINE_DIR/tests/army + runCasesOneByOne ../parallel_test/longtimeruning_cases.task army + + # system-test + cd $TDENGINE_DIR/tests/system-test + runCasesOneByOne ../parallel_test/longtimeruning_cases.task system-test + + # develop-test + cd $TDENGINE_DIR/tests/develop-test + runCasesOneByOne ../parallel_test/longtimeruning_cases.task develop-test + + totalSuccess=`grep 'py success' $TDENGINE_ALLCI_REPORT | wc -l` + if [ "$totalSuccess" -gt "0" ]; then + print_color "$GREEN" "### Total $totalSuccess python test case(s) succeed! ###" | tee -a $TDENGINE_ALLCI_REPORT + fi + + totalFailed=`grep 'py failed\|fault' $TDENGINE_ALLCI_REPORT | wc -l` + if [ "$totalFailed" -ne "0" ]; then + print_color "$RED" "### Total $totalFailed python test case(s) failed! ###" | tee -a $TDENGINE_ALLCI_REPORT + fi +} + + +function runTest_all() { + print_color "$GREEN" "run Test" + + cd $TDENGINE_DIR + [ -d sim ] && rm -rf sim + [ -f $TDENGINE_ALLCI_REPORT ] && rm $TDENGINE_ALLCI_REPORT + + runUnitTest + runSimCases + runPythonCases + + stopTaosd + cd $TDENGINE_DIR/tests/script + find . -name '*.sql' | xargs rm -f + + cd $TDENGINE_DIR/tests/pytest + find . 
-name '*.sql' | xargs rm -f +} + + +function runTest() { + print_color "$GREEN" "run Test" + + cd $TDENGINE_DIR + [ -d sim ] && rm -rf sim + [ -f $TDENGINE_ALLCI_REPORT ] && rm $TDENGINE_ALLCI_REPORT + + if [ -n "$TEST_CASE" ] && [ "$TEST_CASE" != "all" ] && [ "$TEST_CASE" != "task" ]; then + TEST_CASE="$TEST_CASE" + print_color "$GREEN" "Test case: $TEST_CASE " + cd $TDENGINE_DIR/tests/script/ && $TEST_CASE + cd $TDENGINE_DIR/tests/army/ && $TEST_CASE + cd $TDENGINE_DIR/tests/system-test/ && $TEST_CASE + cd $TDENGINE_DIR/tests/develop-test/ && $TEST_CASE + elif [ "$TEST_CASE" == "all" ]; then + print_color "$GREEN" "Test case is : parallel_test/longtimeruning_cases.task and all unit cases" + runTest_all + elif [ "$TEST_CASE" == "task" ]; then + print_color "$GREEN" "Test case is only: parallel_test/longtimeruning_cases.task " + runSimCases + runPythonCases + elif [ -n "$UNIT_TEST_CASE" ]; then + UNIT_TEST_CASE="$UNIT_TEST_CASE" + cd $TDENGINE_DIR/debug/build/bin/ && $UNIT_TEST_CASE + else + print_color "$GREEN" "Test case is null" + fi + + + stopTaosd + cd $TDENGINE_DIR/tests/script + find . -name '*.sql' | xargs rm -f + + cd $TDENGINE_DIR/tests/pytest + find . 
-name '*.sql' | xargs rm -f +} + +function lcovFunc { + echo "collect data by lcov" + cd $TDENGINE_DIR + + if [ -n "$TDENGINE_GCDA_DIR" ]; then + TDENGINE_GCDA_DIR="$TDENGINE_GCDA_DIR" + print_color "$GREEN" "Test gcda file dir: $TDENGINE_GCDA_DIR " + else + print_color "$GREEN" "Test gcda file dir is default: /root/TDinternal/community/debug" + fi + + if [ -n "$LCOV_DIR" ]; then + LCOV_DIR="$LCOV_DIR" + print_color "$GREEN" "Lcov bin dir: $LCOV_DIR " + else + print_color "$GREEN" "Lcov bin dir is default" + fi + + # collect data + $LCOV_DIR/lcov -d "$TDENGINE_GCDA_DIR" -capture --rc lcov_branch_coverage=1 --rc genhtml_branch_coverage=1 --no-external -b $TDENGINE_DIR -o coverage.info + + # remove exclude paths + $LCOV_DIR/lcov --remove coverage.info \ + '*/contrib/*' '*/test/*' '*/packaging/*' '*/taos-tools/*' '*/taosadapter/*' '*/TSZ/*' \ + '*/AccessBridgeCalls.c' '*/ttszip.c' '*/dataInserter.c' '*/tlinearhash.c' '*/tsimplehash.c' '*/tsdbDiskData.c' '/*/enterprise/*' '*/docs/*' '*/sim/*'\ + '*/texpr.c' '*/runUdf.c' '*/schDbg.c' '*/syncIO.c' '*/tdbOs.c' '*/pushServer.c' '*/osLz4.c'\ + '*/tbase64.c' '*/tbuffer.c' '*/tdes.c' '*/texception.c' '*/examples/*' '*/tidpool.c' '*/tmempool.c'\ + '*/clientJniConnector.c' '*/clientTmqConnector.c' '*/version.cc'\ + '*/tthread.c' '*/tversion.c' '*/ctgDbg.c' '*/schDbg.c' '*/qwDbg.c' '*/tencode.h' \ + '*/shellAuto.c' '*/shellTire.c' '*/shellCommand.c'\ + '*/sql.c' '*/sql.y' '*/smaSnapshot.c' '*/smaCommit.c' '*/debug/*' '*/tests/*'\ + --rc lcov_branch_coverage=1 -o coverage.info + + # generate result + echo "generate result" + $LCOV_DIR/lcov -l --rc lcov_branch_coverage=1 coverage.info | tee -a $TDENGINE_COVERAGE_REPORT + +} + +function stopTaosd { + print_color "$GREEN" "Stop taosd start" + systemctl stop taosd + PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` + while [ -n "$PID" ] + do + pkill -TERM -x taosd + sleep 1 + PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` + done + print_color "$GREEN" "Stop 
tasod end" +} + +function stopTaosadapter { + print_color "$GREEN" "Stop taosadapter" + systemctl stop taosadapter.service + PID=`ps -ef|grep -w taosadapter | grep -v grep | awk '{print $2}'` + while [ -n "$PID" ] + do + pkill -TERM -x taosadapter + sleep 1 + PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` + done + print_color "$GREEN" "Stop tasoadapter end" + +} + +WORK_DIR=/root + +date >> $WORK_DIR/date.log +print_color "$GREEN" "Run local coverage test cases" | tee -a $WORK_DIR/date.log + +stopTaosd + +runTest + +lcovFunc + + +date >> $WORK_DIR/date.log +print_color "$GREEN" "End of local coverage test cases" | tee -a $WORK_DIR/date.log + + +# Define coverage information files and output directories +COVERAGE_INFO="$TDENGINE_DIR/coverage.info" +OUTPUT_DIR="$WORK_DIR/coverage_report" + +# Check whether the coverage information file exists +if [ ! -f "$COVERAGE_INFO" ]; then + echo "Error: $COVERAGE_INFO not found!" + exit 1 +fi + +if [ -n "$LCOV_DIR" ]; then + LCOV_DIR="$LCOV_DIR" + print_color "$GREEN" "Lcov bin dir: $LCOV_DIR " +else + print_color "$GREEN" "Lcov bin dir is default" +fi +# Generate local HTML reports +$LCOV_DIR/genhtml "$COVERAGE_INFO" --branch-coverage --function-coverage --output-directory "$OUTPUT_DIR" + +# Check whether the report was generated successfully +if [ $? 
-eq 0 ]; then + echo "HTML coverage report generated successfully in $OUTPUT_DIR" + echo "For more details : " + echo "http://192.168.1.61:7000/" +else + echo "Error generating HTML coverage report" + exit 1 +fi + diff --git a/tests/run_local_coverage_only_branch.sh b/tests/run_local_coverage_only_branch.sh new file mode 100755 index 0000000000..9329e448be --- /dev/null +++ b/tests/run_local_coverage_only_branch.sh @@ -0,0 +1,388 @@ +#!/bin/bash + +# Color setting +RED='\033[0;31m' +GREEN='\033[1;32m' +GREEN_DARK='\033[0;32m' +GREEN_UNDERLINE='\033[4;32m' +NC='\033[0m' + +function print_color() { + local color="$1" + local message="$2" + echo -e "${color}${message}${NC}" +} + +# Initialization parameter +TDENGINE_DIR="/root/TDinternal/community" +BRANCH="" +TDENGINE_GCDA_DIR="/root/TDinternal/community/debug/" + +# Parse command line parameters +while getopts "hd:b:f:c:u:i:" arg; do + case $arg in + d) + TDENGINE_DIR=$OPTARG + ;; + b) + BRANCH=$OPTARG + ;; + f) + TDENGINE_GCDA_DIR=$OPTARG + ;; + c) + TEST_CASE=$OPTARG + ;; + u) + UNIT_TEST_CASE=$OPTARG + ;; + i) + BRANCH_BUILD=$OPTARG + ;; + h) + echo "Usage: $(basename $0) -d [TDengine dir] -b [Test branch] -i [Build test branch] -f [TDengine gcda dir] -c [Test single case/all cases] -u [Unit test case]" + echo " -d [TDengine dir] [default /root/TDinternal/community; eg: /home/TDinternal/community] " + echo " -b [Test branch] [default local branch; eg:cover/3.0] " + echo " -i [Build test branch] [default no:not build, but still install ;yes:will build and install ] " + echo " -f [TDengine gcda dir] [default /root/TDinternal/community/debug; eg:/root/TDinternal/community/debug/community/source/dnode/vnode/CMakeFiles/vnode.dir/src/tq/] " + echo " -c [Test single case/all cases] [default null; -c all : include parallel_test/longtimeruning_cases.task and all unit cases; -c task : include parallel_test/longtimeruning_cases.task; single case: eg: -c './test.sh -f tsim/stream/streamFwcIntervalFill.sim' ] " + echo " -u 
[Unit test case] [default null; eg: './schedulerTest' ] " + exit 0 + ;; + ?) + echo "Usage: ./$(basename $0) -h" + exit 1 + ;; + esac +done + +# Check if the command name is provided +if [ -z "$TDENGINE_DIR" ]; then + echo "Error: TDengine dir is required." + echo "Usage: $(basename $0) -d [TDengine dir] -b [Test branch] -i [Build test branch] -f [TDengine gcda dir] -c [Test single case/all cases] -u [Unit test case] " + echo " -d [TDengine dir] [default /root/TDinternal/community; eg: /home/TDinternal/community] " + echo " -b [Test branch] [default local branch; eg:cover/3.0] " + echo " -i [Build test branch] [default no:not build, but still install ;yes:will build and install ] " + echo " -f [TDengine gcda dir] [default /root/TDinternal/community/debug; eg:/root/TDinternal/community/debug/community/source/dnode/vnode/CMakeFiles/vnode.dir/src/tq/] " + echo " -c [Test casingle case/all casesse] [default null; -c all : include parallel_test/longtimeruning_cases.task and all unit cases; -c task : include parallel_test/longtimeruning_cases.task; single case: eg: -c './test.sh -f tsim/stream/streamFwcIntervalFill.sim' ] " + echo " -u [Unit test case] [default null; eg: './schedulerTest' ] " + exit 1 +fi + + +echo "TDENGINE_DIR = $TDENGINE_DIR" +today=`date +"%Y%m%d"` +TDENGINE_ALLCI_REPORT="$TDENGINE_DIR/tests/all-ci-report-$today.log" + +function pullTDengine() { + print_color "$GREEN" "TDengine pull start" + + # pull parent code + cd "$TDENGINE_DIR/../" + print_color "$GREEN" "git pull parent code..." + + git reset --hard + git checkout -- . + git checkout $branch + git checkout -- . + git clean -f + git pull + + # pull tdengine code + cd $TDENGINE_DIR + print_color "$GREEN" "git pull tdengine code..." + + git reset --hard + git checkout -- . + git checkout $branch + git checkout -- . 
+ git clean -f + git pull + + print_color "$GREEN" "TDengine pull end" +} + +function buildTDengine() { + print_color "$GREEN" "TDengine build start" + + [ -d $TDENGINE_DIR/debug ] || mkdir $TDENGINE_DIR/debug + cd $TDENGINE_DIR/debug + + print_color "$GREEN" "rebuild.." + rm -rf * + makecmd="cmake -DCOVER=true -DBUILD_TEST=false -DBUILD_HTTP=false -DBUILD_DEPENDENCY_TESTS=0 -DBUILD_TOOLS=true -DBUILD_GEOS=true -DBUILD_TEST=true -DBUILD_CONTRIB=false ../../" + print_color "$GREEN" "$makecmd" + $makecmd + make -j 8 install +} + +# Check and get the branch name and build branch +if [ -n "$BRANCH" ] && [ -z "$BRANCH_BUILD" ] ; then + branch="$BRANCH" + print_color "$GREEN" "Testing branch: $branch " + print_color "$GREEN" "Build is required for this test!" + pullTDengine + buildTDengine +elif [ -n "$BRANCH_BUILD" ] && [ "$BRANCH_BUILD" == "yes" ] ; then + branch="$BRANCH" + print_color "$GREEN" "Testing branch: $branch " + print_color "$GREEN" "Build is required for this test!" + pullTDengine + buildTDengine +elif [ -n "$BRANCH_BUILD" ] && [ "$BRANCH_BUILD" == "no" ] ; then + branch="$BRANCH" + print_color "$GREEN" "Testing branch: $branch " + print_color "$GREEN" "not build,only install!" + cd "$TDENGINE_DIR/../" + git pull + cd "$TDENGINE_DIR/" + git pull + cd $TDENGINE_DIR/debug + make -j 8 install +else + print_color "$GREEN" "Build is not required for this test!" 
+fi + +function runCasesOneByOne () { + while read -r line; do + if [[ "$line" != "#"* ]]; then + cmd=`echo $line | cut -d',' -f 5` + if [[ "$2" == "sim" ]] && [[ $line == *"script"* ]]; then + echo $cmd + case=`echo $cmd | cut -d' ' -f 3` + case_file=`echo $case | tr -d ' /' ` + start_time=`date +%s` + date +%F\ %T | tee -a $TDENGINE_ALLCI_REPORT && timeout 20m $cmd > $TDENGINE_DIR/tests/$case_file.log 2>&1 && \ + echo -e "${GREEN}$case success${NC}" | tee -a $TDENGINE_ALLCI_REPORT || \ + echo -e "${RED}$case failed${NC}" | tee -a $TDENGINE_ALLCI_REPORT + end_time=`date +%s` + echo execution time of $case was `expr $end_time - $start_time`s. | tee -a $TDENGINE_ALLCI_REPORT + + elif [[ "$line" == *"$2"* ]]; then + echo $cmd + if [[ "$cmd" == *"pytest.sh"* ]]; then + cmd=`echo $cmd | cut -d' ' -f 2-20` + fi + case=`echo $cmd | cut -d' ' -f 4-20` + case_file=`echo $case | tr -d ' /' ` + start_time=`date +%s` + date +%F\ %T | tee -a $TDENGINE_ALLCI_REPORT && timeout 20m $cmd > $TDENGINE_DIR/tests/$case_file.log 2>&1 && \ + echo -e "${GREEN}$case success${NC}" | tee -a $TDENGINE_ALLCI_REPORT || \ + echo -e "${RED}$case failed${NC}" | tee -a $TDENGINE_ALLCI_REPORT + end_time=`date +%s` + echo execution time of $case was `expr $end_time - $start_time`s. | tee -a $TDENGINE_ALLCI_REPORT + fi + fi + done < $1 +} + +function runUnitTest() { + print_color "$GREEN" "=== Run unit test case ===" + print_color "$GREEN" " $TDENGINE_DIR/debug" + cd $TDENGINE_DIR/debug + ctest -j12 + print_color "$GREEN" "3.0 unit test done" +} + +function runSimCases() { + print_color "$GREEN" "=== Run sim cases ===" + + cd $TDENGINE_DIR/tests/script + runCasesOneByOne $TDENGINE_DIR/tests/parallel_test/longtimeruning_cases.task sim + + totalSuccess=`grep 'sim success' $TDENGINE_ALLCI_REPORT | wc -l` + if [ "$totalSuccess" -gt "0" ]; then + print_color "$GREEN" "### Total $totalSuccess SIM test case(s) succeed! 
###" | tee -a $TDENGINE_ALLCI_REPORT + fi + + totalFailed=`grep 'sim failed\|fault' $TDENGINE_ALLCI_REPORT | wc -l` + if [ "$totalFailed" -ne "0" ]; then + print_color "$RED" "### Total $totalFailed SIM test case(s) failed! ###" | tee -a $TDENGINE_ALLCI_REPORT + fi +} + +function runPythonCases() { + print_color "$GREEN" "=== Run python cases ===" + + cd $TDENGINE_DIR/tests/parallel_test + sed -i '/compatibility.py/d' longtimeruning_cases.task + + # army + cd $TDENGINE_DIR/tests/army + runCasesOneByOne ../parallel_test/longtimeruning_cases.task army + + # system-test + cd $TDENGINE_DIR/tests/system-test + runCasesOneByOne ../parallel_test/longtimeruning_cases.task system-test + + # develop-test + cd $TDENGINE_DIR/tests/develop-test + runCasesOneByOne ../parallel_test/longtimeruning_cases.task develop-test + + totalSuccess=`grep 'py success' $TDENGINE_ALLCI_REPORT | wc -l` + if [ "$totalSuccess" -gt "0" ]; then + print_color "$GREEN" "### Total $totalSuccess python test case(s) succeed! ###" | tee -a $TDENGINE_ALLCI_REPORT + fi + + totalFailed=`grep 'py failed\|fault' $TDENGINE_ALLCI_REPORT | wc -l` + if [ "$totalFailed" -ne "0" ]; then + print_color "$RED" "### Total $totalFailed python test case(s) failed! ###" | tee -a $TDENGINE_ALLCI_REPORT + fi +} + + +function runTest_all() { + print_color "$GREEN" "run Test" + + cd $TDENGINE_DIR + [ -d sim ] && rm -rf sim + [ -f $TDENGINE_ALLCI_REPORT ] && rm $TDENGINE_ALLCI_REPORT + + runUnitTest + runSimCases + runPythonCases + + stopTaosd + cd $TDENGINE_DIR/tests/script + find . -name '*.sql' | xargs rm -f + + cd $TDENGINE_DIR/tests/pytest + find . 
-name '*.sql' | xargs rm -f +} + + +function runTest() { + print_color "$GREEN" "run Test" + + cd $TDENGINE_DIR + [ -d sim ] && rm -rf sim + [ -f $TDENGINE_ALLCI_REPORT ] && rm $TDENGINE_ALLCI_REPORT + + if [ -n "$TEST_CASE" ] && [ "$TEST_CASE" != "all" ] && [ "$TEST_CASE" != "task" ]; then + TEST_CASE="$TEST_CASE" + print_color "$GREEN" "Test case: $TEST_CASE " + cd $TDENGINE_DIR/tests/script/ && $TEST_CASE + cd $TDENGINE_DIR/tests/army/ && $TEST_CASE + cd $TDENGINE_DIR/tests/system-test/ && $TEST_CASE + cd $TDENGINE_DIR/tests/develop-test/ && $TEST_CASE + elif [ "$TEST_CASE" == "all" ]; then + print_color "$GREEN" "Test case is : parallel_test/longtimeruning_cases.task and all unit cases" + runTest_all + elif [ "$TEST_CASE" == "task" ]; then + print_color "$GREEN" "Test case is only: parallel_test/longtimeruning_cases.task " + runSimCases + runPythonCases + elif [ -n "$UNIT_TEST_CASE" ]; then + UNIT_TEST_CASE="$UNIT_TEST_CASE" + cd $TDENGINE_DIR/debug/build/bin/ && $UNIT_TEST_CASE + else + print_color "$GREEN" "Test case is null" + fi + + + stopTaosd + cd $TDENGINE_DIR/tests/script + find . -name '*.sql' | xargs rm -f + + cd $TDENGINE_DIR/tests/pytest + find . 
-name '*.sql' | xargs rm -f +} + +function lcovFunc { + echo "collect data by lcov" + cd $TDENGINE_DIR + + if [ -n "$TDENGINE_GCDA_DIR" ]; then + TDENGINE_GCDA_DIR="$TDENGINE_GCDA_DIR" + print_color "$GREEN" "Test gcda file dir: $TDENGINE_GCDA_DIR " + else + print_color "$GREEN" "Test gcda file dir is default: /root/TDinternal/community/debug" + fi + + # collect data + lcov -d "$TDENGINE_GCDA_DIR" -capture --rc lcov_branch_coverage=0 --rc genhtml_branch_coverage=1 --no-external -b $TDENGINE_DIR -o coverage.info + + # remove exclude paths + lcov --remove coverage.info \ + '*/contrib/*' '*/test/*' '*/packaging/*' '*/taos-tools/*' '*/taosadapter/*' '*/TSZ/*' \ + '*/AccessBridgeCalls.c' '*/ttszip.c' '*/dataInserter.c' '*/tlinearhash.c' '*/tsimplehash.c' '*/tsdbDiskData.c' '/*/enterprise/*' '*/docs/*' '*/sim/*'\ + '*/texpr.c' '*/runUdf.c' '*/schDbg.c' '*/syncIO.c' '*/tdbOs.c' '*/pushServer.c' '*/osLz4.c'\ + '*/tbase64.c' '*/tbuffer.c' '*/tdes.c' '*/texception.c' '*/examples/*' '*/tidpool.c' '*/tmempool.c'\ + '*/clientJniConnector.c' '*/clientTmqConnector.c' '*/version.cc' '*/branch/*'\ + '*/tthread.c' '*/tversion.c' '*/ctgDbg.c' '*/schDbg.c' '*/qwDbg.c' '*/tencode.h' \ + '*/shellAuto.c' '*/shellTire.c' '*/shellCommand.c'\ + '*/sql.c' '*/sql.y' '*/smaSnapshot.c' '*/smaCommit.c' '*/debug/*' '*/tests/*'\ + --rc lcov_branch_coverage=1 -o coverage.info + + # generate result + echo "generate result" + lcov -l --rc lcov_branch_coverage=1 coverage.info | tee -a $TDENGINE_COVERAGE_REPORT + +} + +function stopTaosd { + print_color "$GREEN" "Stop taosd start" + systemctl stop taosd + PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` + while [ -n "$PID" ] + do + pkill -TERM -x taosd + sleep 1 + PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` + done + print_color "$GREEN" "Stop tasod end" +} + +function stopTaosadapter { + print_color "$GREEN" "Stop taosadapter" + systemctl stop taosadapter.service + PID=`ps -ef|grep -w taosadapter | grep -v grep | awk 
'{print $2}'` + while [ -n "$PID" ] + do + pkill -TERM -x taosadapter + sleep 1 + PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` + done + print_color "$GREEN" "Stop tasoadapter end" + +} + +WORK_DIR=/root + +date >> $WORK_DIR/date.log +print_color "$GREEN" "Run local coverage test cases" | tee -a $WORK_DIR/date.log + +stopTaosd + +runTest + +lcovFunc + + +date >> $WORK_DIR/date.log +print_color "$GREEN" "End of local coverage test cases" | tee -a $WORK_DIR/date.log + + +# Define coverage information files and output directories +COVERAGE_INFO="$TDENGINE_DIR/coverage.info" +OUTPUT_DIR="$WORK_DIR/coverage_report" + +# Check whether the coverage information file exists +if [ ! -f "$COVERAGE_INFO" ]; then + echo "Error: $COVERAGE_INFO not found!" + exit 1 +fi + +# Generate local HTML reports +genhtml "$COVERAGE_INFO" --branch-coverage --function-coverage --output-directory "$OUTPUT_DIR" + +# Check whether the report was generated successfully +if [ $? -eq 0 ]; then + echo "HTML coverage report generated successfully in $OUTPUT_DIR" + echo "For more details : " + echo "http://192.168.1.61:7000/" +else + echo "Error generating HTML coverage report" + exit 1 +fi + diff --git a/tests/script/api/makefile b/tests/script/api/makefile index b871c5f3ff..a270a6c0ed 100644 --- a/tests/script/api/makefile +++ b/tests/script/api/makefile @@ -29,6 +29,8 @@ exe: # gcc $(CFLAGS) ./stmt2-get-fields.c -o $(ROOT)stmt2-get-fields $(LFLAGS) # gcc $(CFLAGS) ./stmt2-nohole.c -o $(ROOT)stmt2-nohole $(LFLAGS) gcc $(CFLAGS) ./stmt-crash.c -o $(ROOT)stmt-crash $(LFLAGS) + gcc $(CFLAGS) ./stmt-insert-dupkeys.c -o $(ROOT)stmt-insert-dupkeys $(LFLAGS) + gcc $(CFLAGS) ./stmt2-insert-dupkeys.c -o $(ROOT)stmt2-insert-dupkeys $(LFLAGS) clean: rm $(ROOT)batchprepare @@ -47,3 +49,5 @@ clean: rm $(ROOT)stmt2-get-fields rm $(ROOT)stmt2-nohole rm $(ROOT)stmt-crash + rm $(ROOT)stmt-insert-dupkeys + rm $(ROOT)stmt2-insert-dupkeys diff --git a/tests/script/api/stmt-insert-dupkeys.c 
b/tests/script/api/stmt-insert-dupkeys.c new file mode 100644 index 0000000000..b564fbb21d --- /dev/null +++ b/tests/script/api/stmt-insert-dupkeys.c @@ -0,0 +1,234 @@ +// compile with +// gcc -o stmt-insert-dupkeys stmt-insert-dupkeys.c -ltaos +#include +#include +#include +#include "taos.h" + +#define NUMROWS 3 + +/** + * @brief execute sql only and ignore result set + * + * @param taos + * @param sql + */ +void executeSQL(TAOS *taos, const char *sql) { + TAOS_RES *res = taos_query(taos, sql); + int code = taos_errno(res); + if (code != 0) { + printf("%s\n", taos_errstr(res)); + taos_free_result(res); + taos_close(taos); + exit(EXIT_FAILURE); + } + taos_free_result(res); +} + +/** + * @brief exit program when error occur. + * + * @param stmt + * @param code + * @param msg + */ +void checkErrorCode(TAOS_STMT *stmt, int code, const char *msg) { + if (code != 0) { + printf("%s. error: %s\n", msg, taos_stmt_errstr(stmt)); + exit(EXIT_FAILURE); + } +} + +void prepareBindTags(TAOS_MULTI_BIND *tags) { + // bind table name and tags + char *location = "California.SanFrancisco"; + int groupId = 2; + tags[0].buffer_type = TSDB_DATA_TYPE_BINARY; + tags[0].buffer_length = strlen(location); + tags[0].length = (int32_t *)&tags[0].buffer_length; + tags[0].buffer = location; + tags[0].is_null = NULL; + + tags[1].buffer_type = TSDB_DATA_TYPE_INT; + tags[1].buffer_length = sizeof(int); + tags[1].length = (int32_t *)&tags[1].buffer_length; + tags[1].buffer = &groupId; + tags[1].is_null = NULL; +} + +void prepareBindParams(TAOS_MULTI_BIND *params, int64_t *ts, float *current, int *voltage, float *phase) { + // is_null array + char is_null[NUMROWS] = {0}; + // length array + int32_t int64Len[NUMROWS] = {sizeof(int64_t)}; + int32_t floatLen[NUMROWS] = {sizeof(float)}; + int32_t intLen[NUMROWS] = {sizeof(int)}; + + params[0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + params[0].buffer_length = sizeof(int64_t); + params[0].buffer = ts; + params[0].length = int64Len; + params[0].is_null = 
is_null; + params[0].num = NUMROWS; + + params[1].buffer_type = TSDB_DATA_TYPE_FLOAT; + params[1].buffer_length = sizeof(float); + params[1].buffer = current; + params[1].length = floatLen; + params[1].is_null = is_null; + params[1].num = NUMROWS; + + params[2].buffer_type = TSDB_DATA_TYPE_INT; + params[2].buffer_length = sizeof(int); + params[2].buffer = voltage; + params[2].length = intLen; + params[2].is_null = is_null; + params[2].num = NUMROWS; + + params[3].buffer_type = TSDB_DATA_TYPE_FLOAT; + params[3].buffer_length = sizeof(float); + params[3].buffer = phase; + params[3].length = floatLen; + params[3].is_null = is_null; + params[3].num = NUMROWS; +} + +/** + * @brief insert data using stmt API + * + * @param taos + */ +void insertData(TAOS *taos, int64_t *ts, float *current, int *voltage, float *phase) { + // init + TAOS_STMT *stmt = taos_stmt_init(taos); + + // prepare + const char *sql = "INSERT INTO ? USING meters TAGS(?, ?) values(?, ?, ?, ?)"; + int code = taos_stmt_prepare(stmt, sql, 0); + checkErrorCode(stmt, code, "failed to execute taos_stmt_prepare"); + + // bind table name and tags + TAOS_MULTI_BIND tags[2]; + prepareBindTags(tags); + code = taos_stmt_set_tbname_tags(stmt, "d1001", tags); + checkErrorCode(stmt, code, "failed to execute taos_stmt_set_tbname_tags"); + + TAOS_MULTI_BIND params[4]; + prepareBindParams(params, ts, current, voltage, phase); + + code = taos_stmt_bind_param_batch(stmt, params); // bind batch + checkErrorCode(stmt, code, "failed to execute taos_stmt_bind_param_batch"); + + code = taos_stmt_add_batch(stmt); // add batch + checkErrorCode(stmt, code, "failed to execute taos_stmt_add_batch"); + + // execute + code = taos_stmt_execute(stmt); + checkErrorCode(stmt, code, "failed to execute taos_stmt_execute"); + + int affectedRows = taos_stmt_affected_rows(stmt); + printf("successfully inserted %d rows\n", affectedRows); + + // close + (void)taos_stmt_close(stmt); +} + +void insertDataInterlace(TAOS *taos, int64_t *ts, float 
*current, int *voltage, float *phase) { + // init with interlace mode + TAOS_STMT_OPTIONS op; + op.reqId = 0; + op.singleStbInsert = true; + op.singleTableBindOnce = true; + TAOS_STMT *stmt = taos_stmt_init_with_options(taos, &op); + + // prepare + const char *sql = "INSERT INTO ? values(?, ?, ?, ?)"; + int code = taos_stmt_prepare(stmt, sql, 0); + checkErrorCode(stmt, code, "failed to execute taos_stmt_prepare"); + + // bind table name and tags + TAOS_MULTI_BIND tags[2]; + prepareBindTags(tags); + code = taos_stmt_set_tbname_tags(stmt, "d1001", tags); + checkErrorCode(stmt, code, "failed to execute taos_stmt_set_tbname_tags"); + + TAOS_MULTI_BIND params[4]; + prepareBindParams(params, ts, current, voltage, phase); + + code = taos_stmt_bind_param_batch(stmt, params); // bind batch + checkErrorCode(stmt, code, "failed to execute taos_stmt_bind_param_batch"); + + code = taos_stmt_add_batch(stmt); // add batch + checkErrorCode(stmt, code, "failed to execute taos_stmt_add_batch"); + + // execute + code = taos_stmt_execute(stmt); + checkErrorCode(stmt, code, "failed to execute taos_stmt_execute"); + + int affectedRows = taos_stmt_affected_rows(stmt); + printf("successfully inserted %d rows\n", affectedRows); + + // close + (void)taos_stmt_close(stmt); +} + +int main() { + TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 6030); + if (taos == NULL) { + printf("failed to connect to server\n"); + exit(EXIT_FAILURE); + } + executeSQL(taos, "DROP DATABASE IF EXISTS power"); + executeSQL(taos, "CREATE DATABASE power"); + executeSQL(taos, "USE power"); + executeSQL(taos, + "CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), " + "groupId INT)"); + + // initial insert, expect insert 3 rows + int64_t ts0[] = {1648432611234, 1648432611345, 1648432611456}; + float current0[] = {10.1f, 10.2f, 10.3f}; + int voltage0[] = {216, 217, 218}; + float phase0[] = {0.31f, 0.32f, 0.33f}; + insertData(taos, ts0, current0, 
voltage0, phase0); + + // insert with interlace mode, send non-duplicate ts, expect insert 3 overlapped rows + int64_t ts1[] = {1648432611234, 1648432611345, 1648432611456}; + int voltage1[] = {219, 220, 221}; + insertDataInterlace(taos, ts1, current0, voltage1, phase0); + + // insert with interlace mode, send duplicate ts, expect insert 2 rows with dups merged + int64_t ts2[] = {1648432611678, 1648432611678, 1648432611789}; + int voltage2[] = {222, 223, 224}; + insertDataInterlace(taos, ts2, current0, voltage2, phase0); + + // insert with interlace mode, send disordered rows, expect insert 3 sorted rows + int64_t ts3[] = {1648432611900, 1648432611890, 1648432611910}; + int voltage3[] = {225, 226, 227}; + insertDataInterlace(taos, ts3, current0, voltage3, phase0); + + // insert with interlace mode, send disordered and duplicate rows, expect insert 2 sorted and dup-merged rows + int64_t ts4[] = {1648432611930, 1648432611920, 1648432611930}; + int voltage4[] = {228, 229, 230}; + insertDataInterlace(taos, ts4, current0, voltage4, phase0); + + taos_close(taos); + taos_cleanup(); + + // final results + // taos> select * from d1001; + // ts | current | voltage | phase | + // ====================================================================================== + // 2022-03-28 09:56:51.234 | 10.1000004 | 219 | 0.3100000 | + // 2022-03-28 09:56:51.345 | 10.1999998 | 220 | 0.3200000 | + // 2022-03-28 09:56:51.456 | 10.3000002 | 221 | 0.3300000 | + // 2022-03-28 09:56:51.678 | 10.1999998 | 223 | 0.3200000 | + // 2022-03-28 09:56:51.789 | 10.3000002 | 224 | 0.3300000 | + // 2022-03-28 09:56:51.890 | 10.1999998 | 226 | 0.3200000 | + // 2022-03-28 09:56:51.900 | 10.1000004 | 225 | 0.3100000 | + // 2022-03-28 09:56:51.910 | 10.3000002 | 227 | 0.3300000 | + // 2022-03-28 09:56:51.920 | 10.1999998 | 229 | 0.3200000 | + // 2022-03-28 09:56:51.930 | 10.3000002 | 230 | 0.3300000 | + // Query OK, 10 row(s) in set (0.005083s) +} + diff --git a/tests/script/api/stmt2-insert-dupkeys.c 
b/tests/script/api/stmt2-insert-dupkeys.c new file mode 100644 index 0000000000..adab3ddf39 --- /dev/null +++ b/tests/script/api/stmt2-insert-dupkeys.c @@ -0,0 +1,235 @@ +#include +#include +#include +#include +#include +#include "taos.h" + +int CTB_NUMS = 3; +int ROW_NUMS = 3; + +void do_query(TAOS* taos, const char* sql) { + TAOS_RES* result = taos_query(taos, sql); + int code = taos_errno(result); + if (code) { + printf("failed to query: %s, reason:%s\n", sql, taos_errstr(result)); + taos_free_result(result); + return; + } + taos_free_result(result); +} + +void createdb(TAOS* taos) { + do_query(taos, "drop database if exists db"); + do_query(taos, "create database db"); + do_query(taos, "create stable db.stb (ts timestamp, b binary(10)) tags(t1 int, t2 binary(10))"); + do_query(taos, "use db"); +} + +#define INIT(tbs, ts, ts_len, b, b_len, tags, paramv) \ +do { \ + /* tbname */ \ + tbs = (char**)malloc(CTB_NUMS * sizeof(char*)); \ + for (int i = 0; i < CTB_NUMS; i++) { \ + tbs[i] = (char*)malloc(sizeof(char) * 20); \ + sprintf(tbs[i], "ctb_%d", i); \ + } \ + /* col params */ \ + ts = (int64_t**)malloc(CTB_NUMS * sizeof(int64_t*)); \ + b = (char**)malloc(CTB_NUMS * sizeof(char*)); \ + ts_len = (int*)malloc(ROW_NUMS * sizeof(int)); \ + b_len = (int*)malloc(ROW_NUMS * sizeof(int)); \ + for (int i = 0; i < ROW_NUMS; i++) { \ + ts_len[i] = sizeof(int64_t); \ + b_len[i] = 1; \ + } \ + for (int i = 0; i < CTB_NUMS; i++) { \ + ts[i] = (int64_t*)malloc(ROW_NUMS * sizeof(int64_t)); \ + b[i] = (char*)malloc(ROW_NUMS * sizeof(char)); \ + for (int j = 0; j < ROW_NUMS; j++) { \ + ts[i][j] = 1591060628000 + j; \ + b[i][j] = (char)('a' + j); \ + } \ + } \ + /*tag params */ \ + int t1 = 0; \ + int t1len = sizeof(int); \ + int t2len = 3; \ + /* bind params */ \ + paramv = (TAOS_STMT2_BIND**)malloc(CTB_NUMS * sizeof(TAOS_STMT2_BIND*)); \ + tags = (TAOS_STMT2_BIND**)malloc(CTB_NUMS * sizeof(TAOS_STMT2_BIND*)); \ + for (int i = 0; i < CTB_NUMS; i++) { \ + /* create tags */ \ + 
tags[i] = (TAOS_STMT2_BIND*)malloc(2 * sizeof(TAOS_STMT2_BIND)); \ + tags[i][0] = (TAOS_STMT2_BIND){TSDB_DATA_TYPE_INT, &t1, &t1len, NULL, 0}; \ + tags[i][1] = (TAOS_STMT2_BIND){TSDB_DATA_TYPE_BINARY, "after", &t2len, NULL, 0}; \ + /* create col params */ \ + paramv[i] = (TAOS_STMT2_BIND*)malloc(2 * sizeof(TAOS_STMT2_BIND)); \ + paramv[i][0] = (TAOS_STMT2_BIND){TSDB_DATA_TYPE_TIMESTAMP, &ts[i][0], &ts_len[0], NULL, ROW_NUMS}; \ + paramv[i][1] = (TAOS_STMT2_BIND){TSDB_DATA_TYPE_BINARY, &b[i][0], &b_len[0], NULL, ROW_NUMS}; \ + } \ +} while (0) + +#define UINIT(tbs, ts, ts_len, b, b_len, tags, paramv) \ +do { \ + for (int i = 0; i < CTB_NUMS; i++) { \ + free(tbs[i]); \ + } \ + free(tbs); \ + for (int i = 0; i < CTB_NUMS; i++) { \ + free(ts[i]); \ + free(b[i]); \ + } \ + free(ts); \ + free(b); \ + free(ts_len); \ + free(b_len); \ + for (int i = 0; i < CTB_NUMS; i++) { \ + free(tags[i]); \ + free(paramv[i]); \ + } \ + free(tags); \ + free(paramv); \ +} while (0) + +void insert(TAOS* taos, char **tbs, TAOS_STMT2_BIND **tags, TAOS_STMT2_BIND **paramv, const char* sql) +{ + clock_t start, end; + double cpu_time_used; + + TAOS_STMT2_OPTION option = {0, true, true, NULL, NULL}; + TAOS_STMT2 *stmt = taos_stmt2_init(taos, &option); + int code = taos_stmt2_prepare(stmt, sql, 0); + if (code != 0) { + printf("failed to execute taos_stmt2_prepare. 
error:%s\n", taos_stmt2_error(stmt)); + taos_stmt2_close(stmt); + exit(EXIT_FAILURE); + } + + // bind + start = clock(); + TAOS_STMT2_BINDV bindv = {CTB_NUMS, tbs, tags, paramv}; + if (taos_stmt2_bind_param(stmt, &bindv, -1)) { + printf("failed to execute taos_stmt2_bind_param statement.error:%s\n", taos_stmt2_error(stmt)); + taos_stmt2_close(stmt); + exit(EXIT_FAILURE); + } + end = clock(); + cpu_time_used = ((double)(end - start)) / CLOCKS_PER_SEC; + printf("stmt2-bind [%s] insert Time used: %f seconds\n", sql, cpu_time_used); + start = clock(); + + // exec + if (taos_stmt2_exec(stmt, NULL)) { + printf("failed to execute taos_stmt2_exec statement.error:%s\n", taos_stmt2_error(stmt)); + taos_stmt2_close(stmt); + exit(EXIT_FAILURE); + } + end = clock(); + cpu_time_used = ((double)(end - start)) / CLOCKS_PER_SEC; + printf("stmt2-exec [%s] insert Time used: %f seconds\n", sql, cpu_time_used); + + taos_stmt2_close(stmt); +} + +void insert_dist(TAOS* taos, const char *sql) { + char **tbs, **b; + int64_t **ts; + int *ts_len, *b_len; + TAOS_STMT2_BIND **paramv, **tags; + + INIT(tbs, ts, ts_len, b, b_len, tags, paramv); + + insert(taos, tbs, tags, paramv, sql); + + UINIT(tbs, ts, ts_len, b, b_len, tags, paramv); +} + +void insert_dup_rows(TAOS* taos, const char *sql) { + char **tbs, **b; + int64_t **ts; + int *ts_len, *b_len; + TAOS_STMT2_BIND **paramv, **tags; + + INIT(tbs, ts, ts_len, b, b_len, tags, paramv); + + // insert duplicate rows + for (int i = 0; i < CTB_NUMS; i++) { + for (int j = 0; j < ROW_NUMS; j++) { + ts[i][j] = 1591060628000; + b[i][j] = (char)('x' + j); + } + } + for (int i = 0; i < CTB_NUMS; i++) { + paramv[i][0] = (TAOS_STMT2_BIND){TSDB_DATA_TYPE_TIMESTAMP, &ts[i][0], &ts_len[0], NULL, ROW_NUMS}; + paramv[i][1] = (TAOS_STMT2_BIND){TSDB_DATA_TYPE_BINARY, &b[i][0], &b_len[0], NULL, ROW_NUMS}; + } + insert(taos, tbs, tags, paramv, sql); + + UINIT(tbs, ts, ts_len, b, b_len, tags, paramv); +} + +void insert_dup_tables(TAOS* taos, const char *sql) { + char 
**tbs, **b; + int64_t **ts; + int *ts_len, *b_len; + TAOS_STMT2_BIND **paramv, **tags; + + INIT(tbs, ts, ts_len, b, b_len, tags, paramv); + + for (int i = 0; i < CTB_NUMS; i++) { + sprintf(tbs[i], "ctb_%d", i % 2); + } + + for (int i = 0; i < CTB_NUMS; i++) { + paramv[i][0] = (TAOS_STMT2_BIND){TSDB_DATA_TYPE_TIMESTAMP, &ts[i][0], &ts_len[0], NULL, ROW_NUMS}; + paramv[i][1] = (TAOS_STMT2_BIND){TSDB_DATA_TYPE_BINARY, &b[i][0], &b_len[0], NULL, ROW_NUMS}; + } + insert(taos, tbs, tags, paramv, sql); + + UINIT(tbs, ts, ts_len, b, b_len, tags, paramv); +} + +int main() { + TAOS* taos = taos_connect("localhost", "root", "taosdata", "", 0); + if (!taos) { + printf("failed to connect to db, reason:%s\n", taos_errstr(taos)); + exit(EXIT_FAILURE); + } + + createdb(taos); + // insert distinct rows + insert_dist(taos, "insert into db.? using db.stb tags(?,?)values(?,?)"); + // insert duplicate rows + insert_dup_rows(taos, "insert into db.? values(?,?)"); + // insert duplicate tables + insert_dup_tables(taos, "insert into db.? 
values(?,?)"); + + taos_close(taos); + taos_cleanup(); +} + +// final results +// taos> select * from ctb_0; +// ts | b | +// ========================================= +// 2020-06-02 09:17:08.000 | z | +// 2020-06-02 09:17:08.001 | b | +// 2020-06-02 09:17:08.002 | c | +// Query OK, 3 row(s) in set (0.003975s) +// +// taos> select * from ctb_1; +// ts | b | +// ========================================= +// 2020-06-02 09:17:08.000 | z | +// 2020-06-02 09:17:08.001 | b | +// 2020-06-02 09:17:08.002 | c | +// Query OK, 3 row(s) in set (0.007241s) + +// taos> select * from ctb_2; +// ts | b | +// ========================================= +// 2020-06-02 09:17:08.000 | z | +// 2020-06-02 09:17:08.001 | b | +// 2020-06-02 09:17:08.002 | c | +// Query OK, 3 row(s) in set (0.005443s) diff --git a/tests/script/api/stmt2-performance.c b/tests/script/api/stmt2-performance.c index aa8e5b9450..a539affaf1 100644 --- a/tests/script/api/stmt2-performance.c +++ b/tests/script/api/stmt2-performance.c @@ -5,9 +5,9 @@ #include #include "taos.h" -int CTB_NUMS = 1000; -int ROW_NUMS = 10; -int CYC_NUMS = 5; +int CTB_NUMS = 2; +int ROW_NUMS = 2; +int CYC_NUMS = 2; void do_query(TAOS* taos, const char* sql) { TAOS_RES* result = taos_query(taos, sql); @@ -57,7 +57,7 @@ void do_stmt(TAOS* taos, const char* sql) { return; } int fieldNum = 0; - TAOS_FIELD_STB* pFields = NULL; + TAOS_FIELD_ALL* pFields = NULL; // code = taos_stmt2_get_stb_fields(stmt, &fieldNum, &pFields); // if (code != 0) { // printf("failed get col,ErrCode: 0x%x, ErrMessage: %s.\n", code, taos_stmt2_error(stmt)); @@ -74,7 +74,7 @@ void do_stmt(TAOS* taos, const char* sql) { for (int i = 0; i < CTB_NUMS; i++) { tbs[i] = (char*)malloc(sizeof(char) * 20); sprintf(tbs[i], "ctb_%d", i); - createCtb(taos, tbs[i]); + // createCtb(taos, tbs[i]); } for (int r = 0; r < CYC_NUMS; r++) { // col params @@ -138,7 +138,24 @@ void do_stmt(TAOS* taos, const char* sql) { end = clock(); cpu_time_used = ((double)(end - start)) / CLOCKS_PER_SEC; 
printf("stmt2-exec [%s] insert Time used: %f seconds\n", sql, cpu_time_used); + + for (int i = 0; i < CTB_NUMS; i++) { + free(tags[i]); + free(paramv[i]); + free(ts[i]); + free(b[i]); + } + free(ts); + free(b); + free(ts_len); + free(b_len); + free(paramv); + free(tags); } + for (int i = 0; i < CTB_NUMS; i++) { + free(tbs[i]); + } + free(tbs); // taos_stmt2_free_fields(stmt, pFields); taos_stmt2_close(stmt); @@ -200,10 +217,9 @@ int main() { exit(1); } - sleep(3); do_stmt(taos, "insert into db.stb(tbname,ts,b,t1,t2) values(?,?,?,?,?)"); // do_stmt(taos, "insert into db.? using db.stb tags(?,?)values(?,?)"); - do_taosc(taos); + // do_taosc(taos); taos_close(taos); taos_cleanup(); } diff --git a/tests/script/api/test.sh b/tests/script/api/test.sh new file mode 100755 index 0000000000..8b93484fe1 --- /dev/null +++ b/tests/script/api/test.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +make clean + +make + +pgrep taosd || taosd >> /dev/null 2>&1 & + +sleep 10 + +./dbTableRoute localhost +./batchprepare localhost +./stmt-crash localhost +./insertSameTs localhost +./passwdTest localhost +./whiteListTest localhost +./tmqViewTest + diff --git a/tests/script/tsim/join/join_explain.sim b/tests/script/tsim/join/join_explain.sim index f9d2f3eac1..2858999de5 100644 --- a/tests/script/tsim/join/join_explain.sim +++ b/tests/script/tsim/join/join_explain.sim @@ -39,6 +39,7 @@ sql explain analyze verbose true select a.ts from sta a join sta b on a.col1 = b sql explain analyze verbose true select a.ts from sta a join sta b where a.ts=b.ts; sql_error explain analyze verbose true select a.ts from sta a ,sta b on a.ts=b.ts; sql explain analyze verbose true select a.ts from sta a ,sta b where a.ts=b.ts; +sql explain analyze verbose true select a.ts from sta a ,sta b where a.t1 = b.t1 and a.ts=b.ts; sql explain analyze verbose true select a.ts from sta a ,sta b where a.ts=b.ts and a.col1 + 1 = b.col1; sql explain analyze verbose true select b.col1 from sta a ,sta b where a.ts=b.ts and a.col1 + 1 = 
b.col1 order by a.ts; sql explain analyze verbose true select b.col1 from sta a join sta b join sta c where a.ts=b.ts and b.ts = c.ts order by a.ts; diff --git a/tests/script/tsim/scalar/in.sim b/tests/script/tsim/scalar/in.sim index a2164675f0..0ffe6f5100 100644 --- a/tests/script/tsim/scalar/in.sim +++ b/tests/script/tsim/scalar/in.sim @@ -44,7 +44,7 @@ if $data20 != @ Time Range: [-9223372036854775808, 9223372036854775807]@ th endi sql select * from tb1 where fbool in (0, 3); -if $rows != 5 then +if $rows != 3 then return -1 endi @@ -69,7 +69,7 @@ if $rows != 10 then endi sql select * from st1 where tbool in (0, 3); -if $rows != 15 then +if $rows != 5 then return -1 endi diff --git a/tests/script/tsim/show/basic.sim b/tests/script/tsim/show/basic.sim index 4cabf907c6..4c4f9da912 100644 --- a/tests/script/tsim/show/basic.sim +++ b/tests/script/tsim/show/basic.sim @@ -254,5 +254,7 @@ if $rows <= 0 then return -1 endi +system taos -P7100 -d db -s " show create table db.t0" + system sh/exec.sh -n dnode1 -s stop -x SIGINT system sh/exec.sh -n dnode2 -s stop -x SIGINT diff --git a/tests/script/tsim/stream/pauseAndResume.sim b/tests/script/tsim/stream/pauseAndResume.sim index 1f4caf5c03..4cc193dd79 100644 --- a/tests/script/tsim/stream/pauseAndResume.sim +++ b/tests/script/tsim/stream/pauseAndResume.sim @@ -398,4 +398,204 @@ endi print ===== step5 over +print ===== step6 +sql drop database if exists test6; +sql create database test7 vgroups 1; +sql use test7; +sql create stable st(ts timestamp, a int, b int , c int, d double) tags(ta int,tb int,tc int); +sql create table ts1 using st tags(1,1,1); + +sql create stream streams8 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 watermark 1d into streamt8 as select _wstart, count(*) c1 from st interval(10s); +sql create stream streams9 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 watermark 1d into streamt9 as select _wstart, count(*) c1 from st partition by tbname interval(10s); + +run tsim/stream/checkTaskStatus.sim + 
+$loop_count = 0 +loop7: + +$loop_count = $loop_count + 1 +if $loop_count == 40 then + return -1 +endi + +sleep 500 + +sql select status, * from information_schema.ins_streams where status != "ready"; + +if $rows != 0 then + print $data00 $data01 $data02 $data03 $data04 + print $data10 $data11 $data12 $data13 $data14 + print $data20 $data21 $data22 $data23 $data24 + print $data30 $data31 $data32 $data33 $data34 + print $data40 $data41 $data42 $data43 $data44 + print $data50 $data51 $data52 $data53 $data54 + goto loop7 +endi + +sql pause stream streams8; + +sql pause stream streams9; + +sql pause stream streams8; + +sql pause stream streams9; + +sleep 1000 + +sql pause stream streams8; + +sql pause stream streams9; + +sleep 1000 + +$loop_count = 0 +loop80: + +$loop_count = $loop_count + 1 +if $loop_count == 40 then + print pause stream failed + goto end_step_6 +endi + +sleep 1000 + +sql select status, * from information_schema.ins_stream_tasks where status != "paused"; + +if $rows != 2 then + print $data00 $data01 $data02 $data03 $data04 + print $data10 $data11 $data12 $data13 $data14 + print $data20 $data21 $data22 $data23 $data24 + print $data30 $data31 $data32 $data33 $data34 + print $data40 $data41 $data42 $data43 $data44 + print $data50 $data51 $data52 $data53 $data54 + goto loop80 +endi + +$loop_count = 0 +loop8: + +$loop_count = $loop_count + 1 +if $loop_count == 40 then + return -1 +endi + +sleep 1000 + +sql select status, * from information_schema.ins_streams where status == "paused"; + +if $rows != 2 then + print $data00 $data01 $data02 $data03 $data04 + print $data10 $data11 $data12 $data13 $data14 + print $data20 $data21 $data22 $data23 $data24 + print $data30 $data31 $data32 $data33 $data34 + print $data40 $data41 $data42 $data43 $data44 + print $data50 $data51 $data52 $data53 $data54 + goto loop8 +endi + + +sql resume stream streams8; + +sql resume stream streams9; + +sql resume stream streams8; + +sql resume stream streams9; + +sleep 1000 + +sql 
resume stream streams8; + +sql resume stream streams9; + +sleep 1000 + + +$loop_count = 0 +loop90: + +$loop_count = $loop_count + 1 +if $loop_count == 40 then + print pause stream failed + goto end_step_6 +endi + +sleep 1000 + +sql select status, * from information_schema.ins_stream_tasks where status == "paused"; + +if $rows != 0 then + print $data00 $data01 $data02 $data03 $data04 + print $data10 $data11 $data12 $data13 $data14 + print $data20 $data21 $data22 $data23 $data24 + print $data30 $data31 $data32 $data33 $data34 + print $data40 $data41 $data42 $data43 $data44 + print $data50 $data51 $data52 $data53 $data54 + goto loop90 +endi + +$loop_count = 0 +loop9: + +$loop_count = $loop_count + 1 +if $loop_count == 40 then + return -1 +endi + +sleep 1000 + +sql select status, * from information_schema.ins_streams where status != "paused"; + +if $rows != 2 then + print $data00 $data01 $data02 $data03 $data04 + print $data10 $data11 $data12 $data13 $data14 + print $data20 $data21 $data22 $data23 $data24 + print $data30 $data31 $data32 $data33 $data34 + print $data40 $data41 $data42 $data43 $data44 + print $data50 $data51 $data52 $data53 $data54 + goto loop9 +endi + +run tsim/stream/checkTaskStatus.sim + +sql insert into ts1 values(1648791213001,1,12,3,1.0); + +$loop_count = 0 +loop11: + +$loop_count = $loop_count + 1 +if $loop_count == 40 then + return -1 +endi + +sleep 1000 + +sql select * from streamt8; + +if $rows != 1 then + print $data00 $data01 $data02 $data03 $data04 + print $data10 $data11 $data12 $data13 $data14 + print $data20 $data21 $data22 $data23 $data24 + print $data30 $data31 $data32 $data33 $data34 + print $data40 $data41 $data42 $data43 $data44 + print $data50 $data51 $data52 $data53 $data54 + goto loop11 +endi + +sql select * from streamt9; + +if $rows != 1 then + print $data00 $data01 $data02 $data03 $data04 + print $data10 $data11 $data12 $data13 $data14 + print $data20 $data21 $data22 $data23 $data24 + print $data30 $data31 $data32 $data33 
$data34 + print $data40 $data41 $data42 $data43 $data44 + print $data50 $data51 $data52 $data53 $data54 + goto loop11 +endi + +end_step_6: + +print ===== step6 over + system sh/stop_dnodes.sh diff --git a/tests/script/tsim/stream/streamInterpDelete0.sim b/tests/script/tsim/stream/streamInterpDelete0.sim index 21bac13e4a..440d7ce413 100644 --- a/tests/script/tsim/stream/streamInterpDelete0.sim +++ b/tests/script/tsim/stream/streamInterpDelete0.sim @@ -4,6 +4,8 @@ system sh/exec.sh -n dnode1 -s start sleep 50 sql connect +sql alter local 'streamCoverage' '1'; + print step1 print =============== create database sql create database test vgroups 1; diff --git a/tests/script/tsim/stream/streamInterpDelete1.sim b/tests/script/tsim/stream/streamInterpDelete1.sim index 162da175e8..9413cf8918 100644 --- a/tests/script/tsim/stream/streamInterpDelete1.sim +++ b/tests/script/tsim/stream/streamInterpDelete1.sim @@ -4,6 +4,8 @@ system sh/exec.sh -n dnode1 -s start sleep 50 sql connect +sql alter local 'streamCoverage' '1'; + print step1 print =============== create database sql create database test vgroups 1; diff --git a/tests/script/tsim/stream/streamInterpDelete2.sim b/tests/script/tsim/stream/streamInterpDelete2.sim index be27dcda49..fb53678eff 100644 --- a/tests/script/tsim/stream/streamInterpDelete2.sim +++ b/tests/script/tsim/stream/streamInterpDelete2.sim @@ -4,6 +4,8 @@ system sh/exec.sh -n dnode1 -s start sleep 50 sql connect +sql alter local 'streamCoverage' '1'; + print step1 print =============== create database sql create database test vgroups 1; diff --git a/tests/script/tsim/stream/streamInterpError.sim b/tests/script/tsim/stream/streamInterpError.sim index 53a92df772..f0f4e80ade 100644 --- a/tests/script/tsim/stream/streamInterpError.sim +++ b/tests/script/tsim/stream/streamInterpError.sim @@ -4,6 +4,8 @@ system sh/exec.sh -n dnode1 -s start sleep 50 sql connect +sql alter local 'streamCoverage' '1'; + print step2 sql create database test2 vgroups 1; diff --git 
a/tests/script/tsim/stream/streamInterpHistory.sim b/tests/script/tsim/stream/streamInterpHistory.sim index b9685ebf05..9737e7d155 100644 --- a/tests/script/tsim/stream/streamInterpHistory.sim +++ b/tests/script/tsim/stream/streamInterpHistory.sim @@ -4,6 +4,8 @@ system sh/exec.sh -n dnode1 -s start sleep 50 sql connect +sql alter local 'streamCoverage' '1'; + print step1 print =============== create database sql create database test vgroups 1; diff --git a/tests/script/tsim/stream/streamInterpLarge.sim b/tests/script/tsim/stream/streamInterpLarge.sim index 85203d2d9e..2626f49b6a 100644 --- a/tests/script/tsim/stream/streamInterpLarge.sim +++ b/tests/script/tsim/stream/streamInterpLarge.sim @@ -4,6 +4,8 @@ system sh/exec.sh -n dnode1 -s start sleep 50 sql connect +sql alter local 'streamCoverage' '1'; + print step1 print =============== create database sql create database test vgroups 1; diff --git a/tests/script/tsim/stream/streamInterpLinear0.sim b/tests/script/tsim/stream/streamInterpLinear0.sim index 7d4b28d545..c52540895b 100644 --- a/tests/script/tsim/stream/streamInterpLinear0.sim +++ b/tests/script/tsim/stream/streamInterpLinear0.sim @@ -4,6 +4,8 @@ system sh/exec.sh -n dnode1 -s start sleep 50 sql connect +sql alter local 'streamCoverage' '1'; + print step1 print =============== create database sql create database test vgroups 1; diff --git a/tests/script/tsim/stream/streamInterpNext0.sim b/tests/script/tsim/stream/streamInterpNext0.sim index abdbeda634..4395031aec 100644 --- a/tests/script/tsim/stream/streamInterpNext0.sim +++ b/tests/script/tsim/stream/streamInterpNext0.sim @@ -4,6 +4,8 @@ system sh/exec.sh -n dnode1 -s start sleep 50 sql connect +sql alter local 'streamCoverage' '1'; + print step1 print =============== create database sql create database test vgroups 1; diff --git a/tests/script/tsim/stream/streamInterpOther.sim b/tests/script/tsim/stream/streamInterpOther.sim index 8553e67ec8..4572bfca56 100644 --- 
a/tests/script/tsim/stream/streamInterpOther.sim +++ b/tests/script/tsim/stream/streamInterpOther.sim @@ -4,6 +4,8 @@ system sh/exec.sh -n dnode1 -s start sleep 50 sql connect +sql alter local 'streamCoverage' '1'; + print step1 print =============== create database sql create database test vgroups 4; diff --git a/tests/script/tsim/stream/streamInterpPartitionBy0.sim b/tests/script/tsim/stream/streamInterpPartitionBy0.sim index 6b222de228..543bb48a1c 100644 --- a/tests/script/tsim/stream/streamInterpPartitionBy0.sim +++ b/tests/script/tsim/stream/streamInterpPartitionBy0.sim @@ -4,6 +4,8 @@ system sh/exec.sh -n dnode1 -s start sleep 50 sql connect +sql alter local 'streamCoverage' '1'; + print step prev print =============== create database sql create database test vgroups 1; diff --git a/tests/script/tsim/stream/streamInterpPartitionBy1.sim b/tests/script/tsim/stream/streamInterpPartitionBy1.sim index ecb5e0ee62..c8138ac05f 100644 --- a/tests/script/tsim/stream/streamInterpPartitionBy1.sim +++ b/tests/script/tsim/stream/streamInterpPartitionBy1.sim @@ -4,6 +4,8 @@ system sh/exec.sh -n dnode1 -s start sleep 50 sql connect +sql alter local 'streamCoverage' '1'; + print step NULL print =============== create database sql create database test vgroups 1; diff --git a/tests/script/tsim/stream/streamInterpPrimaryKey0.sim b/tests/script/tsim/stream/streamInterpPrimaryKey0.sim index 9edddff6db..1bbc2a9b5d 100644 --- a/tests/script/tsim/stream/streamInterpPrimaryKey0.sim +++ b/tests/script/tsim/stream/streamInterpPrimaryKey0.sim @@ -4,6 +4,8 @@ system sh/exec.sh -n dnode1 -s start sleep 50 sql connect +sql alter local 'streamCoverage' '1'; + print step1 print =============== create database sql create database test vgroups 1; diff --git a/tests/script/tsim/stream/streamInterpPrimaryKey1.sim b/tests/script/tsim/stream/streamInterpPrimaryKey1.sim index 04a1f299be..0db33c9767 100644 --- a/tests/script/tsim/stream/streamInterpPrimaryKey1.sim +++ 
b/tests/script/tsim/stream/streamInterpPrimaryKey1.sim @@ -4,6 +4,8 @@ system sh/exec.sh -n dnode1 -s start sleep 50 sql connect +sql alter local 'streamCoverage' '1'; + print step1 print =============== create database sql create database test vgroups 1; diff --git a/tests/script/tsim/stream/streamInterpPrimaryKey2.sim b/tests/script/tsim/stream/streamInterpPrimaryKey2.sim index f06e1ecd03..0574a1ceec 100644 --- a/tests/script/tsim/stream/streamInterpPrimaryKey2.sim +++ b/tests/script/tsim/stream/streamInterpPrimaryKey2.sim @@ -4,6 +4,8 @@ system sh/exec.sh -n dnode1 -s start sleep 50 sql connect +sql alter local 'streamCoverage' '1'; + print step1 print =============== create database sql create database test vgroups 1; diff --git a/tests/script/tsim/stream/streamInterpPrimaryKey3.sim b/tests/script/tsim/stream/streamInterpPrimaryKey3.sim index 725cf8d850..23cb0a58e6 100644 --- a/tests/script/tsim/stream/streamInterpPrimaryKey3.sim +++ b/tests/script/tsim/stream/streamInterpPrimaryKey3.sim @@ -4,6 +4,8 @@ system sh/exec.sh -n dnode1 -s start sleep 50 sql connect +sql alter local 'streamCoverage' '1'; + print step1 print =============== create database sql create database test vgroups 1; diff --git a/tests/script/tsim/stream/streamInterpUpdate.sim b/tests/script/tsim/stream/streamInterpUpdate.sim index 59a188c2a6..394ac1a341 100644 --- a/tests/script/tsim/stream/streamInterpUpdate.sim +++ b/tests/script/tsim/stream/streamInterpUpdate.sim @@ -4,6 +4,8 @@ system sh/exec.sh -n dnode1 -s start sleep 50 sql connect +sql alter local 'streamCoverage' '1'; + print step1 print =============== create database sql create database test vgroups 1; diff --git a/tests/script/tsim/stream/streamInterpUpdate1.sim b/tests/script/tsim/stream/streamInterpUpdate1.sim index 45f16af35d..3987afa21e 100644 --- a/tests/script/tsim/stream/streamInterpUpdate1.sim +++ b/tests/script/tsim/stream/streamInterpUpdate1.sim @@ -4,6 +4,8 @@ system sh/exec.sh -n dnode1 -s start sleep 50 sql connect 
+sql alter local 'streamCoverage' '1'; + print step1 print =============== create database sql create database test vgroups 1; diff --git a/tests/script/tsim/stream/streamInterpUpdate2.sim b/tests/script/tsim/stream/streamInterpUpdate2.sim index 2a71474dd7..cde5b589e8 100644 --- a/tests/script/tsim/stream/streamInterpUpdate2.sim +++ b/tests/script/tsim/stream/streamInterpUpdate2.sim @@ -4,6 +4,8 @@ system sh/exec.sh -n dnode1 -s start sleep 50 sql connect +sql alter local 'streamCoverage' '1'; + print step1 print =============== create database sql create database test vgroups 1; diff --git a/tests/script/tsim/stream/streamInterpValue0.sim b/tests/script/tsim/stream/streamInterpValue0.sim index bce7f0ece6..2cbf61f4bd 100644 --- a/tests/script/tsim/stream/streamInterpValue0.sim +++ b/tests/script/tsim/stream/streamInterpValue0.sim @@ -4,6 +4,8 @@ system sh/exec.sh -n dnode1 -s start sleep 50 sql connect +sql alter local 'streamCoverage' '1'; + print step1 print =============== create database sql create database test vgroups 1; diff --git a/tests/script/tsim/tagindex/indexOverflow.sim b/tests/script/tsim/tagindex/indexOverflow.sim index 9e297099d1..99b9023f11 100644 --- a/tests/script/tsim/tagindex/indexOverflow.sim +++ b/tests/script/tsim/tagindex/indexOverflow.sim @@ -76,7 +76,23 @@ while $i < $maxTinyLimit $i = $i + 1 endw +print =============== create database with big tags +$bigTagDb = db +$bigTagStb = stb +$bigTagTb = tb +sql create database $bigTagDb +sql use $bigTagDb + +sql create table $bigTagStb (ts timestamp, f1 int) tags(t1 nchar(100), t2 nchar(800), t3 nchar(800), t4 nchar(800)); + +sql insert into $bigTagTb using $bigTagStb tags("abc", 
"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx") values(now, 1); + +$tb = $bigTagTb . 
2 +sql insert into $tb using $bigTagStb tags("abc", "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx") values(now, 1); + +$tb = $bigTagTb . 
3 +sql insert into $tb using $bigTagStb tags("abc", "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx") values(now, 1); -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/user/password.sim b/tests/script/tsim/user/password.sim index 729097e7e1..7d1eff2f0b 100644 --- a/tests/script/tsim/user/password.sim +++ b/tests/script/tsim/user/password.sim @@ -271,5 +271,16 @@ sql create user u25 pass 'taosdata1~' sql create user u26 pass 'taosdata1,' sql create user u27 
pass 'taosdata1.' -return +sql CREATE USER `_xTest1` PASS '2729c41a99b2c5222aa7dd9fc1ce3de7' SYSINFO 1 CREATEDB 0 IS_IMPORT 1 HOST '127.0.0.1'; +sql_error CREATE USER `_xTest2` PASS '2729c41a99b2c5222aa7dd9fc1ce3de7' SYSINFO 1 CREATEDB 0 IS_IMPORT 0 HOST '127.0.0.1'; +sql CREATE USER `_xTest3` PASS '2729c41' SYSINFO 1 CREATEDB 0 IS_IMPORT 1 HOST '127.0.0.1'; +sql_error CREATE USER `_xTest4` PASS '2729c417' SYSINFO 1 CREATEDB 0 IS_IMPORT 0 HOST '127.0.0.1'; +sql CREATE USER `_xTest5` PASS '2xF' SYSINFO 1 CREATEDB 0 IS_IMPORT 1 HOST '127.0.0.1'; +sql_error CREATE USER `_xTest6` PASS '2xF' SYSINFO 1 CREATEDB 0 IS_IMPORT 0 HOST '127.0.0.1'; + + +sql_error alter USER `_xTest1` PASS '2729c41a99b2c5222aa7dd9fc1ce3de7'; +sql_error alter USER `_xTest1` PASS '2729c417'; +sql_error alter USER `_xTest1` PASS '2xF'; + system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/setup-lcov.sh b/tests/setup-lcov.sh new file mode 100644 index 0000000000..0d1861fc92 --- /dev/null +++ b/tests/setup-lcov.sh @@ -0,0 +1,57 @@ +#!/usr/bin/env bash + +function usage() { + echo "Usage: $0 -v " + echo "Example: $0 -v 1.14" +} + +function download_lcov() { + local version=$1 + local url="https://github.com/linux-test-project/lcov/releases/download/v${version}/lcov-${version}.tar.gz" + echo "Downloading lcov version ${version} from ${url}..." + curl -LO ${url} + tar -xzf lcov-${version}.tar.gz + echo "lcov version ${version} downloaded and extracted." +} + +function install_lcov() { + echo -e "\nInstalling..." + local version=$1 + cd lcov-${version} + sudo make uninstall && sudo make install + cd .. + echo "lcov version ${version} installed." +} + +function verify_lcov() { + echo -e "\nVerify installation..." 
+ lcov --version +} + +function main() { + if [[ "$#" -ne 2 ]]; then + usage + exit 1 + fi + + while getopts "v:h" opt; do + case ${opt} in + v) + version=${OPTARG} + download_lcov ${version} + install_lcov ${version} + verify_lcov + ;; + h) + usage + exit 0 + ;; + *) + usage + exit 1 + ;; + esac + done +} + +main "$@" \ No newline at end of file diff --git a/tests/system-test/0-others/compatibility.py b/tests/system-test/0-others/compatibility.py index 7c3eb48fe1..6a78a051ab 100644 --- a/tests/system-test/0-others/compatibility.py +++ b/tests/system-test/0-others/compatibility.py @@ -106,8 +106,8 @@ class TDTestCase: my_file = Path(f"{packagePath}/{packageName}") if not my_file.exists(): print(f"{packageName} is not exists") - tdLog.info(f"cd {packagePath} && wget https://www.tdengine.com/assets-download/3.0/{packageName}") - os.system(f"cd {packagePath} && wget https://www.tdengine.com/assets-download/3.0/{packageName}") + tdLog.info(f"cd {packagePath} && wget https://www.taosdata.com/assets-download/3.0/{packageName}") + os.system(f"cd {packagePath} && wget https://www.taosdata.com/assets-download/3.0/{packageName}") else: print(f"{packageName} has been exists") os.system(f" cd {packagePath} && tar xvf {packageName} && cd {packageTPath} && ./install.sh -e no " ) diff --git a/tests/system-test/0-others/multilevel.py b/tests/system-test/0-others/multilevel.py index 3ed4002fcd..971451a023 100644 --- a/tests/system-test/0-others/multilevel.py +++ b/tests/system-test/0-others/multilevel.py @@ -287,6 +287,55 @@ class TDTestCase: checkFiles('/mnt/data3/vnode/*/tsdb/v*',0) checkFiles('/mnt/data4/vnode/*/tsdb/v*',1) + def test_alter_disable_err_case(self): + tdLog.info("============== test_alter_disable_err_case test ===============") + tdDnodes.stop(1) + cfg={ + '/mnt/data1 0 1 1' : 'dataDir', + '/mnt/data2 0 0 0' : 'dataDir' + } + tdSql.createDir('/mnt/data1') + tdSql.createDir('/mnt/data2') + tdDnodes.deploy(1,cfg) + tdDnodes.start(1) + + tdSql.execute('alter dnode 1 
"dataDir /mnt/data2 1"') + tdSql.error('alter dnode 1 "dataDir /mnt/errpath 1"') + tdSql.error('alter dnode 1 "dataDir /mnt/data2 3"') + tdSql.error('alter dnode 1 "dataDir /mnt/data2 ee"') + + def test_alter_disable_case(self): + tdLog.info("============== test_alter_disable_case test ===============") + tdDnodes.stop(1) + cfg={ + '/mnt/data1 0 1 1' : 'dataDir', + '/mnt/data2 0 0 0' : 'dataDir', + '/mnt/data3 0 0 0' : 'dataDir' + } + tdSql.createDir('/mnt/data1') + tdSql.createDir('/mnt/data2') + tdSql.createDir('/mnt/data3') + tdDnodes.deploy(1,cfg) + tdDnodes.start(1) + + tdSql.execute('create database dbtest duration 3') + tdSql.execute('use dbtest') + tdSql.execute('create table stb (ts timestamp,c0 int) tags(t0 int)') + tdSql.execute('create table tb1 using stb tags(1)') + for i in range(1,600, 30): + tdSql.execute(f'insert into tb1 values(now-{i}d,10)') + tdSql.execute('flush database dbtest') + + tdSql.execute('alter dnode 1 "dataDir /mnt/data2 1"') + + tdSql.execute('create database dbtest1 duration 3') + tdSql.execute('use dbtest1') + tdSql.execute('create table stb (ts timestamp,c0 int) tags(t0 int)') + tdSql.execute('create table tb1 using stb tags(1)') + for i in range(1,600, 30): + tdSql.execute(f'insert into tb1 values(now-{i}d,10)') + tdSql.execute('flush database dbtest1') + def run(self): self.basic() self.dir_not_exist() @@ -297,8 +346,8 @@ class TDTestCase: self.trim_database() self.missing_middle_level() self.disable_create_new_file() - - + self.test_alter_disable_err_case() + self.test_alter_disable_case() def stop(self): tdSql.close() diff --git a/tests/system-test/0-others/udfTest.py b/tests/system-test/0-others/udfTest.py index 829a8aec27..d3efa61e04 100644 --- a/tests/system-test/0-others/udfTest.py +++ b/tests/system-test/0-others/udfTest.py @@ -4,6 +4,8 @@ import sys import time import os import platform +import random +import string from util.log import * from util.sql import * @@ -12,7 +14,7 @@ from util.dnodes import * import 
subprocess class TDTestCase: - + updatecfgDict = {'udfdResFuncs': "udf1,udf2"} def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") @@ -652,10 +654,20 @@ class TDTestCase: tdDnodes.start(1) time.sleep(2) + def test_udfd_cmd(self): + tdLog.info(" test udfd -V ") + os.system("udfd -V") + tdLog.info(" test udfd -c ") + os.system("udfd -c") + + letters = string.ascii_letters + string.digits + '\\' + path = ''.join(random.choice(letters) for i in range(5000)) - def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring + os.system(f"udfd -c {path}") + def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring print(" env is ok for all ") + self.test_udfd_cmd() self.prepare_udf_so() self.prepare_data() self.create_udf_function() diff --git a/tests/system-test/0-others/udf_restart_taosd.py b/tests/system-test/0-others/udf_restart_taosd.py index c99e864e71..f9a3f08bf5 100644 --- a/tests/system-test/0-others/udf_restart_taosd.py +++ b/tests/system-test/0-others/udf_restart_taosd.py @@ -11,7 +11,7 @@ from util.dnodes import * import subprocess class TDTestCase: - + updatecfgDict = {'udfdResFuncs': "udf1,udf2"} def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") diff --git a/tests/system-test/1-insert/boundary.py b/tests/system-test/1-insert/boundary.py index 25782fd0c3..129b0f275c 100644 --- a/tests/system-test/1-insert/boundary.py +++ b/tests/system-test/1-insert/boundary.py @@ -130,6 +130,8 @@ class TDTestCase: tdSql.error(f'create user {username} pass "test123@#$"') if "Name or password too long" in tdSql.error_info: tdLog.info("error info is true!") + elif "Password too short or empty" in tdSql.error_info: + tdLog.info("error info is true!") else: tdLog.exit("error info is not true") @@ -146,6 +148,10 @@ class TDTestCase: tdSql.error(f'create user {username} pass "{password}@1"') if 
"Invalid password format" in tdSql.error_info: tdLog.info("error info is true!") + elif "Name or password too long" in tdSql.error_info: + tdLog.info("error info is true!") + elif "Password too short or empty" in tdSql.error_info: + tdLog.info("error info is true!") else: tdLog.exit("error info is not true") def sql_length_check(self): diff --git a/tests/system-test/2-query/Now.py b/tests/system-test/2-query/Now.py index 21ff1f4e06..9e7d240c0a 100644 --- a/tests/system-test/2-query/Now.py +++ b/tests/system-test/2-query/Now.py @@ -41,8 +41,9 @@ class TDTestCase: ] self.time_unit = ['b','u','a','s','m','h','d','w'] self.symbol = ['+','-','*','/'] - self.error_values = [1.5,'abc','"abc"','!@','today()'] + self.error_values = ['abc','"abc"','!@','today()'] self.db_percision = ['ms','us','ns'] + self.test_values = [1.5, 10] def tbtype_check(self,tb_type): if tb_type == 'normal table' or tb_type == 'child table': tdSql.checkRows(len(self.values_list)) @@ -71,6 +72,9 @@ class TDTestCase: self.tbtype_check(tb_type) for i in range(len(self.values_list)): tdSql.checkData(i,0,None) + for param in self.test_values: + tdSql.query(f'select now() {symbol}{param} from {tbname}') + tdSql.query(f'select 1 {symbol}{param} from {tbname}') def now_check_ntb(self): for time_unit in self.db_percision: diff --git a/tests/system-test/2-query/Today.py b/tests/system-test/2-query/Today.py index 77e6bd8cb6..745ed31c2c 100644 --- a/tests/system-test/2-query/Today.py +++ b/tests/system-test/2-query/Today.py @@ -18,7 +18,7 @@ class TDTestCase: self.today_ts = datetime.datetime.strptime(datetime.datetime.now().strftime("%Y-%m-%d"), "%Y-%m-%d").timestamp() self.today_ts_ns = 0 self.time_unit = ['b','u','a','s','m','h','d','w'] - self.error_param = ['1.5','abc','!@#','"abc"','today()'] + self.error_param = ['abc','!@#','"abc"','today()'] self.arithmetic_operators = ['+','-','*','/'] self.relational_operator = ['<','<=','=','>=','>'] # prepare data diff --git a/tests/system-test/2-query/operator.py 
b/tests/system-test/2-query/operator.py new file mode 100644 index 0000000000..2e2f0af802 --- /dev/null +++ b/tests/system-test/2-query/operator.py @@ -0,0 +1,347 @@ +from wsgiref.headers import tspecials +from util.log import * +from util.cases import * +from util.sql import * +from util.common import tdCom +import numpy as np + + +class TDTestCase: + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + self.dbname = "db" + self.rowNum = 10 + self.ts = 1537146000000 + + # test in/not in contidion with invalid value + def ts5757(self): + + tdSql.execute(f"create database if not exists {self.dbname}") + + tdSql.execute(f"DROP STABLE IF EXISTS {self.dbname}.super_t1;") + tdSql.execute(f"DROP TABLE IF EXISTS {self.dbname}.t1;") + tdSql.execute(f"CREATE STABLE IF NOT EXISTS {self.dbname}.super_t1(time TIMESTAMP, c0 BIGINT UNSIGNED) TAGS (location BINARY(64))") + tdSql.execute(f"CREATE TABLE {self.dbname}.t1 USING {self.dbname}.super_t1 TAGS ('ek')") + tdSql.execute(f"INSERT INTO {self.dbname}.t1(time, c0) VALUES (1641024000000, 1);") + tdSql.execute(f"INSERT INTO {self.dbname}.t1(time, c0) VALUES (1641024005000, 2);") + tdSql.execute(f"INSERT INTO {self.dbname}.t1(time, c0) VALUES (1641024010000, NULL);") + + tdSql.query(f"SELECT * FROM {self.dbname}.t1 WHERE c0 IS NOT NULL AND c0 IN (-1);") + tdSql.checkRows(0) + tdSql.query(f"SELECT * FROM {self.dbname}.t1 WHERE c0 IS NULL AND c0 IN (-1);") + tdSql.checkRows(0) + tdSql.query(f"SELECT * FROM {self.dbname}.t1 WHERE c0 IS NOT NULL AND c0 IN (-1, 1);") + tdSql.checkRows(1) + tdSql.query(f"SELECT * FROM {self.dbname}.t1 WHERE c0 IS NULL AND c0 IN (2, -1, 1);") + tdSql.checkRows(0) + tdSql.query(f"SELECT * FROM {self.dbname}.t1 WHERE c0 IS NOT NULL AND c0 NOT IN (-1);") + tdSql.checkRows(2) + tdSql.query(f"SELECT * FROM {self.dbname}.t1 WHERE c0 IS NULL AND c0 NOT IN (-1);") + tdSql.checkRows(0) + tdSql.query(f"SELECT 
* FROM {self.dbname}.t1 WHERE c0 IS NULL AND c0 NOT IN (3);") + tdSql.checkRows(0) + tdSql.query(f"SELECT * FROM {self.dbname}.t1 WHERE c0 IS NOT NULL AND c0 NOT IN (-1, 1);") + tdSql.checkRows(1) + tdSql.query(f"SELECT * FROM {self.dbname}.t1 WHERE c0 IS NULL AND c0 NOT IN (2, -1, 1);") + tdSql.checkRows(0) + tdSql.query(f"SELECT * FROM {self.dbname}.t1 WHERE (c0 IS NULL AND c0 IN (-1)) or c0 in(1)") + tdSql.checkRows(1) + + tdSql.query(f"SELECT * FROM {self.dbname}.t1 WHERE c0 IS NOT NULL OR c0 IN (-1);") + tdSql.checkRows(2) + tdSql.query(f"SELECT * FROM {self.dbname}.t1 WHERE c0 IS NULL OR c0 IN (-1);") + tdSql.checkRows(1) + tdSql.query(f"SELECT * FROM {self.dbname}.t1 WHERE c0 IS NOT NULL OR c0 IN (-1, 1);") + tdSql.checkRows(2) + tdSql.query(f"SELECT * FROM {self.dbname}.t1 WHERE c0 IS NULL OR c0 IN (2, -1, 1);") + tdSql.checkRows(3) + tdSql.query(f"SELECT * FROM {self.dbname}.t1 WHERE c0 IS NOT NULL OR c0 NOT IN (-1);") + tdSql.checkRows(2) + tdSql.query(f"SELECT * FROM {self.dbname}.t1 WHERE c0 IS NULL OR c0 NOT IN (-1);") + tdSql.checkRows(3) + tdSql.query(f"SELECT * FROM {self.dbname}.t1 WHERE c0 IS NULL OR c0 NOT IN (3);") + tdSql.checkRows(3) + tdSql.query(f"SELECT * FROM {self.dbname}.t1 WHERE c0 IS NOT NULL OR c0 NOT IN (-1, 1);") + tdSql.checkRows(2) + tdSql.query(f"SELECT * FROM {self.dbname}.t1 WHERE c0 IS NOT NULL OR c0 NOT IN (-1);") + tdSql.checkRows(2) + tdSql.query(f"SELECT * FROM {self.dbname}.t1 WHERE c0 IS NULL OR c0 NOT IN (2, -1, 1);") + tdSql.checkRows(1) + + tdSql.query(f"SELECT * FROM {self.dbname}.t1 WHERE ((c0 is NULL) AND (c0 in (-1)) )") + tdSql.checkRows(0) + tdSql.query(f"SELECT * FROM {self.dbname}.t1 WHERE ((c0 in (-1)) AND (c0 is NULL) )") + tdSql.checkRows(0) + tdSql.query(f"SELECT * FROM {self.dbname}.t1 WHERE ((c0 in (-1)) AND (c0 is NULL) ) OR c0 in(1)") + tdSql.checkRows(1) + + tdSql.query(f"SELECT * FROM {self.dbname}.t1 WHERE c0 IN (-1);") + tdSql.checkRows(0) + tdSql.query(f"SELECT * FROM {self.dbname}.t1 WHERE c0 IS 
NOT NULL;") + tdSql.checkRows(2) + tdSql.query(f"SELECT * FROM {self.dbname}.t1 WHERE c0 IN (-1) or c0 in(1);") + tdSql.checkRows(1) + tdSql.query(f"SELECT * FROM {self.dbname}.t1 WHERE c0 IN (1) or c0 in(-1);") + tdSql.checkRows(1) + tdSql.query(f"SELECT * FROM {self.dbname}.t1 WHERE c0 IN (-1) or c0 in(-1);") + tdSql.checkRows(0) + tdSql.query(f"SELECT * FROM {self.dbname}.t1 WHERE c0 IN (-1) and c0 in(1);") + tdSql.checkRows(0) + tdSql.query(f"SELECT * FROM {self.dbname}.t1 WHERE c0 IN (1) and c0 in(-1);") + tdSql.checkRows(0) + tdSql.query(f"SELECT * FROM {self.dbname}.t1 WHERE c0 IN (-1) and c0 in(-1);") + tdSql.checkRows(0) + + def ts5760(self): + tdSql.execute(f"create database if not exists {self.dbname}") + + tdSql.execute(f"DROP TABLE IF EXISTS {self.dbname}.t1;") + tdSql.execute(f"CREATE TABLE {self.dbname}.t1( time TIMESTAMP, c0 INT);") + tdSql.execute(f"INSERT INTO {self.dbname}.t1(time, c0) VALUES (1641024000000, 1);") + + tdSql.query(f"SELECT time, c0 FROM {self.dbname}.t1 WHERE (time - c0) > 0;") + tdSql.checkRows(1) + tdSql.query(f"SELECT time, c0 FROM {self.dbname}.t1 WHERE (time + c0) > 0;") + tdSql.checkRows(1) + tdSql.query(f"SELECT time, c0 FROM {self.dbname}.t1 WHERE (-(- c0)) > 0;") + tdSql.checkRows(1) + tdSql.query(f"SELECT time, c0 FROM {self.dbname}.t1 WHERE -(- c0) > 0;") + tdSql.checkRows(1) + tdSql.query(f"SELECT time, c0 FROM {self.dbname}.t1 WHERE -(- c0) < 0;") + tdSql.checkRows(0) + + tdSql.query(f"SELECT time, c0 FROM {self.dbname}.t1 WHERE -(- c0) = 0;") + tdSql.checkRows(0) + + tdSql.query(f"SELECT time, c0 FROM {self.dbname}.t1 WHERE (- c0) > 0;") + tdSql.checkRows(0) + tdSql.query(f"SELECT time, c0 FROM {self.dbname}.t1 WHERE (- c0) < 0;") + tdSql.checkRows(1) + + tdSql.query(f"SELECT time, c0 FROM {self.dbname}.t1 WHERE (time + (- c0)) > 0;") + tdSql.checkRows(1) + + tdSql.query(f"SELECT time, c0 FROM {self.dbname}.t1 WHERE (time + (- c0)) > 0;") + tdSql.checkRows(1) + + tdSql.query(f"SELECT time, c0 FROM {self.dbname}.t1 
WHERE (time - (- (- c0)) ) > 0;") + tdSql.checkRows(1) + + tdSql.query(f"SELECT time, c0 FROM {self.dbname}.t1 WHERE (time + (-(- c0))) > 0;") + tdSql.checkRows(1) + + def ts5758(self): + tdSql.execute(f"create database if not exists {self.dbname}") + + tdSql.execute(f"DROP TABLE IF EXISTS {self.dbname}.t1;") + tdSql.execute(f"CREATE TABLE {self.dbname}.t1( time TIMESTAMP, c1 BIGINT);") + tdSql.execute(f"INSERT INTO {self.dbname}.t1(time, c1) VALUES (1641024000000, 0);") + tdSql.execute(f"INSERT INTO {self.dbname}.t1(time, c1) VALUES (1641024000001, 1);") + tdSql.execute(f"INSERT INTO {self.dbname}.t1(time, c1) VALUES (1641024000002, 2);") + tdSql.execute(f"INSERT INTO {self.dbname}.t1(time, c1) VALUES (1641024000003, 3);") + tdSql.execute(f"INSERT INTO {self.dbname}.t1(time, c1) VALUES (1641024000004, 4);") + tdSql.execute(f"INSERT INTO {self.dbname}.t1(time, c1) VALUES (1641024000005, 5);") + + tdSql.query(f"SELECT c1 FROM {self.dbname}.t1 WHERE time IN (1) AND time BETWEEN (1741024000000) AND (1741024000000);") + tdSql.checkRows(0) + tdSql.query(f"SELECT c1 FROM {self.dbname}.t1 WHERE time BETWEEN (1741024000000) AND (1741024000000) AND time IN (1);") + tdSql.checkRows(0) + tdSql.query(f"SELECT c1 FROM {self.dbname}.t1 WHERE time IN (1) and time BETWEEN (1741024000000) AND (1741024000000) AND time IN (1);") + tdSql.checkRows(0) + tdSql.query(f"SELECT c1 FROM {self.dbname}.t1 WHERE time IN (1641024000000) and time BETWEEN (1741024000000) AND (1741024000000) AND time IN (1);") + tdSql.checkRows(0) + tdSql.query(f"SELECT c1 FROM {self.dbname}.t1 WHERE time IN (1) AND time BETWEEN (1641024000000) AND (1741024000000);") + tdSql.checkRows(0) + tdSql.query(f"SELECT c1 FROM {self.dbname}.t1 WHERE time IN (1641024000001) AND time BETWEEN (1641024000000) AND (1741024000000);") + tdSql.checkRows(1) + tdSql.query(f"SELECT c1 FROM {self.dbname}.t1 WHERE time IN (1641024000001, 1641024000002, 1641024000003) AND time BETWEEN (1641024000000) AND (1741024000000);") + 
tdSql.checkRows(3) + tdSql.query(f"SELECT c1 FROM {self.dbname}.t1 WHERE time IN (1641024000001, 1641024000002, 1641024000005) AND time BETWEEN (1641024000000) AND (1641024000004);") + tdSql.checkRows(2) + + tdSql.query(f"SELECT c1 FROM {self.dbname}.t1 WHERE time IN (1) OR time = 1741024000000;") + tdSql.checkRows(0) + tdSql.query(f"SELECT c1 FROM {self.dbname}.t1 WHERE time = 1741024000000 OR time IN (1);") + tdSql.checkRows(0) + tdSql.query(f"SELECT c1 FROM {self.dbname}.t1 WHERE time IN (1, 2, 3) OR time BETWEEN (1641024000000) and (1741024000000);") + tdSql.checkRows(6) + tdSql.query(f"SELECT c1 FROM {self.dbname}.t1 WHERE time IN (1, 2, 3) OR time = 1641024000000;") + tdSql.checkRows(1) + tdSql.query(f"SELECT c1 FROM {self.dbname}.t1 WHERE time = 1641024000001 OR time BETWEEN (1641024000000) and (1641024000002);") + tdSql.checkRows(3) + tdSql.query(f"SELECT c1 FROM {self.dbname}.t1 WHERE time = 1641024000004 OR time BETWEEN (1641024000000) and (1641024000002);") + tdSql.checkRows(4) + tdSql.query(f"SELECT c1 FROM {self.dbname}.t1 WHERE time = 1641024000001 OR time = 1741024000000;") + tdSql.checkRows(1) + tdSql.query(f"SELECT c1 FROM {self.dbname}.t1 WHERE time IN (1641024000001, 1641024000002) OR time = 1741024000000;") + tdSql.checkRows(2) + tdSql.query(f"SELECT c1 FROM {self.dbname}.t1 WHERE time IN (1641024000001, 1641024000002) OR time BETWEEN (1641024000000) and (1741024000000);") + tdSql.checkRows(6) + tdSql.query(f"SELECT c1 FROM {self.dbname}.t1 WHERE (time = 1641024000004 OR time BETWEEN (1641024000000) and (1641024000002)) and time in(1);") + tdSql.checkRows(0) + tdSql.query(f"SELECT c1 FROM {self.dbname}.t1 WHERE (time = 1641024000004 OR time BETWEEN (1641024000000) and (1641024000002)) and time in(1641024000004, 1641024000002);") + tdSql.checkRows(2) + tdSql.query(f"SELECT c1 FROM {self.dbname}.t1 WHERE (time = 1641024000004 OR time BETWEEN (1641024000000) and (1641024000002)) or time in(1);") + tdSql.checkRows(4) + + + def ts5759(self): + 
tdSql.execute(f"create database if not exists {self.dbname}") + + tdSql.execute(f"DROP TABLE IF EXISTS {self.dbname}.t1;") + tdSql.execute(f"CREATE TABLE {self.dbname}.t1( time TIMESTAMP, c1 BIGINT);") + tdSql.execute(f"INSERT INTO {self.dbname}.t1(time, c1) VALUES (1641024000000, 0);") + + tdSql.query(f"SELECT c1 FROM {self.dbname}.t1 WHERE (time BETWEEN 1641024000000 AND 1641024000001)") + tdSql.checkRows(1) + tdSql.query(f"SELECT c1 FROM {self.dbname}.t1 WHERE (time BETWEEN 1641024000000 AND 1641024000001) OR (1 < 2)") + tdSql.checkRows(1) + tdSql.query(f"SELECT c1 FROM {self.dbname}.t1 WHERE (time BETWEEN 1641024000000 AND 1641024000001) OR (3 < 2)") + tdSql.checkRows(1) + tdSql.query(f"SELECT c1 FROM {self.dbname}.t1 WHERE (time BETWEEN 1641024000000 AND 1641024000001) and (1 < 2)") + tdSql.checkRows(1) + tdSql.query(f"SELECT c1 FROM {self.dbname}.t1 WHERE (time BETWEEN 1641024000000 AND 1641024000001) OR (1 > 2)") + tdSql.checkRows(1) + tdSql.query(f"SELECT c1 FROM {self.dbname}.t1 WHERE (time BETWEEN 1641024000000 AND 1641024000001) and (1 > 2)") + tdSql.checkRows(0) + + tdSql.execute(f"INSERT INTO {self.dbname}.t1(time, c1) VALUES (1641024000001, 1);") + tdSql.execute(f"INSERT INTO {self.dbname}.t1(time, c1) VALUES (1641024000002, 2);") + + tdSql.query(f"SELECT c1 FROM {self.dbname}.t1 WHERE (time BETWEEN 1641024000000 AND 1641024000001) OR (1 < 2)") + tdSql.checkRows(3) + tdSql.query(f"SELECT c1 FROM {self.dbname}.t1 WHERE (time BETWEEN 1641024000000 AND 1641024000001) OR (1 > 2)") + tdSql.checkRows(2) + tdSql.query(f"SELECT c1 FROM {self.dbname}.t1 WHERE (time BETWEEN 1641024000000 AND 1641024000001) and (1 < 2)") + tdSql.checkRows(2) + + def operOnTime(self): + tdSql.execute(f"create database if not exists {self.dbname}") + + tdSql.execute(f"DROP TABLE IF EXISTS {self.dbname}.t1;") + tdSql.execute(f"CREATE TABLE {self.dbname}.t1( ts TIMESTAMP, c0 INT, c1 INT UNSIGNED, \ + c2 BIGINT, c3 BIGINT UNSIGNED, c4 SMALLINT, c5 SMALLINT UNSIGNED, c6 TINYINT, c7 
TINYINT UNSIGNED);") + tdSql.execute(f"INSERT INTO {self.dbname}.t1 VALUES (1641024000001, 1, 1, 1, 1, 1, 1, 1, 1);") + + columns = ["c0", "c1", "c2", "c3", "c4", "c5", "c6", "c7"] + for col in columns: + tdLog.debug(f"oper on time test, {col} start ...") + tdSql.query(f"SELECT ts, ts+1, ts+{col}, ts+(-{col}) FROM {self.dbname}.t1") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1641024000001) + tdSql.checkData(0, 1, 1641024000002) + tdSql.checkData(0, 2, 1641024000002) + tdSql.checkData(0, 3, 1641024000000) + + tdSql.query(f"SELECT ts, ts+1, ts+{col}, ts+(-{col}) FROM {self.dbname}.t1 where (ts-(-{col})) > 0") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1641024000001) + tdSql.checkData(0, 1, 1641024000002) + tdSql.checkData(0, 2, 1641024000002) + tdSql.checkData(0, 3, 1641024000000) + + tdSql.query(f"SELECT ts, ts-1, ts-{col}, ts-(-{col}) FROM {self.dbname}.t1") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1641024000001) + tdSql.checkData(0, 1, 1641024000000) + tdSql.checkData(0, 2, 1641024000000) + tdSql.checkData(0, 3, 1641024000002) + + tdSql.query(f"SELECT ts, ts+true, ts-true, ts-false, ts+false FROM {self.dbname}.t1") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1641024000001) + tdSql.checkData(0, 1, 1641024000002) + tdSql.checkData(0, 2, 1641024000000) + tdSql.checkData(0, 3, 1641024000001) + tdSql.checkData(0, 4, 1641024000001) + + tdSql.execute(f"DROP TABLE IF EXISTS {self.dbname}.t2;") + tdSql.execute(f"CREATE TABLE {self.dbname}.t2( ts TIMESTAMP, c1 float, c2 double);") + tdSql.execute(f"INSERT INTO {self.dbname}.t2(ts, c1, c2) VALUES (1641024000001, 1.0, 1.0);") + + columns = ["c1", "c2"] + for col in columns: + tdSql.query(f"SELECT ts, ts+{col}, ts+(-{col}), ts-{col}, ts-(-{col}) FROM {self.dbname}.t2") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1641024000001) + tdSql.checkData(0, 1, 1641024000002) + tdSql.checkData(0, 2, 1641024000000) + tdSql.checkData(0, 3, 1641024000000) + tdSql.checkData(0, 4, 1641024000002) + + tdSql.query(f"SELECT ts, 
ts+{col}, ts+(-{col}), ts-{col}, ts-(-{col}) FROM {self.dbname}.t2 where (ts-(-{col})) > 0") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1641024000001) + tdSql.checkData(0, 1, 1641024000002) + tdSql.checkData(0, 2, 1641024000000) + tdSql.checkData(0, 3, 1641024000000) + tdSql.checkData(0, 4, 1641024000002) + + tdSql.query(f"SELECT ts, cast(ts+{col} as bigint), cast(ts+(-{col}) as bigint), cast(ts-{col} as bigint),\ + cast(ts-(-{col}) as bigint) FROM {self.dbname}.t2") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1641024000001) + tdSql.checkData(0, 1, 1641024000002) + tdSql.checkData(0, 2, 1641024000000) + tdSql.checkData(0, 3, 1641024000000) + tdSql.checkData(0, 4, 1641024000002) + + tdSql.query(f"SELECT sum(ts + c1), sum(ts+c2) from {self.dbname}.t2") + tdSql.checkData(0, 0, 1641024000002) + tdSql.checkData(0, 1, 1641024000002) + tdSql.query(f"SELECT sum(ts * c1), sum(ts*c2) from {self.dbname}.t2") + tdSql.checkData(0, 0, 1641024000001) + tdSql.checkData(0, 1, 1641024000001) + tdSql.query(f"SELECT sum(ts / c1), sum(ts/c2) from {self.dbname}.t2") + tdSql.checkData(0, 0, 1641024000001) + tdSql.checkData(0, 1, 1641024000001) + tdSql.execute(f"INSERT INTO {self.dbname}.t2(ts, c1, c2) VALUES (1641024000002, 2.0, 2.0);") + tdSql.query(f"SELECT sum(ts + c1), sum(ts+c2) from {self.dbname}.t2") + tdSql.checkData(0, 0, 3282048000006) + tdSql.checkData(0, 1, 3282048000006) + tdSql.query(f"SELECT sum(ts - c1), sum(ts-c2) from {self.dbname}.t2") + tdSql.checkData(0, 0, 3282048000000) + tdSql.checkData(0, 1, 3282048000000) + tdSql.query(f"SELECT sum(ts * c1), sum(ts*c2) from {self.dbname}.t2") + tdSql.checkData(0, 0, 4923072000005) + tdSql.checkData(0, 1, 4923072000005) + tdSql.query(f"SELECT ts / c1, ts/c2 from {self.dbname}.t2 order by ts") + tdSql.checkData(0, 0, 1641024000001) + tdSql.checkData(0, 1, 1641024000001) + tdSql.checkData(1, 0, 820512000001) + tdSql.checkData(1, 1, 820512000001) + tdSql.query(f"SELECT sum(ts / c1), sum(ts/c2) from {self.dbname}.t2") + 
tdSql.checkData(0, 0, 2461536000002) + tdSql.checkData(0, 1, 2461536000002) + + # data overflow + tdSql.query(f"SELECT ts + 9223372036854775807 from {self.dbname}.t2 order by ts") + tdSql.query(f"SELECT ts - 9223372036854775808 from {self.dbname}.t2 order by ts") + + tdSql.query(f"SELECT ts + 8223372036854775807 from {self.dbname}.t2 order by ts") + tdSql.query(f"SELECT ts - 8223372036854775808 from {self.dbname}.t2 order by ts") + + def run(self): + dbname = "db" + tdSql.prepare() + tdSql.execute(f"create database if not exists {self.dbname}") + + self.ts5757() + self.ts5760() + self.ts5758() + self.ts5759() + self.operOnTime() + + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) + +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/sum.py b/tests/system-test/2-query/sum.py index 5abd58d3f9..88f2a37aec 100644 --- a/tests/system-test/2-query/sum.py +++ b/tests/system-test/2-query/sum.py @@ -78,7 +78,7 @@ class TDTestCase: ) # sqls.extend( f"select sum( {un_num_col} + {un_num_col_2} ) from {tbanme} " for un_num_col_2 in UN_NUM_COL ) - sqls.extend( f"select sum( {num_col} + {ts_col} ) from {DBNAME}.{tbanme} " for num_col in NUM_COL for ts_col in TS_TYPE_COL) + #sqls.extend( f"select sum( {num_col} + {ts_col} ) from {DBNAME}.{tbanme} " for num_col in NUM_COL for ts_col in TS_TYPE_COL) sqls.extend( ( f"select sum() from {DBNAME}.{tbanme} ", diff --git a/tests/system-test/2-query/ts-5761-scalemode.py b/tests/system-test/2-query/ts-5761-scalemode.py new file mode 100644 index 0000000000..0eeabd3af6 --- /dev/null +++ b/tests/system-test/2-query/ts-5761-scalemode.py @@ -0,0 +1,150 @@ +import taos + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.common import * + +class TDTestCase: + updatecfgDict = {'filterScalarMode':1} + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = 
int(replicaVar) + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor(), True) + self.dbname = 'db' + self.stbname = 'st' + + def prepareData(self): + # db + tdSql.execute(f"create database db;") + tdSql.execute(f"use db") + + # super tableUNSIGNED + tdSql.execute("CREATE TABLE st( time TIMESTAMP, c1 BIGINT, c2 smallint, c3 double, c4 int UNSIGNED, c5 bool, c6 binary(32), c7 nchar(32)) tags(t1 binary(32), t2 nchar(32))") + tdSql.execute("create table t1 using st tags('1', '1.7')") + tdSql.execute("create table t2 using st tags('0', '')") + tdSql.execute("create table t3 using st tags('1', 'er')") + + # create index for all tags + tdSql.execute("INSERT INTO t1 VALUES (1641024000000, 1, 1, 1, 1, 1, '1', '1.7')") + tdSql.execute("INSERT INTO t1 VALUES (1641024000001, 0, 0, 1.7, 0, 0, '0', '')") + tdSql.execute("INSERT INTO t1 VALUES (1641024000002, 1, 1, 1, 1, 1, '1', 'er')") + tdSql.execute("INSERT INTO t2 VALUES (1641024000002, 1, 1, 1, 1, 1, '1', 'er')") + tdSql.execute("INSERT INTO t3 VALUES (1641024000002, 1, 1, 1, 1, 1, '1', 'er')") + + tdSql.execute("CREATE TABLE stt( time TIMESTAMP, c1 BIGINT, c2 timestamp, c3 int, c4 int UNSIGNED, c5 bool, c6 binary(32), c7 nchar(32)) tags(t1 binary(32), t2 nchar(32))") + tdSql.execute("create table tt1 using stt tags('1', '1.7')") + + # create index for all tags + tdSql.execute("INSERT INTO tt1 VALUES (1641024000000, 9223372036854775807, 1641024000000, 1, 1, 1, '1', '1.7')") + + def check(self): + tdSql.query(f"SELECT * FROM tt1 WHERE c1 in (1.7, 9223372036854775803, '')") + tdSql.checkRows(0) + tdSql.query(f"SELECT * FROM tt1 WHERE c1 = 9223372036854775803") + tdSql.checkRows(0) + + tdSql.query(f"SELECT * FROM t1 WHERE c1 = 1.7") + tdSql.checkRows(0) + tdSql.query(f"SELECT * FROM t1 WHERE c1 in (1.7, 2)") + tdSql.checkRows(0) + tdSql.query(f"SELECT * FROM t1 WHERE c1 not in (1.7, 2)") + tdSql.checkRows(3) + + tdSql.query(f"SELECT * FROM t1 WHERE c2 = 1.7") + tdSql.checkRows(0) + tdSql.query(f"SELECT * FROM 
t1 WHERE c2 in (1.7, 2)") + tdSql.checkRows(0) + tdSql.query(f"SELECT * FROM t1 WHERE c2 not in (1.7, 2)") + tdSql.checkRows(3) + + tdSql.query(f"SELECT * FROM t1 WHERE c3 = 1.7") + tdSql.checkRows(1) + tdSql.query(f"SELECT * FROM t1 WHERE c3 in (1.7, 2)") + tdSql.checkRows(1) + tdSql.query(f"SELECT * FROM t1 WHERE c3 not in (1.7, 2)") + tdSql.checkRows(2) + + tdSql.query(f"SELECT * FROM t1 WHERE c4 = 1.7") + tdSql.checkRows(0) + tdSql.query(f"SELECT * FROM t1 WHERE c4 in (1.7, 2)") + tdSql.checkRows(0) + tdSql.query(f"SELECT * FROM t1 WHERE c4 not in (1.7, 2)") + tdSql.checkRows(3) + + tdSql.query(f"SELECT * FROM t1 WHERE c5 = 1.7") + tdSql.checkRows(0) + tdSql.query(f"SELECT * FROM t1 WHERE c5 in (1.7, 2)") + tdSql.checkRows(0) + tdSql.query(f"SELECT * FROM t1 WHERE c5 not in (1.7, 2)") + tdSql.checkRows(3) + + tdSql.query(f"SELECT * FROM t1 WHERE c6 = 1.7") + tdSql.checkRows(0) + tdSql.query(f"SELECT * FROM t1 WHERE c6 in (1.7, 2)") + tdSql.checkRows(0) + tdSql.query(f"SELECT * FROM t1 WHERE c6 not in (1.7, 2)") + tdSql.checkRows(3) + + tdSql.query(f"SELECT * FROM t1 WHERE c6 = 1") + tdSql.checkRows(2) + tdSql.query(f"SELECT * FROM t1 WHERE c6 in (1, 2)") + tdSql.checkRows(2) + tdSql.query(f"SELECT * FROM t1 WHERE c6 not in (1, 2)") + tdSql.checkRows(1) + + tdSql.query(f"SELECT * FROM t1 WHERE c6 = 0") + tdSql.checkRows(1) + tdSql.query(f"SELECT * FROM t1 WHERE c6 in (0, 2)") + tdSql.checkRows(1) + tdSql.query(f"SELECT * FROM t1 WHERE c6 not in (0, 2)") + tdSql.checkRows(2) + tdSql.query(f"SELECT * FROM t1 WHERE c6 not in (0, 2, 'sef')") + tdSql.checkRows(2) + + tdSql.query(f"SELECT * FROM t1 WHERE c7 = 1.7") + tdSql.checkRows(1) + tdSql.query(f"SELECT * FROM t1 WHERE c7 in (1.7, 2)") + tdSql.checkRows(1) + tdSql.query(f"SELECT * FROM t1 WHERE c7 not in (1.7, 2)") + tdSql.checkRows(2) + + tdSql.query(f"SELECT * FROM t1 WHERE c7 = 0") + tdSql.checkRows(2) + tdSql.query(f"SELECT * FROM t1 WHERE c7 in (0, 2)") + tdSql.checkRows(2) + tdSql.query(f"SELECT * FROM t1 
WHERE c7 not in (0, 2)") + tdSql.checkRows(1) + + tdSql.query(f"SELECT * FROM t1 WHERE c7 = ''") + tdSql.checkRows(1) + tdSql.query(f"SELECT * FROM t1 WHERE c7 in ('', 2)") + tdSql.checkRows(1) + tdSql.query(f"SELECT * FROM t1 WHERE c7 not in ('', 2)") + tdSql.checkRows(2) + + tdSql.query(f"SELECT * FROM st WHERE t2 in ('', 2)") + tdSql.checkRows(1) + tdSql.query(f"SELECT * FROM st WHERE t2 not in ('', 2)") + tdSql.checkRows(4) + + tdSql.query(f"SELECT * FROM st WHERE t1 in ('d343', 0, 2)") + tdSql.checkRows(1) + tdSql.query(f"SELECT * FROM st WHERE t1 in (0, 2)") + tdSql.checkRows(1) + tdSql.query(f"SELECT * FROM st WHERE t1 not in (0, 2)") + tdSql.checkRows(4) + + def run(self): + self.prepareData() + self.check() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + + + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/2-query/ts-5761.py b/tests/system-test/2-query/ts-5761.py new file mode 100644 index 0000000000..5c8430d856 --- /dev/null +++ b/tests/system-test/2-query/ts-5761.py @@ -0,0 +1,149 @@ +import taos + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.common import * + +class TDTestCase: + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor(), True) + self.dbname = 'db' + self.stbname = 'st' + + def prepareData(self): + # db + tdSql.execute(f"create database db;") + tdSql.execute(f"use db") + + # super tableUNSIGNED + tdSql.execute("CREATE TABLE st( time TIMESTAMP, c1 BIGINT, c2 smallint, c3 double, c4 int UNSIGNED, c5 bool, c6 binary(32), c7 nchar(32)) tags(t1 binary(32), t2 nchar(32))") + tdSql.execute("create table t1 using st tags('1', '1.7')") + tdSql.execute("create table t2 using st tags('0', '')") + tdSql.execute("create table t3 using st tags('1', 'er')") 
+ + # create index for all tags + tdSql.execute("INSERT INTO t1 VALUES (1641024000000, 1, 1, 1, 1, 1, '1', '1.7')") + tdSql.execute("INSERT INTO t1 VALUES (1641024000001, 0, 0, 1.7, 0, 0, '0', '')") + tdSql.execute("INSERT INTO t1 VALUES (1641024000002, 1, 1, 1, 1, 1, '1', 'er')") + tdSql.execute("INSERT INTO t2 VALUES (1641024000002, 1, 1, 1, 1, 1, '1', 'er')") + tdSql.execute("INSERT INTO t3 VALUES (1641024000002, 1, 1, 1, 1, 1, '1', 'er')") + + tdSql.execute("CREATE TABLE stt( time TIMESTAMP, c1 BIGINT, c2 timestamp, c3 int, c4 int UNSIGNED, c5 bool, c6 binary(32), c7 nchar(32)) tags(t1 binary(32), t2 nchar(32))") + tdSql.execute("create table tt1 using stt tags('1', '1.7')") + + # create index for all tags + tdSql.execute("INSERT INTO tt1 VALUES (1641024000000, 9223372036854775807, 1641024000000, 1, 1, 1, '1', '1.7')") + + def check(self): + tdSql.query(f"SELECT * FROM tt1 WHERE c1 in (1.7, 9223372036854775803, '')") + tdSql.checkRows(0) + tdSql.query(f"SELECT * FROM tt1 WHERE c1 = 9223372036854775803") + tdSql.checkRows(0) + + tdSql.query(f"SELECT * FROM t1 WHERE c1 = 1.7") + tdSql.checkRows(0) + tdSql.query(f"SELECT * FROM t1 WHERE c1 in (1.7, 2)") + tdSql.checkRows(0) + tdSql.query(f"SELECT * FROM t1 WHERE c1 not in (1.7, 2)") + tdSql.checkRows(3) + + tdSql.query(f"SELECT * FROM t1 WHERE c2 = 1.7") + tdSql.checkRows(0) + tdSql.query(f"SELECT * FROM t1 WHERE c2 in (1.7, 2)") + tdSql.checkRows(0) + tdSql.query(f"SELECT * FROM t1 WHERE c2 not in (1.7, 2)") + tdSql.checkRows(3) + + tdSql.query(f"SELECT * FROM t1 WHERE c3 = 1.7") + tdSql.checkRows(1) + tdSql.query(f"SELECT * FROM t1 WHERE c3 in (1.7, 2)") + tdSql.checkRows(1) + tdSql.query(f"SELECT * FROM t1 WHERE c3 not in (1.7, 2)") + tdSql.checkRows(2) + + tdSql.query(f"SELECT * FROM t1 WHERE c4 = 1.7") + tdSql.checkRows(0) + tdSql.query(f"SELECT * FROM t1 WHERE c4 in (1.7, 2)") + tdSql.checkRows(0) + tdSql.query(f"SELECT * FROM t1 WHERE c4 not in (1.7, 2)") + tdSql.checkRows(3) + + tdSql.query(f"SELECT * FROM 
t1 WHERE c5 = 1.7") + tdSql.checkRows(0) + tdSql.query(f"SELECT * FROM t1 WHERE c5 in (1.7, 2)") + tdSql.checkRows(0) + tdSql.query(f"SELECT * FROM t1 WHERE c5 not in (1.7, 2)") + tdSql.checkRows(3) + + tdSql.query(f"SELECT * FROM t1 WHERE c6 = 1.7") + tdSql.checkRows(0) + tdSql.query(f"SELECT * FROM t1 WHERE c6 in (1.7, 2)") + tdSql.checkRows(0) + tdSql.query(f"SELECT * FROM t1 WHERE c6 not in (1.7, 2)") + tdSql.checkRows(3) + + tdSql.query(f"SELECT * FROM t1 WHERE c6 = 1") + tdSql.checkRows(2) + tdSql.query(f"SELECT * FROM t1 WHERE c6 in (1, 2)") + tdSql.checkRows(2) + tdSql.query(f"SELECT * FROM t1 WHERE c6 not in (1, 2)") + tdSql.checkRows(1) + + tdSql.query(f"SELECT * FROM t1 WHERE c6 = 0") + tdSql.checkRows(1) + tdSql.query(f"SELECT * FROM t1 WHERE c6 in (0, 2)") + tdSql.checkRows(1) + tdSql.query(f"SELECT * FROM t1 WHERE c6 not in (0, 2)") + tdSql.checkRows(2) + tdSql.query(f"SELECT * FROM t1 WHERE c6 not in (0, 2, 'sef')") + tdSql.checkRows(2) + + tdSql.query(f"SELECT * FROM t1 WHERE c7 = 1.7") + tdSql.checkRows(1) + tdSql.query(f"SELECT * FROM t1 WHERE c7 in (1.7, 2)") + tdSql.checkRows(1) + tdSql.query(f"SELECT * FROM t1 WHERE c7 not in (1.7, 2)") + tdSql.checkRows(2) + + tdSql.query(f"SELECT * FROM t1 WHERE c7 = 0") + tdSql.checkRows(2) + tdSql.query(f"SELECT * FROM t1 WHERE c7 in (0, 2)") + tdSql.checkRows(2) + tdSql.query(f"SELECT * FROM t1 WHERE c7 not in (0, 2)") + tdSql.checkRows(1) + + tdSql.query(f"SELECT * FROM t1 WHERE c7 = ''") + tdSql.checkRows(1) + tdSql.query(f"SELECT * FROM t1 WHERE c7 in ('', 2)") + tdSql.checkRows(1) + tdSql.query(f"SELECT * FROM t1 WHERE c7 not in ('', 2)") + tdSql.checkRows(2) + + tdSql.query(f"SELECT * FROM st WHERE t2 in ('', 2)") + tdSql.checkRows(1) + tdSql.query(f"SELECT * FROM st WHERE t2 not in ('', 2)") + tdSql.checkRows(4) + + tdSql.query(f"SELECT * FROM st WHERE t1 in ('d343', 0, 2)") + tdSql.checkRows(1) + tdSql.query(f"SELECT * FROM st WHERE t1 in (0, 2)") + tdSql.checkRows(1) + tdSql.query(f"SELECT * FROM 
st WHERE t1 not in (0, 2)") + tdSql.checkRows(4) + + def run(self): + self.prepareData() + self.check() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + + + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/2-query/union.py b/tests/system-test/2-query/union.py index 98106e9a17..a740cee37d 100644 --- a/tests/system-test/2-query/union.py +++ b/tests/system-test/2-query/union.py @@ -436,6 +436,29 @@ class TDTestCase: tdSql.query(sql, queryTimes=1) tdSql.checkRows(50) + sql = "select null union select null" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(1) + tdSql.checkData(0, 0, None) + + sql = "select null union all select null" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(2) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + + sql = "select null union select 1" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(2) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, 1) + + sql = "select null union select 'asd'" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(2) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, 'asd') + def stop(self): tdSql.close() tdLog.success(f"{__file__} successfully executed") diff --git a/tests/system-test/7-tmq/td-33225.py b/tests/system-test/7-tmq/td-33225.py new file mode 100644 index 0000000000..f39e402b55 --- /dev/null +++ b/tests/system-test/7-tmq/td-33225.py @@ -0,0 +1,44 @@ +import taos +import sys +import time +import socket +import os +import threading + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.common import * +from taos.tmq import * +sys.path.append("./7-tmq") +from tmqCommon import * + +class TDTestCase: + updatecfgDict = {'debugFlag': 135, 'asynclog': 0} + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug(f"start to excute {__file__}") + 
tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file + + def run(self): + tdSql.execute(f'create database if not exists db_33225') + tdSql.execute(f'use db_33225') + tdSql.execute(f'create stable if not exists s33225 (ts timestamp, c1 int, c2 int) tags (t binary(32), t2 int)') + tdSql.execute(f'insert into t1 using s33225 tags("__devicid__", 1) values(1669092069068, 0, 1)') + + tdSql.execute("create topic db_33225_topic as select ts,c1,t2 from s33225") + tdSql.execute(f'create stream s1 into st1 as select _wstart, count(*), avg(c2),t2 from s33225 PARTITION BY tbname INTERVAL(1m)') + + tdSql.execute(f'alter table s33225 modify column c2 COMPRESS "zlib"') + tdSql.execute(f'create index dex1 on s33225(t2)') + + return + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file diff --git a/utils/tsim/CMakeLists.txt b/utils/tsim/CMakeLists.txt index 209982c659..b725ed919a 100644 --- a/utils/tsim/CMakeLists.txt +++ b/utils/tsim/CMakeLists.txt @@ -1,14 +1,29 @@ -aux_source_directory(src TSIM_SRC) -add_executable(tsim ${TSIM_SRC}) -target_link_libraries( - tsim +LIST(APPEND TSIM_SRC src/simEntry.c) +LIST(APPEND TSIM_SRC src/simExec.c) +LIST(APPEND TSIM_SRC src/simParse.c) +LIST(APPEND TSIM_SRC src/simSystem.c) + +ADD_LIBRARY(tsim_static STATIC ${TSIM_SRC}) +TARGET_INCLUDE_DIRECTORIES( + tsim_static + PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/inc" +) +TARGET_LINK_LIBRARIES( + tsim_static PUBLIC ${TAOS_LIB} PUBLIC util PUBLIC common PUBLIC os PUBLIC cjson ) -target_include_directories( + +LIST(APPEND TSIM_EXE_SRC src/simMain.c) +ADD_EXECUTABLE(tsim ${TSIM_EXE_SRC}) +TARGET_LINK_LIBRARIES( tsim - PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc" + PUBLIC tsim_static ) + +IF(${BUILD_TEST}) + ADD_SUBDIRECTORY(test) +ENDIF(${BUILD_TEST}) diff --git a/utils/tsim/inc/simInt.h b/utils/tsim/inc/simInt.h index 
f2360277e0..0ad0bfdea8 100644 --- a/utils/tsim/inc/simInt.h +++ b/utils/tsim/inc/simInt.h @@ -16,6 +16,10 @@ #ifndef _TD_SIM_INT_H_ #define _TD_SIM_INT_H_ +#ifdef __cplusplus +extern "C" { +#endif + #include "os.h" #include "cJSON.h" @@ -161,8 +165,8 @@ typedef struct _script_t { int32_t type; bool killed; void *taos; - char rows[12]; // number of rows data retrieved - char cols[12]; // number of columns data retrieved + char rows[12]; // number of rows data retrieved + char cols[12]; // number of columns data retrieved char data[MAX_QUERY_ROW_NUM][MAX_QUERY_COL_NUM][MAX_QUERY_VALUE_LEN]; // query results char system_exit_code[12]; char system_ret_content[MAX_SYSTEM_RESULT_LEN]; @@ -192,7 +196,7 @@ SScript *simParseScript(char *fileName); SScript *simProcessCallOver(SScript *script); void *simExecuteScript(void *script); void simInitsimCmdList(); -bool simSystemInit(); +void simSystemInit(); void simSystemCleanUp(); char *simGetVariable(SScript *script, char *varName, int32_t varLen); bool simExecuteExpCmd(SScript *script, char *option); @@ -214,4 +218,11 @@ bool simExecuteLineInsertErrorCmd(SScript *script, char *option); bool simExecuteSetBIModeCmd(SScript *script, char *option); void simVisuallizeOption(SScript *script, char *src, char *dst); +int32_t simEntry(int32_t argc, char **argv); +void simHandleSignal(int32_t signo, void *sigInfo, void *context); + +#ifdef __cplusplus +} +#endif + #endif /*_TD_SIM_INT_H_*/ diff --git a/utils/tsim/src/simEntry.c b/utils/tsim/src/simEntry.c new file mode 100644 index 0000000000..dd11c21af0 --- /dev/null +++ b/utils/tsim/src/simEntry.c @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#define _DEFAULT_SOURCE +#include "simInt.h" + +bool simExecSuccess = false; +bool abortExecution = false; +bool useValgrind = false; + +void simHandleSignal(int32_t signo, void *sigInfo, void *context) { + simSystemCleanUp(); + abortExecution = true; +} + +int32_t simEntry(int32_t argc, char **argv) { + char scriptFile[MAX_FILE_NAME_LEN] = "sim_main_test.sim"; + + for (int32_t i = 1; i < argc; ++i) { + if (strcmp(argv[i], "-c") == 0 && i < argc - 1) { + tstrncpy(configDir, argv[++i], 128); + } else if (strcmp(argv[i], "-f") == 0 && i < argc - 1) { + tstrncpy(scriptFile, argv[++i], MAX_FILE_NAME_LEN); + } else if (strcmp(argv[i], "-v") == 0) { + useValgrind = true; + } else { + printf("usage: %s [options] \n", argv[0]); + printf(" [-c config]: config directory, default is: %s\n", configDir); + printf(" [-f script]: script filename\n"); + return 0; + } + } + + simInfo("simulator is running ..."); + + simSystemInit(); + taosSetSignal(SIGINT, simHandleSignal); + + SScript *script = simParseScript(scriptFile); + if (script == NULL) { + simError("parse script file:%s failed", scriptFile); + return -1; + } + + simScriptList[++simScriptPos] = script; + simExecuteScript(script); + + int32_t ret = simExecSuccess ? 
0 : -1; + simInfo("execute result %d", ret); + + return ret; +} diff --git a/utils/tsim/src/simExe.c b/utils/tsim/src/simExec.c similarity index 100% rename from utils/tsim/src/simExe.c rename to utils/tsim/src/simExec.c index a9772addbb..82ce852b1e 100644 --- a/utils/tsim/src/simExe.c +++ b/utils/tsim/src/simExec.c @@ -382,8 +382,8 @@ bool simExecuteRunBackCmd(SScript *script, char *option) { return true; } -void simReplaceDirSep(char *buf) { #ifdef WINDOWS +void simReplaceDirSep(char *buf) { int i = 0; while (buf[i] != '\0') { if (buf[i] == '/') { @@ -391,8 +391,8 @@ void simReplaceDirSep(char *buf) { } i++; } -#endif } +#endif bool simReplaceStr(char *buf, char *src, char *dst) { bool replaced = false; diff --git a/utils/tsim/src/simMain.c b/utils/tsim/src/simMain.c index cd4a5117b2..0af1e2eec2 100644 --- a/utils/tsim/src/simMain.c +++ b/utils/tsim/src/simMain.c @@ -16,58 +16,7 @@ #define _DEFAULT_SOURCE #include "simInt.h" -bool simExecSuccess = false; -bool abortExecution = false; -bool useValgrind = false; - -void simHandleSignal(int32_t signo, void *sigInfo, void *context) { - simSystemCleanUp(); - abortExecution = true; -} - int32_t main(int32_t argc, char *argv[]) { - char scriptFile[MAX_FILE_NAME_LEN] = "sim_main_test.sim"; - - for (int32_t i = 1; i < argc; ++i) { - if (strcmp(argv[i], "-c") == 0 && i < argc - 1) { - tstrncpy(configDir, argv[++i], 128); - } else if (strcmp(argv[i], "-f") == 0 && i < argc - 1) { - tstrncpy(scriptFile, argv[++i], MAX_FILE_NAME_LEN); - } else if (strcmp(argv[i], "-v") == 0) { - useValgrind = true; - } else { - printf("usage: %s [options] \n", argv[0]); - printf(" [-c config]: config directory, default is: %s\n", configDir); - printf(" [-f script]: script filename\n"); - return 0; - } - } - - if (!simSystemInit()) { - simError("failed to initialize the system"); - simSystemCleanUp(); - return -1; - } - - simInfo("simulator is running ..."); - taosSetSignal(SIGINT, simHandleSignal); - - SScript *script = 
simParseScript(scriptFile); - if (script == NULL) { - simError("parse script file:%s failed", scriptFile); - return -1; - } - - if (abortExecution) { - simError("execute abort"); - return -1; - } - - simScriptList[++simScriptPos] = script; - simExecuteScript(script); - - int32_t ret = simExecSuccess ? 0 : -1; - simInfo("execute result %d", ret); - - return ret; + // entry function used for unit testing. + return simEntry(argc, argv); } diff --git a/utils/tsim/src/simSystem.c b/utils/tsim/src/simSystem.c index dcf5d6ab12..e1dab0cd6c 100644 --- a/utils/tsim/src/simSystem.c +++ b/utils/tsim/src/simSystem.c @@ -35,11 +35,10 @@ int32_t simInitCfg() { return 0; } -bool simSystemInit() { +void simSystemInit() { simInitCfg(); simInitsimCmdList(); memset(simScriptList, 0, sizeof(SScript *) * MAX_MAIN_SCRIPT_NUM); - return true; } void simSystemCleanUp() {} diff --git a/utils/tsim/test/CMakeLists.txt b/utils/tsim/test/CMakeLists.txt new file mode 100644 index 0000000000..5df85bf903 --- /dev/null +++ b/utils/tsim/test/CMakeLists.txt @@ -0,0 +1,23 @@ +CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) +PROJECT(TDengine) + +FIND_PATH(HEADER_GTEST_INCLUDE_DIR gtest.h /usr/include/gtest /usr/local/include/gtest) +FIND_LIBRARY(LIB_GTEST_STATIC_DIR libgtest.a /usr/lib/ /usr/local/lib /usr/lib64) +FIND_LIBRARY(LIB_GTEST_SHARED_DIR libgtest.so /usr/lib/ /usr/local/lib /usr/lib64) + +IF(HEADER_GTEST_INCLUDE_DIR AND(LIB_GTEST_STATIC_DIR OR LIB_GTEST_SHARED_DIR)) + MESSAGE(STATUS "gTest library found, build os test") + + INCLUDE_DIRECTORIES(${HEADER_GTEST_INCLUDE_DIR}) + AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST) +ENDIF() + +INCLUDE_DIRECTORIES(${TD_SOURCE_DIR}/src/util/inc) + +ADD_EXECUTABLE(simTests "simTests.cpp") +TARGET_LINK_LIBRARIES(simTests os util tsim_static gtest_main) + +ADD_TEST( + NAME simTests + COMMAND simTests +) \ No newline at end of file diff --git a/utils/tsim/test/simTests.cpp b/utils/tsim/test/simTests.cpp new file mode 100644 index 
0000000000..e728a1d4cd --- /dev/null +++ b/utils/tsim/test/simTests.cpp @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include +#include + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wwrite-strings" +#pragma GCC diagnostic ignored "-Wunused-function" +#pragma GCC diagnostic ignored "-Wunused-variable" +#pragma GCC diagnostic ignored "-Wsign-compare" +#pragma GCC diagnostic ignored "-Wsign-compare" +#pragma GCC diagnostic ignored "-Wformat" +#pragma GCC diagnostic ignored "-Wint-to-pointer-cast" +#pragma GCC diagnostic ignored "-Wpointer-arith" + +#include "simInt.h" + +void simHandleSignal(int32_t signo, void *sigInfo, void *context); + +TEST(simTests, parameters) { + int32_t ret = 0; + int32_t argc = 2; + char *argv[4] = {0}; + + simSystemCleanUp(); + // argv[1] = "-c"; + // ret = simEntry(argc, argv); + // EXPECT_EQ(ret, 0); + + // argv[1] = "-f"; + // ret = simEntry(argc, argv); + // EXPECT_EQ(ret, 0); + + // argv[1] = "-v"; + // ret = simEntry(argc, argv); + // EXPECT_EQ(ret, 0); + + // argv[1] = "-h"; + // ret = simEntry(argc, argv); + // EXPECT_EQ(ret, 0); + + // simHandleSignal(0, NULL, NULL); + + // simDebugFlag = 0; + // argc = 1; + // ret = simEntry(argc, argv); + // EXPECT_EQ(ret, -1); +}