diff --git a/.gitignore b/.gitignore index 8f461f2b02..0f68e15c90 100644 --- a/.gitignore +++ b/.gitignore @@ -162,3 +162,12 @@ geos_c.h source/libs/parser/src/sql.c include/common/ttokenauto.h !packaging/smokeTest/pytest_require.txt +tdengine-test-dir/ +localtime.c +private.h +strftime.c +tzdir.h +tzfile.h +coverage.info +taos +taosd \ No newline at end of file diff --git a/docs/en/06-advanced/05-data-in/07-mqtt.md b/docs/en/06-advanced/05-data-in/07-mqtt.md index 3f6eed5834..73ef3b534c 100644 --- a/docs/en/06-advanced/05-data-in/07-mqtt.md +++ b/docs/en/06-advanced/05-data-in/07-mqtt.md @@ -112,14 +112,14 @@ Fill in the example data from the MQTT message body in **Message Body**. JSON data supports JSONObject or JSONArray, and the json parser can parse the following data: -``` json +```json {"id": 1, "message": "hello-word"} {"id": 2, "message": "hello-word"} ``` or -``` json +```json [{"id": 1, "message": "hello-word"},{"id": 2, "message": "hello-word"}] ``` diff --git a/docs/en/06-advanced/05-data-in/08-kafka.md b/docs/en/06-advanced/05-data-in/08-kafka.md index bdd652a5e7..b3bdca4cf9 100644 --- a/docs/en/06-advanced/05-data-in/08-kafka.md +++ b/docs/en/06-advanced/05-data-in/08-kafka.md @@ -109,7 +109,7 @@ In addition, the [Kerberos](https://web.mit.edu/kerberos/) authentication servic After configuration, you can use the [kcat](https://github.com/edenhill/kcat) tool to verify Kafka topic consumption: -```bash +```shell kcat \ -b \ -G kcat \ @@ -171,14 +171,14 @@ Enter sample data from the Kafka message body in **Message Body**. JSON data supports JSONObject or JSONArray, and the following data can be parsed using a JSON parser: -``` json +```json {"id": 1, "message": "hello-word"} {"id": 2, "message": "hello-word"} ``` or -``` json +```json [{"id": 1, "message": "hello-word"},{"id": 2, "message": "hello-word"}] ``` diff --git a/docs/en/06-advanced/05-data-in/index.md b/docs/en/06-advanced/05-data-in/index.md index 59a3c6da7b..5221aa2002 100644 --- a/docs/en/06-advanced/05-data-in/index.md +++ b/docs/en/06-advanced/05-data-in/index.md @@ -83,7 +83,7 @@ Parsing is the process of parsing unstructured strings into structured data. The JSON parsing supports JSONObject or JSONArray. The following JSON sample data can automatically parse fields: `groupid`, `voltage`, `current`, `ts`, `inuse`, `location`. -``` json +```json {"groupid": 170001, "voltage": "221V", "current": 12.3, "ts": "2023-12-18T22:12:00", "inuse": true, "location": "beijing.chaoyang.datun"} {"groupid": 170001, "voltage": "220V", "current": 12.2, "ts": "2023-12-18T22:12:02", "inuse": true, "location": "beijing.chaoyang.datun"} {"groupid": 170001, "voltage": "216V", "current": 12.5, "ts": "2023-12-18T22:12:04", "inuse": false, "location": "beijing.chaoyang.datun"} @@ -91,7 +91,7 @@ JSON parsing supports JSONObject or JSONArray. The following JSON sample data ca Or -``` json +```json [{"groupid": 170001, "voltage": "221V", "current": 12.3, "ts": "2023-12-18T22:12:00", "inuse": true, "location": "beijing.chaoyang.datun"}, {"groupid": 170001, "voltage": "220V", "current": 12.2, "ts": "2023-12-18T22:12:02", "inuse": true, "location": "beijing.chaoyang.datun"}, {"groupid": 170001, "voltage": "216V", "current": 12.5, "ts": "2023-12-18T22:12:04", "inuse": false, "location": "beijing.chaoyang.datun"}] @@ -101,7 +101,7 @@ Subsequent examples will only explain using JSONObject. 
The following nested JSON data can automatically parse fields `groupid`, `data_voltage`, `data_current`, `ts`, `inuse`, `location_0_province`, `location_0_city`, `location_0_datun`, and you can also choose which fields to parse and set aliases for the parsed fields. -``` json +```json {"groupid": 170001, "data": { "voltage": "221V", "current": 12.3 }, "ts": "2023-12-18T22:12:00", "inuse": true, "location": [{"province": "beijing", "city":"chaoyang", "street": "datun"}]} ``` @@ -114,7 +114,7 @@ The following nested JSON data can automatically parse fields `groupid`, `data_v You can use **named capture groups** in regular expressions to extract multiple fields from any string (text) field. As shown in the figure, extract fields such as access IP, timestamp, and accessed URL from nginx logs. -``` re +```regex (?\b(?:[0-9]{1,3}\.){3}[0-9]{1,3}\b)\s-\s-\s\[(?\d{2}/\w{3}/\d{4}:\d{2}:\d{2}:\d{2}\s\+\d{4})\]\s"(?[A-Z]+)\s(?[^\s"]+).*(?\d{3})\s(?\d+) ``` @@ -133,7 +133,7 @@ Custom rhai syntax scripts for parsing input data (refer to `https://rhai.rs/boo For example, for data reporting three-phase voltage values, which are entered into three subtables respectively, such data needs to be parsed -``` json +```json { "ts": "2024-06-27 18:00:00", "voltage": "220.1,220.3,221.1", @@ -164,7 +164,7 @@ The final parsing result is shown below: The parsed data may still not meet the data requirements of the target table. For example, the original data collected by a smart meter is as follows (in json format): -``` json +```json {"groupid": 170001, "voltage": "221V", "current": 12.3, "ts": "2023-12-18T22:12:00", "inuse": true, "location": "beijing.chaoyang.datun"} {"groupid": 170001, "voltage": "220V", "current": 12.2, "ts": "2023-12-18T22:12:02", "inuse": true, "location": "beijing.chaoyang.datun"} {"groupid": 170001, "voltage": "216V", "current": 12.5, "ts": "2023-12-18T22:12:04", "inuse": false, "location": "beijing.chaoyang.datun"} diff --git a/docs/en/07-develop/02-sql.md b/docs/en/07-develop/02-sql.md index 57376e615d..d32ff340f3 100644 --- a/docs/en/07-develop/02-sql.md +++ b/docs/en/07-develop/02-sql.md @@ -83,14 +83,14 @@ Next, create a supertable (STABLE) named `meters`, whose table structure include Create Database -```bash +```shell curl --location -uroot:taosdata 'http://127.0.0.1:6041/rest/sql' \ --data 'CREATE DATABASE IF NOT EXISTS power' ``` Create Table, specify the database as `power` in the URL -```bash +```shell curl --location -uroot:taosdata 'http://127.0.0.1:6041/rest/sql/power' \ --data 'CREATE STABLE IF NOT EXISTS meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))' ``` @@ -167,7 +167,7 @@ NOW is an internal system function, defaulting to the current time of the client Write data -```bash +```shell curl --location -uroot:taosdata 'http://127.0.0.1:6041/rest/sql' \ --data 'INSERT INTO power.d1001 USING power.meters TAGS(2,'\''California.SanFrancisco'\'') VALUES (NOW + 1a, 10.30000, 219, 0.31000) (NOW + 2a, 12.60000, 218, 0.33000) (NOW + 3a, 12.30000, 221, 0.31000) power.d1002 USING power.meters TAGS(3, '\''California.SanFrancisco'\'') VALUES (NOW + 1a, 10.30000, 218, 0.25000)' ``` @@ -247,7 +247,7 @@ Rust connector also supports using **serde** for deserializing to get structured Query Data -```bash +```shell curl --location -uroot:taosdata 'http://127.0.0.1:6041/rest/sql' \ --data 'SELECT ts, current, location FROM power.meters limit 100' ``` @@ -329,7 +329,7 @@ Below are code examples of setting reqId to execute SQL in various 
language conn Query data, specify reqId as 3 -```bash +```shell curl --location -uroot:taosdata 'http://127.0.0.1:6041/rest/sql?req_id=3' \ --data 'SELECT ts, current, location FROM power.meters limit 1' ``` diff --git a/docs/en/07-develop/09-udf.md b/docs/en/07-develop/09-udf.md index 795556c225..0e91dd09db 100644 --- a/docs/en/07-develop/09-udf.md +++ b/docs/en/07-develop/09-udf.md @@ -273,19 +273,19 @@ To better operate the above data structures, some convenience functions are prov Create table: -```bash +```shell create table battery(ts timestamp, vol1 float, vol2 float, vol3 float, deviceId varchar(16)); ``` Create custom function: -```bash +```shell create aggregate function max_vol as '/root/udf/libmaxvol.so' outputtype binary(64) bufsize 10240 language 'C'; ``` Use custom function: -```bash +```shell select max_vol(vol1, vol2, vol3, deviceid) from battery; ``` @@ -334,7 +334,7 @@ When developing UDFs in Python, you need to implement the specified interface fu The interface for scalar functions is as follows. -```Python +```python def process(input: datablock) -> tuple[output_type]: ``` @@ -347,7 +347,7 @@ The main parameters are as follows: The interface for aggregate functions is as follows. -```Python +```python def start() -> bytes: def reduce(inputs: datablock, buf: bytes) -> bytes def finish(buf: bytes) -> output_type: @@ -365,7 +365,7 @@ Finally, when all row data blocks have been processed, the finish function is ca The interfaces for initialization and destruction are as follows. -```Python +```python def init() def destroy() ``` @@ -381,7 +381,7 @@ Parameter description: The template for developing scalar functions in Python is as follows. -```Python +```python def init(): # initialization def destroy(): @@ -393,7 +393,7 @@ def process(input: datablock) -> tuple[output_type]: The template for developing aggregate functions in Python is as follows. -```Python +```python def init(): #initialization def destroy(): @@ -828,7 +828,7 @@ Through this example, we learned how to define aggregate functions and print cus
pybitand.py -```Python +```python {{#include tests/script/sh/pybitand.py}} ``` diff --git a/docs/en/08-operation/04-maintenance.md b/docs/en/08-operation/04-maintenance.md index 5e7fca1f08..970ee40d18 100644 --- a/docs/en/08-operation/04-maintenance.md +++ b/docs/en/08-operation/04-maintenance.md @@ -15,7 +15,7 @@ TDengine is designed for various writing scenarios, and many of these scenarios ### Syntax -```SQL +```sql COMPACT DATABASE db_name [start with 'XXXX'] [end with 'YYYY']; SHOW COMPACTS [compact_id]; KILL COMPACT compact_id; @@ -41,7 +41,7 @@ KILL COMPACT compact_id; When one or more nodes in a multi-replica cluster restart due to upgrades or other reasons, it may lead to an imbalance in the load among the various dnodes in the cluster. In extreme cases, all vgroup leaders may be located on the same dnode. To solve this problem, you can use the following commands, which were first released in version 3.0.4.0. It is recommended to use the latest version as much as possible. -```SQL +```sql balance vgroup leader; # Rebalance all vgroup leaders balance vgroup leader on ; # Rebalance a vgroup leader balance vgroup leader database ; # Rebalance all vgroup leaders within a database diff --git a/docs/en/08-operation/12-multi.md b/docs/en/08-operation/12-multi.md index f2f464be1c..1d0b8ad6cb 100644 --- a/docs/en/08-operation/12-multi.md +++ b/docs/en/08-operation/12-multi.md @@ -121,7 +121,7 @@ The cost of using object storage services is related to the amount of data store When the TSDB time-series data exceeds the time specified by the `s3_keeplocal` parameter, the related data files will be split into multiple file blocks, each with a default size of 512 MB (`s3_chunkpages * tsdb_pagesize`). Except for the last file block, which is retained on the local file system, the rest of the file blocks are uploaded to the object storage service. -```math +```text Upload Count = Data File Size / (s3_chunkpages * tsdb_pagesize) - 1 ``` @@ -135,7 +135,7 @@ During query operations, if data in object storage needs to be accessed, TSDB do Adjacent multiple data pages are downloaded as a single data block from object storage to reduce the number of downloads. The size of each data page is specified by the `tsdb_pagesize` parameter when creating the database, with a default of 4 KB. -```math +```text Download Count = Number of Data Blocks Needed for Query - Number of Cached Data Blocks ``` @@ -155,7 +155,7 @@ For deployment methods, please refer to the [Flexify](https://azuremarketplace.m In the configuration file /etc/taos/taos.cfg, add parameters for S3 access: -```cfg +```text s3EndPoint http //20.191.157.23,http://20.191.157.24,http://20.191.157.25 s3AccessKey FLIOMMNL0:uhRNdeZMLD4wo,ABCIOMMN:uhRNdeZMD4wog,DEFOMMNL049ba:uhRNdeZMLD4wogXd s3BucketName td-test diff --git a/docs/en/08-operation/14-user.md b/docs/en/08-operation/14-user.md index 95c00bd21f..a503397fcc 100644 --- a/docs/en/08-operation/14-user.md +++ b/docs/en/08-operation/14-user.md @@ -18,7 +18,7 @@ create user user_name pass'password' [sysinfo {1|0}] The parameters are explained as follows. - user_name: Up to 23 B long. -- password: Up to 128 B long, valid characters include letters and numbers as well as special characters other than single and double quotes, apostrophes, backslashes, and spaces, and it cannot be empty. +- password: The password must be between 8 and 16 characters long and include at least three types of characters from the following: uppercase letters, lowercase letters, numbers, and special characters. 
Special characters include `! @ # $ % ^ & * ( ) - _ + = [ ] { } : ; > < ? | ~ , .`. - sysinfo: Whether the user can view system information. 1 means they can view it, 0 means they cannot. System information includes server configuration information, various node information such as dnode, query node (qnode), etc., as well as storage-related information, etc. The default is to view system information. The following SQL can create a user named test with the password 123456 who can view system information. diff --git a/docs/en/10-third-party/01-collection/09-emq-broker.md b/docs/en/10-third-party/01-collection/09-emq-broker.md index 24cb12997f..4f3eadcca6 100644 --- a/docs/en/10-third-party/01-collection/09-emq-broker.md +++ b/docs/en/10-third-party/01-collection/09-emq-broker.md @@ -140,7 +140,7 @@ Finally, click the "Create" button at the bottom left to save the rule. ## Write a Mock Test Program -```javascript +```js {{#include docs/examples/other/mock.js}} ``` diff --git a/docs/en/10-third-party/01-collection/11-kafka.md b/docs/en/10-third-party/01-collection/11-kafka.md index 2627e33b65..25ba7fd54a 100644 --- a/docs/en/10-third-party/01-collection/11-kafka.md +++ b/docs/en/10-third-party/01-collection/11-kafka.md @@ -95,7 +95,7 @@ curl http://localhost:8083/connectors If all components have started successfully, the following output will be displayed: -```txt +```text [] ``` @@ -181,7 +181,7 @@ If the above command is executed successfully, the following output will be disp Prepare a text file with test data, content as follows: -```txt title="test-data.txt" +```text title="test-data.txt" meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249000000 meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250000000 meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249000000 @@ -303,7 +303,7 @@ kafka-console-consumer.sh --bootstrap-server localhost:9092 --from-beginning --t Output: -```txt +```text ...... meters,location="California.SanFrancisco",groupid=2i32 current=10.3f32,voltage=219i32,phase=0.31f32 1538548685000000000 meters,location="California.SanFrancisco",groupid=2i32 current=12.6f32,voltage=218i32,phase=0.33f32 1538548695000000000 diff --git a/docs/en/10-third-party/03-visual/01-grafana.mdx b/docs/en/10-third-party/03-visual/01-grafana.md similarity index 99% rename from docs/en/10-third-party/03-visual/01-grafana.mdx rename to docs/en/10-third-party/03-visual/01-grafana.md index bd2d313def..ec857f7795 100644 --- a/docs/en/10-third-party/03-visual/01-grafana.mdx +++ b/docs/en/10-third-party/03-visual/01-grafana.md @@ -60,7 +60,7 @@ Click `Save & Test` to test, if successful, it will prompt: `TDengine Data sourc For users using Grafana version 7.x or configuring with [Grafana Provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/), you can use the installation script on the Grafana server to automatically install the plugin and add the data source Provisioning configuration file. 
-```sh +```shell bash -c "$(curl -fsSL \ https://raw.githubusercontent.com/taosdata/grafanaplugin/master/install.sh)" -- \ -a http://localhost:6041 \ @@ -77,7 +77,7 @@ Save the script and execute `./install.sh --help` to view detailed help document Use the [`grafana-cli` command line tool](https://grafana.com/docs/grafana/latest/administration/cli/) to install the plugin [installation](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation). -```bash +```shell grafana-cli plugins install tdengine-datasource # with sudo sudo -u grafana grafana-cli plugins install tdengine-datasource @@ -85,7 +85,7 @@ sudo -u grafana grafana-cli plugins install tdengine-datasource Alternatively, download the .zip file from [GitHub](https://github.com/taosdata/grafanaplugin/releases/tag/latest) or [Grafana](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation) to your local machine and unzip it into the Grafana plugins directory. Example command line download is as follows: -```bash +```shell GF_VERSION=3.5.1 # from GitHub wget https://github.com/taosdata/grafanaplugin/releases/download/v$GF_VERSION/tdengine-datasource-$GF_VERSION.zip @@ -95,13 +95,13 @@ wget -O tdengine-datasource-$GF_VERSION.zip https://grafana.com/api/plugins/tden For CentOS 7.2 operating system, unzip the plugin package into the /var/lib/grafana/plugins directory and restart Grafana. -```bash +```shell sudo unzip tdengine-datasource-$GF_VERSION.zip -d /var/lib/grafana/plugins/ ``` If Grafana is running in a Docker environment, you can use the following environment variable to set up automatic installation of the TDengine data source plugin: -```bash +```shell GF_INSTALL_PLUGINS=tdengine-datasource ``` @@ -120,7 +120,7 @@ Click `Save & Test` to test, if successful, it will prompt: `TDengine Data sourc Refer to [Grafana containerized installation instructions](https://grafana.com/docs/grafana/next/setup-grafana/installation/docker/#install-plugins-in-the-docker-container). Use the following command to start a container and automatically install the TDengine plugin: -```bash +```shell docker run -d \ -p 3000:3000 \ --name=grafana \ diff --git a/docs/en/14-reference/01-components/04-taosx.md b/docs/en/14-reference/01-components/04-taosx.md index b53d9f6777..8ce16f471d 100644 --- a/docs/en/14-reference/01-components/04-taosx.md +++ b/docs/en/14-reference/01-components/04-taosx.md @@ -31,7 +31,7 @@ The following parameter descriptions and examples use `` as a placehold In command line mode, taosX uses DSN to represent a data source (source or destination), a typical DSN is as follows: -```bash +```shell # url-like [+]://[[:@]:][/][?=[&=]] |------|------------|---|-----------|-----------|------|------|----------|-----------------------| @@ -390,7 +390,7 @@ You can view the log files or use the `journalctl` command to view the logs of ` The command to view logs under Linux using `journalctl` is as follows: -```bash +```shell journalctl -u taosx [-f] ``` @@ -572,7 +572,7 @@ uint32_t len: The binary length of this string (excluding `\0`). **Return Value**: -``` c +```c struct parser_resp_t { int e; // 0 if success. void* p; // Success if contains. @@ -589,7 +589,7 @@ When creation is successful, e = 0, p is the parser object. Parse the input payload and return the result in JSON format [u8]. The returned JSON will be fully decoded using the default JSON parser (expanding the root array and all objects). 
-``` c +```c const char* parser_mutate( void* parser, const uint8_t* in_ptr, uint32_t in_len, diff --git a/docs/en/14-reference/01-components/05-taosx-agent.md b/docs/en/14-reference/01-components/05-taosx-agent.md index 9353d45de3..3e8b4f4d63 100644 --- a/docs/en/14-reference/01-components/05-taosx-agent.md +++ b/docs/en/14-reference/01-components/05-taosx-agent.md @@ -26,7 +26,7 @@ The default configuration file for `Agent` is located at `/etc/taos/agent.toml`, As shown below: -```TOML +```toml # taosX service endpoint # #endpoint = "http://localhost:6055" @@ -83,7 +83,7 @@ You don't need to be confused about how to set up the configuration file. Read a On Linux systems, the `Agent` can be started with the Systemd command: -```bash +```shell systemctl start taosx-agent ``` @@ -95,6 +95,6 @@ You can view the log files or use the `journalctl` command to view the logs of t The command to view logs with `journalctl` on Linux is as follows: -```bash +```shell journalctl -u taosx-agent [-f] ``` diff --git a/docs/en/14-reference/01-components/06-taoskeeper.md b/docs/en/14-reference/01-components/06-taoskeeper.md index 570cf7305f..78556d888a 100644 --- a/docs/en/14-reference/01-components/06-taoskeeper.md +++ b/docs/en/14-reference/01-components/06-taoskeeper.md @@ -143,13 +143,13 @@ For details on TDengine monitoring configuration, please refer to: [TDengine Mon After installation, please use the `systemctl` command to start the taoskeeper service process. -```bash +```shell systemctl start taoskeeper ``` Check if the service is working properly: -```bash +```shell systemctl status taoskeeper ``` @@ -261,7 +261,7 @@ Query OK, 14 row(s) in set (0.006542s) You can view the most recent report record of a supertable, such as: -``` shell +```shell taos> select last_row(*) from taosd_dnodes_info; last_row(_ts) | last_row(disk_engine) | last_row(system_net_in) | last_row(vnodes_num) | last_row(system_net_out) | last_row(uptime) | last_row(has_mnode) | last_row(io_read_disk) | last_row(error_log_count) | last_row(io_read) | last_row(cpu_cores) | last_row(has_qnode) | last_row(has_snode) | last_row(disk_total) | last_row(mem_engine) | last_row(info_log_count) | last_row(cpu_engine) | last_row(io_write_disk) | last_row(debug_log_count) | last_row(disk_used) | last_row(mem_total) | last_row(io_write) | last_row(masters) | last_row(cpu_system) | last_row(trace_log_count) | last_row(mem_free) | ====================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================== diff --git a/docs/en/14-reference/01-components/07-explorer.md b/docs/en/14-reference/01-components/07-explorer.md index 726b07d050..a9a2ebb356 100644 --- a/docs/en/14-reference/01-components/07-explorer.md +++ b/docs/en/14-reference/01-components/07-explorer.md @@ -14,7 +14,7 @@ taosExplorer does not require separate installation. 
Starting from TDengine vers Before starting taosExplorer, please make sure the content in the configuration file is correct. -```TOML +```toml # This is an automatically generated configuration file for Explorer in [TOML](https://toml.io/) format. # # Here is a full list of available options. @@ -148,7 +148,7 @@ Description: Then start taosExplorer, you can directly execute taos-explorer in the command line or use the systemctl command: -```bash +```shell systemctl start taos-explorer # Linux sc.exe start taos-explorer # Windows ``` diff --git a/docs/en/14-reference/01-components/12-tdinsight.mdx b/docs/en/14-reference/01-components/12-tdinsight.md similarity index 71% rename from docs/en/14-reference/01-components/12-tdinsight.mdx rename to docs/en/14-reference/01-components/12-tdinsight.md index 463ceaaca6..c9464760e7 100644 --- a/docs/en/14-reference/01-components/12-tdinsight.mdx +++ b/docs/en/14-reference/01-components/12-tdinsight.md @@ -171,7 +171,37 @@ Metric details: 5. **Writes**: Total number of writes 6. **Other**: Total number of other requests -There are also line charts for the above categories. +There are also line charts for the above categories. + +### Automatic import of preconfigured alert rules + +After summarizing user experience, 14 commonly used alert rules are sorted out. These alert rules can monitor key indicators of the TDengine cluster and report alerts, such as abnormal and exceeded indicators. +Starting from TDengine-Server 3.3.4.3 (TDengine-datasource 3.6.3), TDengine Datasource supports automatic import of preconfigured alert rules. You can import 14 alert rules to Grafana (version 11 or later) with one click. +In the TDengine-datasource setting interface, turn on the "Load Tengine Alert" switch, click the "Save & test" button, the plugin will automatically load the mentioned 14 alert rules. The rules will be placed in the Grafana alerts directory. If not required, turn off the "Load TDengine Alert" switch, and click the button next to "Clear TDengine Alert" to clear all the alert rules imported into this data source. + +After importing, click on "Alert rules" on the left side of the Grafana interface to view all current alert rules. By configuring contact points, users can receive alert notifications. 
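+For illustration only, a contact point could also be created programmatically through Grafana's Alerting provisioning HTTP API rather than the UI. This is a minimal sketch, not part of the TDengine plugin: the host `localhost:3000`, the `admin:admin` credentials, the contact point name, and the e-mail address are placeholder assumptions, and the endpoint should be verified against your Grafana version before use.
+
+```shell
+# Sketch: create an e-mail contact point named "tdengine-alerts" via Grafana's
+# Alerting provisioning API. Host, credentials, and address are placeholders.
+curl -s -u admin:admin -X POST http://localhost:3000/api/v1/provisioning/contact-points \
+  -H "Content-Type: application/json" \
+  -d '{"name": "tdengine-alerts", "type": "email", "settings": {"addresses": "ops@example.com"}}'
+```
+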
+ +The specific configuration of the 14 alert rules is as follows: + +| alert rule| Rule threshold| Behavior when no data | Data scanning interval |Duration | SQL | +| ------ | --------- | ---------------- | ----------- |------- |----------------------| +|CPU load of dnode node|average > 80%|Trigger alert|5 minutes|5 minutes |`select now(), dnode_id, last(cpu_system) as cup_use from log.taosd_dnodes_info where _ts >= (now- 5m) and _ts < now partition by dnode_id having first(_ts) > 0 `| +|Memory of dnode node |average > 60%|Trigger alert|5 minutes|5 minutes|`select now(), dnode_id, last(mem_engine) / last(mem_total) * 100 as taosd from log.taosd_dnodes_info where _ts >= (now- 5m) and _ts 80%|Trigger alert|5 minutes|5 minutes|`select now(), dnode_id, data_dir_level, data_dir_name, last(used) / last(total) * 100 as used from log.taosd_dnodes_data_dirs where _ts >= (now - 5m) and _ts < now partition by dnode_id, data_dir_level, data_dir_name`| +|Authorization expires |< 60天|Trigger alert|1 day|0 0 seconds|`select now(), cluster_id, last(grants_expire_time) / 86400 as expire_time from log.taosd_cluster_info where _ts >= (now - 24h) and _ts < now partition by cluster_id having first(_ts) > 0 `| +|The used measurement points has reached the authorized number|>= 90%|Trigger alert|1 day|0 seconds|`select now(), cluster_id, CASE WHEN max(grants_timeseries_total) > 0.0 THEN max(grants_timeseries_used) /max(grants_timeseries_total) * 100.0 ELSE 0.0 END AS result from log.taosd_cluster_info where _ts >= (now - 30s) and _ts < now partition by cluster_id having timetruncate(first(_ts), 1m) > 0`| +|Number of concurrent query requests | > 100|Do not trigger alert|1 minute|0 seconds|`select now() as ts, count(*) as slow_count from performance_schema.perf_queries`| +|Maximum time for slow query execution (no time window) |> 300秒|Do not trigger alert|1 minute|0 seconds|`select now() as ts, count(*) as slow_count from performance_schema.perf_queries where exec_usec>300000000`| +|dnode offline |total != alive|Trigger alert|30 seconds|0 seconds|`select now(), cluster_id, last(dnodes_total) - last(dnodes_alive) as dnode_offline from log.taosd_cluster_info where _ts >= (now -30s) and _ts < now partition by cluster_id having first(_ts) > 0`| +|vnode offline |total != alive|Trigger alert|30 seconds|0 seconds|`select now(), cluster_id, last(vnodes_total) - last(vnodes_alive) as vnode_offline from log.taosd_cluster_info where _ts >= (now - 30s) and _ts < now partition by cluster_id having first(_ts) > 0 `| +|Number of data deletion requests |> 0|Do not trigger alert|30 seconds|0 seconds|``select now(), count(`count`) as `delete_count` from log.taos_sql_req where sql_type = 'delete' and _ts >= (now -30s) and _ts < now``| +|Adapter RESTful request fail |> 5|Do not trigger alert|30 seconds|0 seconds|``select now(), sum(`fail`) as `Failed` from log.adapter_requests where req_type=0 and ts >= (now -30s) and ts < now``| +|Adapter WebSocket request fail |> 5|Do not trigger alert|30 seconds|0 seconds|``select now(), sum(`fail`) as `Failed` from log.adapter_requests where req_type=1 and ts >= (now -30s) and ts < now``| +|Dnode data reporting is missing |< 3|Trigger alert|180 seconds|0 seconds|`select now(), cluster_id, count(*) as dnode_report from log.taosd_cluster_info where _ts >= (now -180s) and _ts < now partition by cluster_id having timetruncate(first(_ts), 1h) > 0`| +|Restart dnode |max(update_time) > last(update_time)|Trigger alert|90 seconds|0 seconds|`select now(), dnode_id, max(uptime) - last(uptime) as 
dnode_restart from log.taosd_dnodes_info where _ts >= (now - 90s) and _ts < now partition by dnode_id`| + +TDengine users can modify and improve these alert rules according to their own business needs. In Grafana 7.5 and below versions, the Dashboard and Alert rules functions are combined, while in subsequent new versions, the two functions are separated. To be compatible with Grafana7.5 and below versions, an Alert Used Only panel has been added to the TDinsight panel, which is only required for Grafana7.5 and below versions. + ## Upgrade @@ -248,13 +278,13 @@ The new version of the plugin uses the Grafana unified alerting feature, the `-E Assuming you start the TDengine database on the host `tdengine` with HTTP API port `6041`, user `root1`, and password `pass5ord`. Execute the script: -```bash +```shell ./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord ``` If you want to monitor multiple TDengine clusters, you need to set up multiple TDinsight dashboards. Setting up a non-default TDinsight requires some changes: the `-n` `-i` `-t` options need to be changed to non-default names, and if using the built-in SMS alert feature, `-N` and `-L` should also be changed. -```bash +```shell sudo ./TDengine.sh -n TDengine-Env1 -a http://another:6041 -u root -p taosdata -i tdinsight-env1 -t 'TDinsight Env1' ``` diff --git a/docs/en/14-reference/02-tools/08-taos-cli.md b/docs/en/14-reference/02-tools/08-taos-cli.md index 1dd77de186..9309da831d 100644 --- a/docs/en/14-reference/02-tools/08-taos-cli.md +++ b/docs/en/14-reference/02-tools/08-taos-cli.md @@ -10,7 +10,7 @@ The TDengine command line program (hereinafter referred to as TDengine CLI) is t To enter the TDengine CLI, simply execute `taos` in the terminal. -```bash +```shell taos ``` @@ -81,7 +81,7 @@ There are many other parameters: Example: -```bash +```shell taos -h h1.taos.com -s "use db; show tables;" ``` diff --git a/docs/en/14-reference/02-tools/10-taosbenchmark.md b/docs/en/14-reference/02-tools/10-taosbenchmark.md index c41cff49e1..09227f210b 100644 --- a/docs/en/14-reference/02-tools/10-taosbenchmark.md +++ b/docs/en/14-reference/02-tools/10-taosbenchmark.md @@ -28,7 +28,7 @@ taosBenchmark supports comprehensive performance testing for TDengine, and the T Execute the following command to quickly experience taosBenchmark performing a write performance test on TDengine based on the default configuration. -```bash +```shell taosBenchmark ``` @@ -38,7 +38,7 @@ When running without parameters, taosBenchmark by default connects to the TDengi When running taosBenchmark using command line parameters and controlling its behavior, the `-f ` parameter cannot be used. All configuration parameters must be specified through the command line. Below is an example of using command line mode to test the write performance of taosBenchmark. -```bash +```shell taosBenchmark -I stmt -n 200 -t 100 ``` @@ -50,7 +50,7 @@ The taosBenchmark installation package includes examples of configuration files, Use the following command line to run taosBenchmark and control its behavior through a configuration file. 
-```bash +```shell taosBenchmark -f ``` diff --git a/docs/en/14-reference/03-taos-sql/03-table.md b/docs/en/14-reference/03-taos-sql/03-table.md index 3c047eb19e..b7a8bb5b1c 100644 --- a/docs/en/14-reference/03-taos-sql/03-table.md +++ b/docs/en/14-reference/03-taos-sql/03-table.md @@ -170,7 +170,7 @@ ALTER TABLE [db_name.]tb_name alter_table_clause alter_table_clause: { alter_table_options - | SET tag tag_name = new_tag_value,tag_name2=new_tag2_value... + | SET tag tag_name = new_tag_value, tag_name2=new_tag2_value ... } alter_table_options: @@ -194,7 +194,7 @@ alter_table_option: { ### Modify Subtable Tag Value ```sql -ALTER TABLE tb_name SET TAG tag_name1=new_tag_value1,tag_name2=new_tag_value2...; +ALTER TABLE tb_name SET TAG tag_name1=new_tag_value1, tag_name2=new_tag_value2 ...; ``` ### Modify Table Lifespan diff --git a/docs/en/14-reference/03-taos-sql/06-select.md b/docs/en/14-reference/03-taos-sql/06-select.md index 5c71697110..c33fef95fb 100644 --- a/docs/en/14-reference/03-taos-sql/06-select.md +++ b/docs/en/14-reference/03-taos-sql/06-select.md @@ -210,19 +210,19 @@ However, renaming individual columns is not supported for `first(*)`, `last(*)`, Retrieve all subtable names and related tag information from a supertable: -```mysql +```sql SELECT TAGS TBNAME, location FROM meters; ``` It is recommended that users query the subtable tag information of supertables using the INS_TAGS system table under INFORMATION_SCHEMA, for example, to get all subtable names and tag values of the supertable meters: -```mysql +```sql SELECT table_name, tag_name, tag_type, tag_value FROM information_schema.ins_tags WHERE stable_name='meters'; ``` Count the number of subtables under a supertable: -```mysql +```sql SELECT COUNT(*) FROM (SELECT DISTINCT TBNAME FROM meters); ``` @@ -385,7 +385,7 @@ SELECT CURRENT_USER(); ### Syntax -```txt +```text WHERE (column|tbname) match/MATCH/nmatch/NMATCH _regex_ ``` @@ -403,7 +403,7 @@ The length of the regular match string cannot exceed 128 bytes. You can set and ### Syntax -```txt +```text CASE value WHEN compare_value THEN result [WHEN compare_value THEN result ...] [ELSE result] END CASE WHEN condition THEN result [WHEN condition THEN result ...] [ELSE result] END ``` @@ -493,7 +493,7 @@ SELECT ... FROM (SELECT ... FROM ...) ...; ## UNION ALL Clause -```txt title=Syntax +```text title=Syntax SELECT ... UNION ALL SELECT ... [UNION ALL SELECT ...] diff --git a/docs/en/14-reference/03-taos-sql/08-delete-data.mdx b/docs/en/14-reference/03-taos-sql/08-delete-data.md similarity index 100% rename from docs/en/14-reference/03-taos-sql/08-delete-data.mdx rename to docs/en/14-reference/03-taos-sql/08-delete-data.md diff --git a/docs/en/14-reference/03-taos-sql/10-function.md b/docs/en/14-reference/03-taos-sql/10-function.md index fa6b8d333a..4eb7b083fb 100644 --- a/docs/en/14-reference/03-taos-sql/10-function.md +++ b/docs/en/14-reference/03-taos-sql/10-function.md @@ -417,7 +417,7 @@ MOD(expr1, expr2) **Example**: -``` sql +```sql taos> select mod(10,3); mod(10,3) | ============================ @@ -454,7 +454,7 @@ RAND([seed]) **Example**: -``` sql +```sql taos> select rand(); rand() | ============================ diff --git a/docs/en/14-reference/03-taos-sql/25-user.md b/docs/en/14-reference/03-taos-sql/25-user.md index 6eebf9513e..eaf1041aa0 100644 --- a/docs/en/14-reference/03-taos-sql/25-user.md +++ b/docs/en/14-reference/03-taos-sql/25-user.md @@ -13,14 +13,14 @@ CREATE USER user_name PASS 'password' [SYSINFO {1|0}]; The username can be up to 23 bytes long. 
-The password can be up to 31 bytes long. The password can include letters, numbers, and special characters except for single quotes, double quotes, backticks, backslashes, and spaces, and it cannot be an empty string. +The password must be between 8 and 16 characters long and include at least three types of characters from the following: uppercase letters, lowercase letters, numbers, and special characters. Special characters include `! @ # $ % ^ & * ( ) - _ + = [ ] { } : ; > < ? | ~ , .`. `SYSINFO` indicates whether the user can view system information. `1` means they can view, `0` means they have no permission to view. System information includes service configuration, dnode, vnode, storage, etc. The default value is `1`. -In the example below, we create a user with the password `123456` who can view system information. +In the example below, we create a user with the password `abc123!@#` who can view system information. ```sql -taos> create user test pass '123456' sysinfo 1; +taos> create user test pass 'abc123!@#' sysinfo 1; Query OK, 0 of 0 rows affected (0.001254s) ``` diff --git a/docs/en/14-reference/03-taos-sql/32-compress.md b/docs/en/14-reference/03-taos-sql/32-compress.md index efe1959b1a..30b107b632 100644 --- a/docs/en/14-reference/03-taos-sql/32-compress.md +++ b/docs/en/14-reference/03-taos-sql/32-compress.md @@ -28,13 +28,14 @@ In this document, it specifically refers to the internal levels of the second-le - Default compression algorithms list and applicable range for each data type -| Data Type | Available Encoding Algorithms | Default Encoding Algorithm | Available Compression Algorithms|Default Compression Algorithm| Default Compression Level| -| :-----------:|:----------:|:-------:|:-------:|:----------:|:----:| -| tinyint/untinyint/smallint/usmallint/int/uint | simple8b| simple8b | lz4/zlib/zstd/xz| lz4 | medium| -| bigint/ubigint/timestamp | simple8b/delta-i | delta-i |lz4/zlib/zstd/xz | lz4| medium| -|float/double | delta-d|delta-d |lz4/zlib/zstd/xz/tsz|lz4| medium| -|binary/nchar| disabled| disabled|lz4/zlib/zstd/xz| lz4| medium| -|bool| bit-packing| bit-packing| lz4/zlib/zstd/xz| lz4| medium| +| Data Type |Available Encoding Algorithms | Default Encoding Algorithm | Available Compression Algorithms | Default Compression Algorithm | Default Compression Level | +|:------------------------------------:|:-------------------------:|:-----------:|:--------------------:|:----:|:------:| +| int/uint | disabled/simple8b | simple8b | lz4/zlib/zstd/xz | lz4 | medium | +| tinyint/untinyint/smallint/usmallint | disabled/simple8b | simple8b | lz4/zlib/zstd/xz | zlib | medium | +| bigint/ubigint/timestamp | disabled/simple8b/delta-i | delta-i | lz4/zlib/zstd/xz | lz4 | medium | +| float/double | disabled/delta-d | delta-d | lz4/zlib/zstd/xz/tsz | lz4 | medium | +| binary/nchar | disabled | disabled | lz4/zlib/zstd/xz | zstd | medium | +| bool | disabled/bit-packing | bit-packing | lz4/zlib/zstd/xz | zstd | medium | ## SQL Syntax diff --git a/docs/en/14-reference/05-connector/10-cpp.mdx b/docs/en/14-reference/05-connector/10-cpp.md similarity index 100% rename from docs/en/14-reference/05-connector/10-cpp.mdx rename to docs/en/14-reference/05-connector/10-cpp.md diff --git a/docs/en/14-reference/05-connector/14-java.mdx b/docs/en/14-reference/05-connector/14-java.md similarity index 100% rename from docs/en/14-reference/05-connector/14-java.mdx rename to docs/en/14-reference/05-connector/14-java.md diff --git a/docs/en/14-reference/05-connector/20-go.mdx 
b/docs/en/14-reference/05-connector/20-go.md similarity index 99% rename from docs/en/14-reference/05-connector/20-go.mdx rename to docs/en/14-reference/05-connector/20-go.md index 48716ec428..5f94d38299 100644 --- a/docs/en/14-reference/05-connector/20-go.mdx +++ b/docs/en/14-reference/05-connector/20-go.md @@ -126,7 +126,7 @@ For the source code of the example programs, please refer to: [Example Programs] The Data Source Name has a generic format, similar to [PEAR DB](http://pear.php.net/manual/en/package.database.db.intro-dsn.php), but without the type prefix (brackets indicate optional): -``` text +```text [username[:password]@][protocol[(address)]]/[dbname][?param1=value1&...¶mN=valueN] ``` diff --git a/docs/en/14-reference/05-connector/26-rust.mdx b/docs/en/14-reference/05-connector/26-rust.md similarity index 100% rename from docs/en/14-reference/05-connector/26-rust.mdx rename to docs/en/14-reference/05-connector/26-rust.md diff --git a/docs/en/14-reference/05-connector/30-python.mdx b/docs/en/14-reference/05-connector/30-python.md similarity index 100% rename from docs/en/14-reference/05-connector/30-python.mdx rename to docs/en/14-reference/05-connector/30-python.md diff --git a/docs/en/14-reference/05-connector/35-node.mdx b/docs/en/14-reference/05-connector/35-node.md similarity index 100% rename from docs/en/14-reference/05-connector/35-node.mdx rename to docs/en/14-reference/05-connector/35-node.md diff --git a/docs/en/14-reference/05-connector/40-csharp.mdx b/docs/en/14-reference/05-connector/40-csharp.md similarity index 100% rename from docs/en/14-reference/05-connector/40-csharp.mdx rename to docs/en/14-reference/05-connector/40-csharp.md diff --git a/docs/en/14-reference/05-connector/43-r-lang.mdx b/docs/en/14-reference/05-connector/43-r-lang.md similarity index 100% rename from docs/en/14-reference/05-connector/43-r-lang.mdx rename to docs/en/14-reference/05-connector/43-r-lang.md diff --git a/docs/en/14-reference/05-connector/45-php.mdx b/docs/en/14-reference/05-connector/45-php.md similarity index 100% rename from docs/en/14-reference/05-connector/45-php.mdx rename to docs/en/14-reference/05-connector/45-php.md diff --git a/docs/en/14-reference/05-connector/50-odbc.mdx b/docs/en/14-reference/05-connector/50-odbc.md similarity index 100% rename from docs/en/14-reference/05-connector/50-odbc.mdx rename to docs/en/14-reference/05-connector/50-odbc.md diff --git a/docs/en/14-reference/05-connector/60-rest-api.mdx b/docs/en/14-reference/05-connector/60-rest-api.md similarity index 99% rename from docs/en/14-reference/05-connector/60-rest-api.mdx rename to docs/en/14-reference/05-connector/60-rest-api.md index a63022f547..88e53f7618 100644 --- a/docs/en/14-reference/05-connector/60-rest-api.mdx +++ b/docs/en/14-reference/05-connector/60-rest-api.md @@ -21,7 +21,7 @@ Below is an example using the `curl` tool in an Ubuntu environment (please confi The following example lists all databases, please replace `h1.tdengine.com` and 6041 (default value) with the actual running TDengine service FQDN and port number: -```bash +```shell curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" \ -d "select name, ntables, status from information_schema.ins_databases;" \ h1.tdengine.com:6041/rest/sql @@ -100,13 +100,13 @@ The BODY of the HTTP request contains a complete SQL statement. 
The data table i Use `curl` to initiate an HTTP Request with custom authentication as follows: -```bash +```shell curl -L -H "Authorization: Basic " -d "" :/rest/sql/[db_name][?tz=timezone[&req_id=req_id][&row_with_meta=true]] ``` Or, -```bash +```shell curl -L -u username:password -d "" :/rest/sql/[db_name][?tz=timezone[&req_id=req_id][&row_with_meta=true]] ``` @@ -279,7 +279,7 @@ Column types use the following strings: Prepare data -```bash +```shell create database demo use demo create table t(ts timestamp,c1 varbinary(20),c2 geometry(100)) @@ -288,7 +288,7 @@ insert into t values(now,'\x7f8290','point(100 100)') Execute query -```bash +```shell curl --location 'http://:/rest/sql' \ --header 'Content-Type: text/plain' \ --header 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' \ @@ -428,7 +428,7 @@ Data Query Return Example HTTP requests need to include an authorization code ``, used for identity verification. The authorization code is usually provided by the administrator and can be simply obtained by sending an `HTTP GET` request as follows: -```bash +```shell curl http://:/rest/login// ``` @@ -440,7 +440,7 @@ Here, `fqdn` is the FQDN or IP address of the TDengine database, `port` is the p Example of obtaining an authorization code: -```bash +```shell curl http://192.168.0.1:6041/rest/login/root/taosdata ``` @@ -457,7 +457,7 @@ Return value: - Query all records of table d1001 in the demo database: - ```bash + ```shell curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "select * from demo.d1001" 192.168.0.1:6041/rest/sql curl -L -H "Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04" -d "select * from demo.d1001" 192.168.0.1:6041/rest/sql ``` @@ -509,7 +509,7 @@ Return value: - Create database demo: - ```bash + ```shell curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "create database demo" 192.168.0.1:6041/rest/sql curl -L -H "Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04" -d "create database demo" 192.168.0.1:6041/rest/sql ``` @@ -560,7 +560,7 @@ Return value: #### TDengine 2.x response codes and message bodies -```JSON +```json { "status": "succ", "head": [ @@ -624,7 +624,7 @@ Return value: #### TDengine 3.0 Response Codes and Message Body -```JSON +```json { "code": 0, "column_meta": [ diff --git a/docs/en/14-reference/09-error-code.md b/docs/en/14-reference/09-error-code.md index 1d3e32a9e3..ee86b7ac96 100644 --- a/docs/en/14-reference/09-error-code.md +++ b/docs/en/14-reference/09-error-code.md @@ -119,7 +119,7 @@ This document details the server error codes that may be encountered when using | 0x80000350 | User already exists | Create user, duplicate creation | Confirm if the operation is correct | | 0x80000351 | Invalid user | User does not exist | Confirm if the operation is correct | | 0x80000352 | Invalid user format | Incorrect format | Confirm if the operation is correct | -| 0x80000353 | Invalid password format | Incorrect format | Confirm if the operation is correct | +| 0x80000353 | Invalid password format | The password must be between 8 and 16 characters long and include at least three types of characters from the following: uppercase letters, lowercase letters, numbers, and special characters. 
| Confirm the format of the password string | | 0x80000354 | Can not get user from conn | Internal error | Report issue | | 0x80000355 | Too many users | (Enterprise only) Exceeding user limit | Adjust configuration | | 0x80000357 | Authentication failure | Incorrect password | Confirm if the operation is correct | diff --git a/docs/en/27-train-faq/index.md b/docs/en/27-train-faq/index.md index 3b3f7a8e17..ca6cd91714 100644 --- a/docs/en/27-train-faq/index.md +++ b/docs/en/27-train-faq/index.md @@ -90,7 +90,7 @@ Batch insertion. Each insert statement can insert multiple records into one tabl When inserting nchar type data containing Chinese characters on Windows, first ensure that the system's regional settings are set to China (this can be set in the Control Panel). At this point, the `taos` client in cmd should already be working properly; if developing a Java application in an IDE, such as Eclipse or IntelliJ, ensure that the file encoding in the IDE is set to GBK (which is the default encoding type for Java), then initialize the client configuration when creating the Connection, as follows: -```JAVA +```java Class.forName("com.taosdata.jdbc.TSDBDriver"); Properties properties = new Properties(); properties.setProperty(TSDBDriver.LOCALE_KEY, "UTF-8"); @@ -145,7 +145,7 @@ Version 3.0 of TDengine includes a standalone component developed in Go called ` The Go language version requirement is 1.14 or higher. If there are Go compilation errors, often due to issues accessing Go mod in China, they can be resolved by setting Go environment variables: -```sh +```shell go env -w GO111MODULE=on go env -w GOPROXY=https://goproxy.cn,direct ``` @@ -196,7 +196,7 @@ Here are the solutions: 1. Create a file /Library/LaunchDaemons/limit.maxfiles.plist, write the following content (the example changes limit and maxfiles to 100,000, modify as needed): -```plist +```xml @@ -286,4 +286,14 @@ This connection only reports the most basic information that does not involve an This feature is an optional configuration item, which is enabled by default in the open-source version. The specific parameter is telemetryReporting, as explained in the [official documentation](../tdengine-reference/components/taosd/). You can disable this parameter at any time by modifying telemetryReporting to 0 in taos.cfg, then restarting the database service. Code located at: [https://github.com/taosdata/TDengine/blob/62e609c558deb764a37d1a01ba84bc35115a85a4/source/dnode/mnode/impl/src/mndTelem.c](https://github.com/taosdata/TDengine/blob/62e609c558deb764a37d1a01ba84bc35115a85a4/source/dnode/mnode/impl/src/mndTelem.c). -Additionally, for the highly secure enterprise version, TDengine Enterprise, this parameter will not be operational. +Additionally, for the highly secure enterprise version, TDengine Enterprise, this parameter will not be operational. + +### 31 What should I do if I encounter 'Sync leader is unreachable' when connecting to the cluster for the first time? + +Reporting this error indicates that the first connection to the cluster was successful, but the IP address accessed for the first time was not the leader of mnode. An error occurred when the client attempted to establish a connection with the leader. The client searches for the leader node through EP, which specifies the fqdn and port number. 
There are two common reasons for this error: + +- The ports of other dnodes in the cluster are not open +- The client's hosts file is not configured correctly + +Therefore, first, check whether all ports on the server and cluster (default 6030 for native connections and 6041 for HTTP connections) are open; Next, check if the client's hosts file has configured the fqdn and IP information for all dnodes in the cluster. +If the issue still cannot be resolved, it is necessary to contact Taos technical personnel for support. diff --git a/docs/zh/06-advanced/06-TDgpt/02-management.md b/docs/zh/06-advanced/06-TDgpt/02-management.md index ef1206fc04..b37c39944f 100644 --- a/docs/zh/06-advanced/06-TDgpt/02-management.md +++ b/docs/zh/06-advanced/06-TDgpt/02-management.md @@ -4,11 +4,10 @@ sidebar_label: "安装部署" --- ### 环境准备 -使用 TDgpt 的高级时序数据分析功能需要在 TDengine 集群中安装部署 AI node(Anode)。Anode 可以运行在 Linux/Windows/MacOS 等平台上,同时需要 3.10 或以上版本的 Python 环境支持。 +使用 TDgpt 的高级时序数据分析功能需要在 TDengine 集群中安装部署 AI node(Anode)。Anode 运行在 Linux 平台上,并需要 3.10 或以上版本的 Python 环境支持。 > 部署 Anode 需要 TDengine Enterprise 3.3.4.3 及以后版本,请首先确认搭配 Anode 使用的 TDengine 能够支持 Anode。 ### 安装及卸载 -不同操作系统上安装及部署 Anode 有一些差异,主要是卸载操作、安装路径、服务启停等方面。本文以 Linux 系统为例,说明安装部署的流程。 使用 Linux 环境下的安装包 TDengine-enterprise-anode-1.x.x.tar.gz 可进行 Anode 的安装部署工作,命令如下: ```bash @@ -37,7 +36,7 @@ systemctl status taosanoded |/usr/local/taos/taosanode/bin|可执行文件目录| |/usr/local/taos/taosanode/resource|资源文件目录,链接到文件夹 /var/lib/taos/taosanode/resource/| |/usr/local/taos/taosanode/lib|库文件目录| -|/var/lib/taos/taosanode/model/|模型文件目录,链接到文件夹 /var/lib/taos/taosanode/model| +|/usr/local/taos/taosanode/model/|模型文件目录,链接到文件夹 /var/lib/taos/taosanode/model| |/var/log/taos/taosanode/|日志文件目录| |/etc/taos/taosanode.ini|配置文件| @@ -64,7 +63,7 @@ pidfile = /usr/local/taos/taosanode/taosanode.pid # conflict with systemctl, so do NOT uncomment this # daemonize = /var/log/taos/taosanode/taosanode.log -# log directory +# uWSGI log files logto = /var/log/taos/taosanode/taosanode.log # wWSGI monitor port @@ -74,7 +73,7 @@ stats = 127.0.0.1:8387 virtualenv = /usr/local/taos/taosanode/venv/ [taosanode] -# default app log file +# default taosanode log file app-log = /var/log/taos/taosanode/taosanode.app.log # model storage directory diff --git a/docs/zh/06-advanced/06-TDgpt/03-preprocess.md b/docs/zh/06-advanced/06-TDgpt/03-preprocess.md index 9efd2bdf11..b63cae0740 100644 --- a/docs/zh/06-advanced/06-TDgpt/03-preprocess.md +++ b/docs/zh/06-advanced/06-TDgpt/03-preprocess.md @@ -12,7 +12,7 @@ import wndata from './pic/white-noise-data.png' 预处理流程 TDgpt 首先对输入数据进行白噪声检查(White Noise Data check), 检查通过以后针对预测分析,还要进行输入(历史)数据的重采样和时间戳对齐处理(异常检测跳过数据重采样和时间戳对齐步骤)。 -预处理完成以后,再进行预测或异常检测操作。预处理过程部署于预测或异常检测处理逻辑的一部分。 +预处理完成以后,再进行预测或异常检测操作。预处理过程不属于预测或异常检测处理逻辑的一部分。 ### 白噪声检查 diff --git a/docs/zh/06-advanced/06-TDgpt/04-forecast/index.md b/docs/zh/06-advanced/06-TDgpt/04-forecast/index.md index c7388ab9c0..3981fff8c6 100644 --- a/docs/zh/06-advanced/06-TDgpt/04-forecast/index.md +++ b/docs/zh/06-advanced/06-TDgpt/04-forecast/index.md @@ -14,7 +14,7 @@ description: 预测算法 ```bash taos> select * from foo; - ts | k | + ts | i32 | ======================================== 2020-01-01 00:00:12.681 | 13 | 2020-01-01 00:00:13.727 | 14 | @@ -42,7 +42,7 @@ algo=expr1 ``` 1. `column_expr`:预测的时序数据列。与异常检测相同,只支持数值类型列输入。 -2. `options`:异常检测函数的参数,使用规则与 anomaly_window 相同。预测支持 `conf`, `every`, `rows`, `start`, `rows` 几个控制参数,其含义如下: +2. 
`options`:预测函数的参数。字符串类型,其中使用 K=V 方式调用算法及相关参数。采用逗号分隔的 K=V 字符串表示,其中的字符串不需要使用单引号、双引号、或转义号等符号,不能使用中文及其他宽字符。预测支持 `conf`, `every`, `rows`, `start`, `rows` 几个控制参数,其含义如下: ### 参数说明 diff --git a/docs/zh/06-advanced/06-TDgpt/06-dev/02-forecast.md b/docs/zh/06-advanced/06-TDgpt/06-dev/02-forecast.md index 954076c8fd..0584c87311 100644 --- a/docs/zh/06-advanced/06-TDgpt/06-dev/02-forecast.md +++ b/docs/zh/06-advanced/06-TDgpt/06-dev/02-forecast.md @@ -34,7 +34,8 @@ return { ```python import numpy as np -from service import AbstractForecastService +from taosanalytics.service import AbstractForecastService + # 算法实现类名称 需要以下划线 "_" 开始,并以 Service 结束 class _MyForecastService(AbstractForecastService): @@ -51,12 +52,12 @@ class _MyForecastService(AbstractForecastService): super().__init__() def execute(self): - """ 算法逻辑的核心实现""" + """ 算法逻辑的核心实现""" res = [] """这个预测算法固定返回 1 作为预测值,预测值的数量是用户通过 self.fc_rows 指定""" ts_list = [self.start_ts + i * self.time_step for i in range(self.fc_rows)] - res.app(ts_list) # 设置预测结果时间戳列 + res.append(ts_list) # 设置预测结果时间戳列 """生成全部为 1 的预测结果 """ res_list = [1] * self.fc_rows @@ -64,18 +65,18 @@ class _MyForecastService(AbstractForecastService): """检查用户输入,是否要求返回预测置信区间上下界""" if self.return_conf: - """对于没有计算预测置信区间上下界的算法,直接返回预测值作为上下界即可""" - bound_list = [1] * self.fc_rows - res.append(bound_list) # 预测结果置信区间下界 - res.append(bound_list) # 预测结果执行区间上界 + """对于没有计算预测置信区间上下界的算法,直接返回预测值作为上下界即可""" + bound_list = [1] * self.fc_rows + res.append(bound_list) # 预测结果置信区间下界 + res.append(bound_list) # 预测结果执行区间上界 """返回结果""" - return { "res": res, "mse": 0} + return {"res": res, "mse": 0} - def set_params(self, params): - """该算法无需任何输入参数,直接重载父类该函数,不处理算法参数设置逻辑""" - pass + """该算法无需任何输入参数,直接调用父类函数,不处理算法参数设置逻辑""" + return super().set_params(params) + ``` 将该文件保存在 `./taosanalytics/algo/fc/` 目录下,然后重启 taosanode 服务。在 TDengine 命令行接口中执行 `SHOW ANODES FULL` 能够看到新加入的算法。应用就可以通过 SQL 语句调用该预测算法。 diff --git a/docs/zh/06-advanced/06-TDgpt/06-dev/03-ad.md b/docs/zh/06-advanced/06-TDgpt/06-dev/03-ad.md index 5b49db330e..c48ce42836 100644 --- a/docs/zh/06-advanced/06-TDgpt/06-dev/03-ad.md +++ b/docs/zh/06-advanced/06-TDgpt/06-dev/03-ad.md @@ -16,7 +16,7 @@ sidebar_label: "异常检测" ```python import numpy as np -from service import AbstractAnomalyDetectionService +from taosanalytics.service import AbstractAnomalyDetectionService # 算法实现类名称 需要以下划线 "_" 开始,并以 Service 结束 class _MyAnomalyDetectionService(AbstractAnomalyDetectionService): diff --git a/docs/zh/06-advanced/06-TDgpt/06-dev/index.md b/docs/zh/06-advanced/06-TDgpt/06-dev/index.md index 072a66c7d3..bcd972df8e 100644 --- a/docs/zh/06-advanced/06-TDgpt/06-dev/index.md +++ b/docs/zh/06-advanced/06-TDgpt/06-dev/index.md @@ -19,24 +19,25 @@ Anode的主要目录结构如下图所示 ```bash . 
+├── bin ├── cfg -├── model -│   └── ad_autoencoder -├── release -├── script -└── taosanalytics - ├── algo - │   ├── ad - │   └── fc - ├── misc - └── test +├── lib +│   └── taosanalytics +│   ├── algo +│   │   ├── ad +│   │   └── fc +│   ├── misc +│   └── test +├── log -> /var/log/taos/taosanode +├── model -> /var/lib/taos/taosanode/model +└── venv -> /var/lib/taos/taosanode/venv ``` |目录|说明| |---|---| |taosanalytics| 源代码目录,其下包含了算法具体保存目录 algo,放置杂项目录 misc,单元测试和集成测试目录 test。 algo 目录下 ad 保存异常检测算法代码,fc 目录保存预测算法代码| -|script|是安装脚本和发布脚本放置目录| +|venv| Python 虚拟环境| |model|放置针对数据集完成的训练模型| |cfg|配置文件目录| @@ -63,7 +64,8 @@ Anode采用算法自动加载模式,因此只识别符合命名约定的 Pytho ```SQL --- algo 后面的参数 name 即为类属性 `name` -SELECT COUNT(*) FROM foo ANOMALY_WINDOW(col_name, 'algo=name') +SELECT COUNT(*) +FROM foo ANOMALY_WINDOW(col_name, 'algo=name') ``` ## 添加具有模型的分析算法 @@ -76,19 +78,10 @@ SELECT COUNT(*) FROM foo ANOMALY_WINDOW(col_name, 'algo=name') ```bash . -├── cfg -├── model -│   └── ad_autoencoder -│   ├── ad_autoencoder_foo.dat -│   └── ad_autoencoder_foo.info -├── release -├── script -└── taosanalytics - ├── algo - │   ├── ad - │   └── fc - ├── misc - └── test +└── model + └── ad_autoencoder + ├── ad_autoencoder_foo.dat + └── ad_autoencoder_foo.info ``` diff --git a/docs/zh/08-operation/14-user.md b/docs/zh/08-operation/14-user.md index 03a838462f..56b514d703 100644 --- a/docs/zh/08-operation/14-user.md +++ b/docs/zh/08-operation/14-user.md @@ -16,14 +16,14 @@ create user user_name pass'password' [sysinfo {1|0}] ``` 相关参数说明如下。 -- user_name:最长为 23 B。 -- password:最长为 128 B,合法字符包括字母和数字以及单双引号、撇号、反斜杠和空格以外的特殊字符,且不可以为空。 +- user_name:用户名最长不超过 23 个字节。 +- password:密码长度必须为 8 到 16 位,并且至少包含大写字母、小写字母、数字、特殊字符中的三类。特殊字符包括 `! @ # $ % ^ & * ( ) - _ + = [ ] { } : ; > < ? | ~ , .`。 - sysinfo :用户是否可以查看系统信息。1 表示可以查看,0 表示不可以查看。系统信息包括服务端配置信息、服务端各种节点信息,如 dnode、查询节点(qnode)等,以及与存储相关的信息等。默认为可以查看系统信息。 -如下 SQL 可以创建密码为 123456 且可以查看系统信息的用户 test。 +如下 SQL 可以创建密码为 abc123!@# 且可以查看系统信息的用户 test。 ```sql -create user test pass '123456' sysinfo 1 +create user test pass 'abc123!@#' sysinfo 1 ``` ### 查看用户 diff --git a/docs/zh/14-reference/01-components/12-tdinsight/assets/Alert-Used-Only.webp b/docs/zh/14-reference/01-components/12-tdinsight/assets/Alert-Used-Only.webp new file mode 100644 index 0000000000..8824ee5889 Binary files /dev/null and b/docs/zh/14-reference/01-components/12-tdinsight/assets/Alert-Used-Only.webp differ diff --git a/docs/zh/14-reference/01-components/12-tdinsight/assets/Alert-rules.webp b/docs/zh/14-reference/01-components/12-tdinsight/assets/Alert-rules.webp new file mode 100644 index 0000000000..e9f91c0488 Binary files /dev/null and b/docs/zh/14-reference/01-components/12-tdinsight/assets/Alert-rules.webp differ diff --git a/docs/zh/14-reference/01-components/12-tdinsight/assets/TDengine-Alert.webp b/docs/zh/14-reference/01-components/12-tdinsight/assets/TDengine-Alert.webp new file mode 100644 index 0000000000..a0d9b695f4 Binary files /dev/null and b/docs/zh/14-reference/01-components/12-tdinsight/assets/TDengine-Alert.webp differ diff --git a/docs/zh/14-reference/01-components/12-tdinsight/index.mdx b/docs/zh/14-reference/01-components/12-tdinsight/index.mdx index e5f44a1080..0f6ea46ffd 100644 --- a/docs/zh/14-reference/01-components/12-tdinsight/index.mdx +++ b/docs/zh/14-reference/01-components/12-tdinsight/index.mdx @@ -145,6 +145,44 @@ TDinsight 仪表盘旨在提供 TDengine 相关资源的使用情况和状态, 还有上述分类的细分维度折线图。 +### 预配置告警规则自动导入 + +涛思总结用户使用经验,整理出 14 个常用的告警规则(alert rule),能够对集群关键指标进行监测并及时上报指标异常、超限等告警信息。 +从 TDengine-server 3.3.4.3 版本(tdengine-datasource 
3.6.3)开始,TDengine Datasource 支持预配置告警规则自动导入功能,用户可将 14 个告警规则一键导入 Grafana(11 及以上版本),直接使用。 +预配置告警规则导入方法如下图所示,在 tdengine-datasource setting 界面,打开 “Load Tengine Alert” 开关,点击 “Save & test” 按钮后,插件会自动加载上述告警规则, 规则会放入以数据源名称 + “-alert” 的 grafana 告警目录中。如不需要,关闭 “Load TDengine Alert” 开关,点击 “Clear TDengine Alert” 旁边的按钮则会清除此数据源已导入的所有告警规则。 + +![TDengine Alert](./assets/TDengine-Alert.webp) + +导入后,点击 Grafana 左侧 “Alert rules” ,可查看当前所有告警规则。 +用户只需配置联络点(Contact points),即可获取告警通知。联络点配置方法见[告警配置](https://docs.taosdata.com/third-party/visual/grafana/#%E5%91%8A%E8%AD%A6%E9%85%8D%E7%BD%AE)。 + +![Alert-rules](./assets/Alert-rules.webp) + +14 个告警规则具体配置如下: + +| 规则名称| 规则阈值| 无监控数据时的行为 | 数据扫描间隔 |持续时间 | 执行SQL | +| ------ | --------- | ---------------- | ----------- |------- |----------------------| +|dnode 节点的CPU负载|均值 > 80%|触发告警|5分钟|5分钟 |`select now(), dnode_id, last(cpu_system) as cup_use from log.taosd_dnodes_info where _ts >= (now- 5m) and _ts < now partition by dnode_id having first(_ts) > 0 `| +|dnode 节点的的内存 |均值 > 60%|触发告警|5分钟|5分钟|`select now(), dnode_id, last(mem_engine) / last(mem_total) * 100 as taosd from log.taosd_dnodes_info where _ts >= (now- 5m) and _ts 80%|触发告警|5分钟|5分钟|`select now(), dnode_id, data_dir_level, data_dir_name, last(used) / last(total) * 100 as used from log.taosd_dnodes_data_dirs where _ts >= (now - 5m) and _ts < now partition by dnode_id, data_dir_level, data_dir_name`| +|集群授权到期 |< 60天|触发告警|1天|0秒|`select now(), cluster_id, last(grants_expire_time) / 86400 as expire_time from log.taosd_cluster_info where _ts >= (now - 24h) and _ts < now partition by cluster_id having first(_ts) > 0 `| +|测点数达到授权测点数|>= 90%|触发告警|1天|0秒|`select now(), cluster_id, CASE WHEN max(grants_timeseries_total) > 0.0 THEN max(grants_timeseries_used) /max(grants_timeseries_total) * 100.0 ELSE 0.0 END AS result from log.taosd_cluster_info where _ts >= (now - 30s) and _ts < now partition by cluster_id having timetruncate(first(_ts), 1m) > 0`| +|查询并发请求数 | > 100|不触发报警|1分钟|0秒|`select now() as ts, count(*) as slow_count from performance_schema.perf_queries`| +|慢查询执行最长时间 (无时间窗口) |> 300秒|不触发报警|1分钟|0秒|`select now() as ts, count(*) as slow_count from performance_schema.perf_queries where exec_usec>300000000`| +|dnode下线 |total != alive|触发告警|30秒|0秒|`select now(), cluster_id, last(dnodes_total) - last(dnodes_alive) as dnode_offline from log.taosd_cluster_info where _ts >= (now -30s) and _ts < now partition by cluster_id having first(_ts) > 0`| +|vnode下线 |total != alive|触发告警|30秒|0秒|`select now(), cluster_id, last(vnodes_total) - last(vnodes_alive) as vnode_offline from log.taosd_cluster_info where _ts >= (now - 30s) and _ts < now partition by cluster_id having first(_ts) > 0 `| +|数据删除请求数 |> 0|不触发报警|30秒|0秒|``select now(), count(`count`) as `delete_count` from log.taos_sql_req where sql_type = 'delete' and _ts >= (now -30s) and _ts < now``| +|Adapter RESTful 请求失败 |> 5|不触发报警|30秒|0秒|``select now(), sum(`fail`) as `Failed` from log.adapter_requests where req_type=0 and ts >= (now -30s) and ts < now``| +|Adapter WebSocket 请求失败 |> 5|不触发报警|30秒|0秒|``select now(), sum(`fail`) as `Failed` from log.adapter_requests where req_type=1 and ts >= (now -30s) and ts < now``| +|dnode 数据上报缺少 |< 3|触发告警|180秒|0秒|`select now(), cluster_id, count(*) as dnode_report from log.taosd_cluster_info where _ts >= (now -180s) and _ts < now partition by cluster_id having timetruncate(first(_ts), 1h) > 0`| +|dnode 重启 |max(update_time) > last(update_time)|触发告警|90秒|0秒|`select now(), dnode_id, max(uptime) - last(uptime) as dnode_restart from log.taosd_dnodes_info where _ts >= (now - 
90s) and _ts < now partition by dnode_id`| + +用户可参考上述告警规则,根据自己业务需求进行修改与完善。 +Grafana7.5 及以下版本,Dashboards 与 Alert rules 功能合在一起,而之后的新版本两个功能是分开的。为兼容 Grafana7.5 及以下版本,TDinsight 面板中增加了 Alert Used Only 面板,仅 Grafana7.5 及以下版本需要使用。 + +![Alert Used Only](./assets/Alert-Used-Only.webp) + + ## 升级 下面三种方式都可以进行升级: - 用图形界面,若有新版本,可以在 ”TDengine Datasource“ 插件页面点击 update 升级。 @@ -155,10 +193,11 @@ TDinsight 仪表盘旨在提供 TDengine 相关资源的使用情况和状态, 针对不同的安装方式,卸载时: - 用图形界面,在 ”TDengine Datasource“ 插件页面点击 ”Uninstall“ 卸载。 - 通过 `TDinsight.sh` 脚本安装的 TDinsight,可以使用命令行 `TDinsight.sh -R` 清理相关资源。 -- 手动安装的 TDinsight,要完全卸载,需要清理以下内容: +- 手动安装的 TDinsight,要完全卸载,需要按照顺序清理以下内容: 1. Grafana 中的 TDinsight Dashboard。 - 2. Grafana 中的 Data Source 数据源。 - 3. 从插件安装目录删除 `tdengine-datasource` 插件。 + 2. Grafana 中的 Alert rules 告警规则。 + 3. Grafana 中的 Data Source 数据源。 + 4. 从插件安装目录删除 `tdengine-datasource` 插件。 ## 附录 diff --git a/docs/zh/14-reference/02-tools/08-taos-cli.md b/docs/zh/14-reference/02-tools/08-taos-cli.md index adecc5f760..5b204da4c2 100644 --- a/docs/zh/14-reference/02-tools/08-taos-cli.md +++ b/docs/zh/14-reference/02-tools/08-taos-cli.md @@ -54,7 +54,7 @@ taos> SET MAX_BINARY_DISPLAY_WIDTH ; - -h HOST: 要连接的 TDengine 服务端所在服务器的 FQDN, 默认为连接本地服务 - -P PORT: 指定服务端所用端口号 - -u USER: 连接时使用的用户名 -- -p PASSWORD: 连接服务端时使用的密码 +- -p PASSWORD: 连接服务端时使用的密码,特殊字符如 `! & ( ) < > ; |` 需使用字符 `\` 进行转义处理 - -?, --help: 打印出所有命令行参数 还有更多其他参数: diff --git a/docs/zh/14-reference/03-taos-sql/02-database.md b/docs/zh/14-reference/03-taos-sql/02-database.md index 25d7e54823..867e1d7aa1 100644 --- a/docs/zh/14-reference/03-taos-sql/02-database.md +++ b/docs/zh/14-reference/03-taos-sql/02-database.md @@ -64,7 +64,7 @@ database_option: { - DURATION:数据文件存储数据的时间跨度。可以使用加单位的表示形式,如 DURATION 100h、DURATION 10d 等,支持 m(分钟)、h(小时)和 d(天)三个单位。不加时间单位时默认单位为天,如 DURATION 50 表示 50 天。 - MAXROWS:文件块中记录的最大条数,默认为 4096 条。 - MINROWS:文件块中记录的最小条数,默认为 100 条。 -- KEEP:表示数据文件保存的天数,缺省值为 3650,取值范围 [1, 365000],且必须大于或等于3倍的 DURATION 参数值。数据库会自动删除保存时间超过 KEEP 值的数据从而释放存储空间。KEEP 可以使用加单位的表示形式,如 KEEP 100h、KEEP 10d 等,支持 m(分钟)、h(小时)和 d(天)三个单位。也可以不写单位,如 KEEP 50,此时默认单位为天。企业版支持[多级存储](https://docs.taosdata.com/tdinternal/arch/#%E5%A4%9A%E7%BA%A7%E5%AD%98%E5%82%A8)功能, 因此, 可以设置多个保存时间(多个以英文逗号分隔,最多 3 个,满足 keep 0 \<= keep 1 \<= keep 2,如 KEEP 100h,100d,3650d); 社区版不支持多级存储功能(即使配置了多个保存时间, 也不会生效, KEEP 会取最大的保存时间)。 +- KEEP:表示数据文件保存的天数,缺省值为 3650,取值范围 [1, 365000],且必须大于或等于3倍的 DURATION 参数值。数据库会自动删除保存时间超过 KEEP 值的数据从而释放存储空间。KEEP 可以使用加单位的表示形式,如 KEEP 100h、KEEP 10d 等,支持 m(分钟)、h(小时)和 d(天)三个单位。也可以不写单位,如 KEEP 50,此时默认单位为天。企业版支持[多级存储](../../operation/planning/#%E5%A4%9A%E7%BA%A7%E5%AD%98%E5%82%A8)功能, 因此, 可以设置多个保存时间(多个以英文逗号分隔,最多 3 个,满足 keep 0 \<= keep 1 \<= keep 2,如 KEEP 100h,100d,3650d); 社区版不支持多级存储功能(即使配置了多个保存时间, 也不会生效, KEEP 会取最大的保存时间)。 - KEEP_TIME_OFFSET:自 3.2.0.0 版本生效。删除或迁移保存时间超过 KEEP 值的数据的延迟执行时间,默认值为 0 (小时)。在数据文件保存时间超过 KEEP 后,删除或迁移操作不会立即执行,而会额外等待本参数指定的时间间隔,以实现与业务高峰期错开的目的。 - STT_TRIGGER:表示落盘文件触发文件合并的个数。开源版本固定为 1,企业版本可设置范围为 1 到 16。对于少表高频写入场景,此参数建议使用默认配置;而对于多表低频写入场景,此参数建议配置较大的值。 - SINGLE_STABLE:表示此数据库中是否只可以创建一个超级表,用于超级表列非常多的情况。 @@ -134,11 +134,11 @@ alter_database_option: { 1. 如何查看 cachesize? -通过 select * from information_schema.ins_databases; 可以查看这些 cachesize 的具体值。 +通过 select * from information_schema.ins_databases; 可以查看这些 cachesize 的具体值(单位为 MB)。。 2. 如何查看 cacheload? -通过 show \.vgroups; 可以查看 cacheload +通过 show \.vgroups; 可以查看 cacheload(单位为字节)。 3. 
判断 cachesize 是否够用 diff --git a/docs/zh/14-reference/03-taos-sql/03-table.md b/docs/zh/14-reference/03-taos-sql/03-table.md index a9f9ddb517..ca2170db8b 100644 --- a/docs/zh/14-reference/03-taos-sql/03-table.md +++ b/docs/zh/14-reference/03-taos-sql/03-table.md @@ -171,7 +171,7 @@ ALTER TABLE [db_name.]tb_name alter_table_clause alter_table_clause: { alter_table_options - | SET TAG tag_name = new_tag_value,tag_name2=new_tag2_value... + | SET TAG tag_name = new_tag_value, tag_name2=new_tag2_value ... } alter_table_options: @@ -195,7 +195,7 @@ alter_table_option: { ### 修改子表标签值 ``` -ALTER TABLE tb_name SET TAG tag_name1=new_tag_value1,tag_name2=new_tag_value2...; +ALTER TABLE tb_name SET TAG tag_name1=new_tag_value1, tag_name2=new_tag_value2 ...; ``` ### 修改表生命周期 diff --git a/docs/zh/14-reference/03-taos-sql/08-delete-data.mdx b/docs/zh/14-reference/03-taos-sql/08-delete-data.mdx index 3f9c431338..ea03552795 100644 --- a/docs/zh/14-reference/03-taos-sql/08-delete-data.mdx +++ b/docs/zh/14-reference/03-taos-sql/08-delete-data.mdx @@ -6,7 +6,7 @@ title: "删除数据" 删除数据是 TDengine 提供的根据指定时间段删除指定表或超级表中数据记录的功能,方便用户清理由于设备故障等原因产生的异常数据。 -**注意**:删除数据并不会立即释放该表所占用的磁盘空间,而是把该表的数据标记为已删除,在查询时这些数据将不会再出现,但释放磁盘空间会延迟到系统自动或用户手动进行数据重整时。 +**注意**:删除数据并不会立即释放该表所占用的磁盘空间,而是把该表的数据标记为已删除,在查询时这些数据将不会再出现,但释放磁盘空间会延迟到系统自动清理(建库参数 keep 生效)或用户手动进行数据重整时(企业版功能 compact)。 **语法:** diff --git a/docs/zh/14-reference/03-taos-sql/10-function.md b/docs/zh/14-reference/03-taos-sql/10-function.md index 1d96db0969..7799e6f50e 100644 --- a/docs/zh/14-reference/03-taos-sql/10-function.md +++ b/docs/zh/14-reference/03-taos-sql/10-function.md @@ -1817,7 +1817,7 @@ ignore_null_values: { } ``` -**功能说明**:返回指定时间截面指定列的记录值或插值。ignore_null_values 参数的值可以是 0 或 1,为 1 时表示忽略 NULL 值, 缺省值为0。 +**功能说明**:返回指定时间截面指定列的记录值或插值。ignore_null_values 参数的值可以是 0 或 1,为 1 时表示忽略 NULL 值, 缺省值为 0。 **返回数据类型**:同字段类型。 @@ -1838,9 +1838,9 @@ ignore_null_values: { - INTERP 可以与伪列 _irowts 一起使用,返回插值点所对应的时间戳(3.0.2.0 版本以后支持)。 - INTERP 可以与伪列 _isfilled 一起使用,显示返回结果是否为原始记录或插值算法产生的数据(3.0.3.0 版本以后支持)。 - INTERP 对于带复合主键的表的查询,若存在相同时间戳的数据,则只有对应的复合主键最小的数据参与运算。 -- INTERP 查询支持NEAR FILL模式, 即当需要FILL时, 使用距离当前时间点最近的数据进行插值, 当前后时间戳与当前时间断面一样近时, FILL 前一行的值. 此模式在流计算中和窗口查询中不支持。例如: SELECT INTERP(col) FROM tb RANGE('2023-01-01 00:00:00', '2023-01-01 00:10:00') FILL(NEAR)。(3.3.4.9版本及以后支持)。 -- INTERP 只有在使用FILL PREV/NEXT/NEAR 模式时才可以使用伪列 `_irowts_origin`。`_irowts_origin`在3.3.4.9版本及以后支持。 -- INTERP `RANEG`子句支持时间范围的扩展(3.3.4.9版本及以后支持), 如`RANGE('2023-01-01 00:00:00', 10s)`表示在时间点'2023-01-01 00:00:00'查找前后10s的数据进行插值, FILL PREV/NEXT/NEAR分别表示从时间点向前/向后/前后查找数据, 若时间点周围没有数据, 则使用FILL指定的值进行插值, 因此此时FILL子句必须指定值。例如: SELECT INTERP(col) FROM tb RANGE('2023-01-01 00:00:00', 10s) FILL(PREV, 1). 目前仅支持时间点和时间范围的组合, 不支持时间区间和时间范围的组合, 即不支持RANGE('2023-01-01 00:00:00', '2023-02-01 00:00:00', 1h)。所指定的时间范围规则与EVERY类似, 单位不能是年或月, 值不能为0, 不能带引号。使用该扩展时, 不支持除FILL PREV/NEXT/NEAR外的其他FILL模式, 且不能指定EVERY子句。 +- INTERP 查询支持 NEAR FILL 模式, 即当需要 FILL 时, 使用距离当前时间点最近的数据进行插值, 当前后时间戳与当前时间断面一样近时, FILL 前一行的值. 
此模式在流计算中和窗口查询中不支持。例如: SELECT INTERP(col) FROM tb RANGE('2023-01-01 00:00:00', '2023-01-01 00:10:00') FILL(NEAR)(3.3.4.9 版本及以后支持)。 +- INTERP 只有在使用 FILL PREV/NEXT/NEAR 模式时才可以使用伪列 `_irowts_origin`。`_irowts_origin`在 3.3.4.9 版本及以后支持。 +- INTERP `RANGE`子句支持时间范围的扩展(3.3.4.9 版本及以后支持), 如`RANGE('2023-01-01 00:00:00', 10s)`表示在时间点 '2023-01-01 00:00:00' 查找前后 10s 的数据进行插值, FILL PREV/NEXT/NEAR 分别表示从时间点向前/向后/前后查找数据, 若时间点周围没有数据, 则使用 FILL 指定的值进行插值, 因此此时 FILL 子句必须指定值。例如: SELECT INTERP(col) FROM tb RANGE('2023-01-01 00:00:00', 10s) FILL(PREV, 1)。目前仅支持时间点和时间范围的组合, 不支持时间区间和时间范围的组合, 即不支持 RANGE('2023-01-01 00:00:00', '2023-02-01 00:00:00', 1h)。所指定的时间范围规则与 EVERY 类似, 单位不能是年或月, 值不能为 0, 不能带引号。使用该扩展时, 不支持除FILL PREV/NEXT/NEAR外的其他 FILL 模式, 且不能指定 EVERY 子句。 ### LAST diff --git a/docs/zh/14-reference/03-taos-sql/25-user.md b/docs/zh/14-reference/03-taos-sql/25-user.md index a77a5d6a67..b7fbd43fe3 100644 --- a/docs/zh/14-reference/03-taos-sql/25-user.md +++ b/docs/zh/14-reference/03-taos-sql/25-user.md @@ -14,14 +14,14 @@ CREATE USER user_name PASS 'password' [SYSINFO {1|0}]; 用户名最长不超过 23 个字节。 -密码最长不超过 31 个字节。密码可以包含字母、数字以及除单引号、双引号、反引号、反斜杠和空格以外的特殊字符,密码不能为空字符串。 +密码长度必须为 8 到 16 位,并且至少包含大写字母、小写字母、数字、特殊字符中的三类。特殊字符包括 `! @ # $ % ^ & * ( ) - _ + = [ ] { } : ; > < ? | ~ , .`。 `SYSINFO` 表示该用户是否能够查看系统信息。`1` 表示可以查看,`0` 表示无权查看。系统信息包括服务配置、dnode、vnode、存储等信息。缺省值为 `1`。 -在下面的示例中,我们创建一个密码为 `123456` 且可以查看系统信息的用户。 +在下面的示例中,我们创建一个密码为 `abc123!@#` 且可以查看系统信息的用户。 ```sql -taos> create user test pass '123456' sysinfo 1; +taos> create user test pass 'abc123!@#' sysinfo 1; Query OK, 0 of 0 rows affected (0.001254s) ``` diff --git a/docs/zh/14-reference/03-taos-sql/32-compress.md b/docs/zh/14-reference/03-taos-sql/32-compress.md index 0f2b260832..60d03e0cc0 100644 --- a/docs/zh/14-reference/03-taos-sql/32-compress.md +++ b/docs/zh/14-reference/03-taos-sql/32-compress.md @@ -29,13 +29,15 @@ description: 可配置压缩算法 - 各个数据类型的默认压缩算法列表和适用范围 -| 数据类型 | 可选编码算法 | 编码算法默认值 | 可选压缩算法|压缩算法默认值| 压缩等级默认值| -| :-----------:|:----------:|:-------:|:-------:|:----------:|:----:| -| tinyint/untinyint/smallint/usmallint/int/uint | simple8b| simple8b | lz4/zlib/zstd/xz| lz4 | medium| -| bigint/ubigint/timestamp | simple8b/delta-i | delta-i |lz4/zlib/zstd/xz | lz4| medium| -|float/double | delta-d|delta-d |lz4/zlib/zstd/xz/tsz|lz4| medium| -|binary/nchar| disabled| disabled|lz4/zlib/zstd/xz| lz4| medium| -|bool| bit-packing| bit-packing| lz4/zlib/zstd/xz| lz4| medium| +| 数据类型 | 可选编码算法 | 编码算法默认值 | 可选压缩算法 | 压缩算法默认值 |压缩等级默认值| +|:------------------------------------:|:-------------------------:|:-----------:|:--------------------:|:----:|:------:| +| int/uint | disabled/simple8b | simple8b | lz4/zlib/zstd/xz | lz4 | medium | +| tinyint/untinyint/smallint/usmallint | disabled/simple8b | simple8b | lz4/zlib/zstd/xz | zlib | medium | +| bigint/ubigint/timestamp | disabled/simple8b/delta-i | delta-i | lz4/zlib/zstd/xz | lz4 | medium | +| float/double | disabled/delta-d | delta-d | lz4/zlib/zstd/xz/tsz | lz4 | medium | +| binary/nchar | disabled | disabled | lz4/zlib/zstd/xz | zstd | medium | +| bool | disabled/bit-packing | bit-packing | lz4/zlib/zstd/xz | zstd | medium | + ## SQL 语法 diff --git a/docs/zh/14-reference/05-connector/14-java.mdx b/docs/zh/14-reference/05-connector/14-java.mdx index 17c10ba313..27f43676f3 100644 --- a/docs/zh/14-reference/05-connector/14-java.mdx +++ b/docs/zh/14-reference/05-connector/14-java.mdx @@ -172,7 +172,7 @@ WKB规范请参考[Well-Known Binary (WKB)](https://libgeos.org/specifications/w **原因**:程序没有找到依赖的本地函数库 taos。 -**解决方法**:Windows 下可以将 C:\TDengine\driver\taos.dll 
拷贝到 C:\Windows\System32\ 目录下,Linux 下将建立如下软链 `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so` 即可,macOS 下需要建立软链 `ln -s /usr/local/lib/libtaos.dylib`。 +**解决方法**:Windows 下可以将 C:\TDengine\driver\taos.dll 拷贝到 C:\Windows\System32\ 目录下,Linux 下将建立如下软链 `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so` 即可,macOS 下需要建立软链 `ln -s /usr/local/lib/libtaos.dylib /usr/lib/libtaos.dylib`。 3. java.lang.UnsatisfiedLinkError: taos.dll Can't load AMD 64 bit on a IA 32-bit platform diff --git a/docs/zh/14-reference/09-error-code.md b/docs/zh/14-reference/09-error-code.md index 685967ef83..c29dd57c8f 100644 --- a/docs/zh/14-reference/09-error-code.md +++ b/docs/zh/14-reference/09-error-code.md @@ -127,7 +127,7 @@ description: TDengine 服务端的错误码列表和详细说明 | 0x80000350 | User already exists | Create user, 重复创建 | 确认操作是否正确 | | 0x80000351 | Invalid user | 用户不存在 | 确认操作是否正确 | | 0x80000352 | Invalid user format | 格式不正确 | 确认操作是否正确 | -| 0x80000353 | Invalid password format | 格式不正确 | 确认操作是否正确 | +| 0x80000353 | Invalid password format | 密码长度必须为 8 到 16 位,并且至少包含大写字母、小写字母、数字、特殊字符中的三类 | 确认密码字符串的格式 | | 0x80000354 | Can not get user from conn | 内部错误 | 上报issue | | 0x80000355 | Too many users | (仅企业版)用户数量超限 | 调整配置 | | 0x80000357 | Authentication failure | 密码不正确 | 确认操作是否正确 | diff --git a/docs/zh/27-train-faq/01-faq.md b/docs/zh/27-train-faq/01-faq.md index af8468411c..ece7c9f309 100644 --- a/docs/zh/27-train-faq/01-faq.md +++ b/docs/zh/27-train-faq/01-faq.md @@ -280,4 +280,21 @@ TDinsight插件中展示的数据是通过taosKeeper和taosAdapter服务收集 https://docs.taosdata.com/reference/components/taosd/#%E7%9B%91%E6%8E%A7%E7%9B%B8%E5%85%B3 您可以随时关闭该参数,只需要在taos.cfg 中修改telemetryReporting为 0,然后重启数据库服务即可。 代码位于:https://github.com/taosdata/TDengine/blob/62e609c558deb764a37d1a01ba84bc35115a85a4/source/dnode/mnode/impl/src/mndTelem.c -此外,对于安全性要求极高的企业版 TDengine Enterprise 来说,此参数不会工作。 +此外,对于安全性要求极高的企业版 TDengine Enterprise 来说,此参数不会工作。 +### 31 第一次连接集群时遇到“Sync leader is unreachable”怎么办? +报这个错,说明第一次向集群的连接是成功的,但第一次访问的IP不是mnode的leader节点,客户端试图与leader建立连接时发生错误。客户端通过EP,也就是指定的fqdn与端口号寻找leader节点,常见的报错原因有两个: + +- 集群中其他节点的端口没有打开 +- 客户端的hosts未正确配置 + +因此用户首先要检查服务端,集群的所有端口(原生连接默认6030,http连接默认6041)有无打开;其次是客户端的hosts文件中是否配置了集群所有节点的fqdn与IP信息。 +如仍无法解决,则需要联系涛思技术人员支持。 + +### 32 同一台服务器,数据库的数据目录 dataDir 不变,为什么原有数据库丢失且集群 ID 发生了变化? 
+背景知识:TDengine 服务端进程(taosd)在启动时,若数据目录(dataDir,该目录在配置文件 taos.cfg 中指定)下不存在有效的数据文件子目录(如 mnode、dnode 和 vnode 等),则会自动创建这些目录。在创建新的 mnode 目录的同时,会分配一个新的集群 ID,从而产生一个新的集群。 + +原因分析:taosd 的数据目录 dataDir 可以指向多个不同的挂载点。如果这些挂载点未在 fstab 文件中配置自动挂载,服务器重启后,dataDir 将仅作为一个本地磁盘的普通目录存在,而未能按预期指向挂载的磁盘。此时,若 taosd 服务启动,它将在 dataDir 下新建目录,从而产生一个新的集群。 + +问题影响:服务器重启后,原有数据库丢失(注:并非真正丢失,只是原有的数据磁盘未挂载,暂时看不到)且集群 ID 发生变化,导致无法访问原有数据库。对于企业版用户,如果已针对集群 ID 进行授权,还会发现集群服务器的机器码未变,但原有的授权已失效。如果未针对该问题进行监控或者未及时发现并进行处理,则用户不会注意到原有数据库已经丢失,从而造成损失,增加运维成本。 + +问题解决:应在 fstab 文件中配置 dataDir 目录的自动挂载,确保 dataDir 始终指向预期的挂载点和目录,此时,再重启服务器,会找回原有的数据库和集群。在后续的版本中,我们将开发一个功能,使 taosd 在检测到启动前后 dataDir 发生变化时,在启动阶段退出,同时提供相应的错误提示。 \ No newline at end of file diff --git a/include/os/osString.h b/include/os/osString.h index 995aa4a591..f22d22b327 100644 --- a/include/os/osString.h +++ b/include/os/osString.h @@ -25,7 +25,7 @@ typedef int32_t TdUcs4; #if !defined(DISALLOW_NCHAR_WITHOUT_ICONV) && defined(DARWIN) #include "iconv.h" #else -typedef void *iconv_t; +typedef void *iconv_t; #endif typedef enum { M2C = 0, C2M } ConvType; @@ -55,29 +55,34 @@ typedef enum { M2C = 0, C2M } ConvType; #ifdef strndup #undef strndup #endif -#define strndup STR_TO_F_FUNC_TAOS_FORBID +#define strndup STR_TO_F_FUNC_TAOS_FORBID #endif #define tstrncpy(dst, src, size) \ do { \ (void)strncpy((dst), (src), (size)); \ - (dst)[(size) - 1] = 0; \ + (dst)[(size)-1] = 0; \ } while (0) int64_t tsnprintf(char *dst, int64_t size, const char *format, ...); -#define TAOS_STRCPY(_dst, _src) ((void)strcpy(_dst, _src)) +#define TAOS_STRCPY(_dst, _src) ((void)strcpy(_dst, _src)) #define TAOS_STRNCPY(_dst, _src, _size) ((void)strncpy(_dst, _src, _size)) -#define TAOS_STRCAT(_dst, _src) ((void)strcat(_dst, _src)) -#define TAOS_STRNCAT(_dst, _src, len) ((void)strncat(_dst, _src, len)) +#define TAOS_STRCAT(_dst, _src) ((void)strcat(_dst, _src)) +#define TAOS_STRNCAT(_dst, _src, len) ((void)strncat(_dst, _src, len)) char *tstrdup(const char *src); int32_t taosUcs4len(TdUcs4 *ucs4); int32_t taosStr2int64(const char *str, int64_t *val); -int32_t taosStr2int16(const char *str, int16_t *val); int32_t taosStr2int32(const char *str, int32_t *val); +int32_t taosStr2int16(const char *str, int16_t *val); int32_t taosStr2int8(const char *str, int8_t *val); +int32_t taosStr2Uint64(const char *str, uint64_t *val); +int32_t taosStr2Uint32(const char *str, uint32_t *val); +int32_t taosStr2Uint16(const char *str, uint16_t *val); +int32_t taosStr2Uint8(const char *str, uint8_t *val); + int32_t taosConvInit(void); void taosConvDestroy(); iconv_t taosAcquireConv(int32_t *idx, ConvType type); @@ -112,9 +117,9 @@ float taosStr2Float(const char *str, char **pEnd); int32_t taosHex2Ascii(const char *z, uint32_t n, void **data, uint32_t *size); int32_t taosAscii2Hex(const char *z, uint32_t n, void **data, uint32_t *size); char *taosStrndup(const char *s, int n); -//int32_t taosBin2Ascii(const char *z, uint32_t n, void** data, uint32_t* size); -bool isHex(const char* z, uint32_t n); -bool isValidateHex(const char* z, uint32_t n); +// int32_t taosBin2Ascii(const char *z, uint32_t n, void** data, uint32_t* size); +bool isHex(const char *z, uint32_t n); +bool isValidateHex(const char *z, uint32_t n); #ifdef __cplusplus } diff --git a/include/util/tdef.h b/include/util/tdef.h index 823c4bbe4b..41712ef443 100644 --- a/include/util/tdef.h +++ b/include/util/tdef.h @@ -318,6 +318,8 @@ typedef enum ELogicConditionType { #define TSDB_MAX_JSON_KEY_LEN 256 #define TSDB_AUTH_LEN 16 +#define TSDB_PASSWORD_MIN_LEN 8 +#define TSDB_PASSWORD_MAX_LEN 16 
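/*
 * Editor's note, illustrative only (not part of this patch): TSDB_PASSWORD_MIN_LEN and
 * TSDB_PASSWORD_MAX_LEN above encode the new password rule described in the documentation
 * changes in this diff: 8 to 16 characters containing at least three of the four classes
 * (upper case, lower case, digits, special characters). Combined with the
 * taosIsBigChar/taosIsSmallChar/taosIsNumberChar/taosIsSpecialChar helpers declared in
 * include/util/tutil.h in this same patch, a format check could look roughly like the sketch
 * below. The function name demoIsValidPasswordFmt is an assumption made for illustration,
 * not the actual TDengine implementation.
 */
#if 0 /* sketch only, never compiled */
#include <stdbool.h>
#include <string.h>

#include "tdef.h"
#include "tutil.h"

static bool demoIsValidPasswordFmt(const char *pwd) {
  size_t len = strlen(pwd);
  if (len < TSDB_PASSWORD_MIN_LEN || len > TSDB_PASSWORD_MAX_LEN) return false;

  bool hasUpper = false, hasLower = false, hasDigit = false, hasSpecial = false;
  for (size_t i = 0; i < len; ++i) {
    hasUpper = hasUpper || taosIsBigChar(pwd[i]);
    hasLower = hasLower || taosIsSmallChar(pwd[i]);
    hasDigit = hasDigit || taosIsNumberChar(pwd[i]);
    hasSpecial = hasSpecial || taosIsSpecialChar(pwd[i]);
  }

  /* at least three of the four character classes must be present */
  int classes = (hasUpper ? 1 : 0) + (hasLower ? 1 : 0) + (hasDigit ? 1 : 0) + (hasSpecial ? 1 : 0);
  return classes >= 3;
}
#endif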
#define TSDB_PASSWORD_LEN 32 #define TSDB_USET_PASSWORD_LEN 129 #define TSDB_VERSION_LEN 32 diff --git a/include/util/tutil.h b/include/util/tutil.h index aa3b774e84..31b2343ba2 100644 --- a/include/util/tutil.h +++ b/include/util/tutil.h @@ -230,6 +230,11 @@ static FORCE_INLINE int32_t taosGetTbHashVal(const char *tbname, int32_t tblen, #define TAOS_UNUSED(expr) (void)(expr) +bool taosIsBigChar(char c); +bool taosIsSmallChar(char c); +bool taosIsNumberChar(char c); +bool taosIsSpecialChar(char c); + #ifdef __cplusplus } #endif diff --git a/packaging/delete_ref_lock.py b/packaging/delete_ref_lock.py index cf0e4cdd05..7200246a8a 100644 --- a/packaging/delete_ref_lock.py +++ b/packaging/delete_ref_lock.py @@ -1,59 +1,80 @@ import subprocess import re -# 执行 git fetch 命令并捕获输出 def git_fetch(): result = subprocess.run(['git', 'fetch'], capture_output=True, text=True) return result -# 解析分支名称 +def git_prune(): + # git remote prune origin + print("git remote prune origin") + result = subprocess.run(['git', 'remote', 'prune', 'origin'], capture_output=True, text=True) + return result + def parse_branch_name_type1(error_output): - # 使用正则表达式匹配 'is at' 前的分支名称 + # error: cannot lock ref 'refs/remotes/origin/fix/3.0/TD-32817': is at 7af5 but expected eaba + # match the branch name before ‘is at’ with a regular expression match = re.search(r"error: cannot lock ref '(refs/remotes/origin/[^']+)': is at", error_output) if match: return match.group(1) return None -# 解析第二种错误中的分支名称 def parse_branch_name_type2(error_output): - # 使用正则表达式匹配 'exists' 前的第一个引号内的分支名称 + # match the branch name before ‘exists; cannot create’ with a regular expression match = re.search(r"'(refs/remotes/origin/[^']+)' exists;", error_output) if match: return match.group(1) return None -# 执行 git update-ref -d 命令 +# parse branch name from error output of git remote prune origin +def parse_branch_name_type3(error_output): + # match the branch name before the first single quote before 'Unable to' with a regular expression + # git error: could not delete references: cannot lock ref 'refs/remotes/origin/test/3.0/TS-4893': Unable to create 'D:/workspace/main/TDinternal/community/.git/refs/remotes/origin/test/3.0/TS-4893.lock': File exists + match = re.search(r"references: cannot lock ref '(refs/remotes/origin/[^']+)': Unable to", error_output) + if match: + return match.group(1) + return None + + +# execute git update-ref -d to delete the ref def git_update_ref(branch_name): if branch_name: subprocess.run(['git', 'update-ref', '-d', f'{branch_name}'], check=True) -# 解析错误类型并执行相应的修复操作 +# parse error type and execute corresponding repair operation def handle_error(error_output): - # 错误类型1:本地引用的提交ID与远程不一致 - if "is at" in error_output and "but expected" in error_output: - branch_name = parse_branch_name_type1(error_output) - if branch_name: - print(f"Detected error type 1, attempting to delete ref for branch: {branch_name}") - git_update_ref(branch_name) - else: - print("Error parsing branch name for type 1.") - # 错误类型2:尝试创建新的远程引用时,本地已经存在同名的引用 - elif "exists; cannot create" in error_output: - branch_name = parse_branch_name_type2(error_output) - if branch_name: - print(f"Detected error type 2, attempting to delete ref for branch: {branch_name}") - git_update_ref(branch_name) - else: - print("Error parsing branch name for type 2.") + error_types = [ + ("is at", "but expected", parse_branch_name_type1, "type 1"), + ("exists; cannot create", None, parse_branch_name_type2, "type 2"), + ("Unable to create", "File exists", parse_branch_name_type3, "type 
3") + ] + + for error_type in error_types: + if error_type[0] in error_output and (error_type[1] is None or error_type[1] in error_output): + branch_name = error_type[2](error_output) + if branch_name: + print(f"Detected error {error_type[3]}, attempting to delete ref for branch: {branch_name}") + git_update_ref(branch_name) + else: + print(f"Error parsing branch name for {error_type[3]}.") + break -# 主函数 def main(): fetch_result = git_fetch() - if fetch_result.returncode != 0: # 如果 git fetch 命令失败 + if fetch_result.returncode != 0: error_output = fetch_result.stderr handle_error(error_output) else: print("Git fetch successful.") + prune_result = git_prune() + print(prune_result.returncode) + if prune_result.returncode != 0: + error_output = prune_result.stderr + print(error_output) + handle_error(error_output) + else: + print("Git prune successful.") + if __name__ == "__main__": main() \ No newline at end of file diff --git a/source/client/src/clientRawBlockWrite.c b/source/client/src/clientRawBlockWrite.c index 3a23d38375..9ce2f726a5 100644 --- a/source/client/src/clientRawBlockWrite.c +++ b/source/client/src/clientRawBlockWrite.c @@ -52,6 +52,22 @@ #define TMQ_META_VERSION "1.0" +static bool tmqAddJsonObjectItem(cJSON *object, const char *string, cJSON *item){ + bool ret = cJSON_AddItemToObject(object, string, item); + if (!ret){ + cJSON_Delete(item); + } + return ret; +} +static bool tmqAddJsonArrayItem(cJSON *array, cJSON *item){ + bool ret = cJSON_AddItemToArray(array, item); + if (!ret){ + cJSON_Delete(item); + } + return ret; +} + + static int32_t tmqWriteBatchMetaDataImpl(TAOS* taos, void* meta, int32_t metaLen); static tb_uid_t processSuid(tb_uid_t suid, char* db) { return suid + MurmurHash3_32(db, strlen(db)); } static void buildCreateTableJson(SSchemaWrapper* schemaRow, SSchemaWrapper* schemaTag, char* name, int64_t id, int8_t t, @@ -68,41 +84,43 @@ static void buildCreateTableJson(SSchemaWrapper* schemaRow, SSchemaWrapper* sche cJSON* type = cJSON_CreateString("create"); RAW_NULL_CHECK(type); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "type", type)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "type", type)); cJSON* tableType = cJSON_CreateString(t == TSDB_NORMAL_TABLE ? 
"normal" : "super"); RAW_NULL_CHECK(tableType); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tableType", tableType)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "tableType", tableType)); cJSON* tableName = cJSON_CreateString(name); RAW_NULL_CHECK(tableName); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tableName", tableName)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "tableName", tableName)); cJSON* columns = cJSON_CreateArray(); RAW_NULL_CHECK(columns); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "columns", columns)); + for (int i = 0; i < schemaRow->nCols; i++) { cJSON* column = cJSON_CreateObject(); RAW_NULL_CHECK(column); + RAW_FALSE_CHECK(tmqAddJsonArrayItem(columns, column)); SSchema* s = schemaRow->pSchema + i; cJSON* cname = cJSON_CreateString(s->name); RAW_NULL_CHECK(cname); - RAW_FALSE_CHECK(cJSON_AddItemToObject(column, "name", cname)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(column, "name", cname)); cJSON* ctype = cJSON_CreateNumber(s->type); RAW_NULL_CHECK(ctype); - RAW_FALSE_CHECK(cJSON_AddItemToObject(column, "type", ctype)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(column, "type", ctype)); if (s->type == TSDB_DATA_TYPE_BINARY || s->type == TSDB_DATA_TYPE_VARBINARY || s->type == TSDB_DATA_TYPE_GEOMETRY) { int32_t length = s->bytes - VARSTR_HEADER_SIZE; cJSON* cbytes = cJSON_CreateNumber(length); RAW_NULL_CHECK(cbytes); - RAW_FALSE_CHECK(cJSON_AddItemToObject(column, "length", cbytes)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(column, "length", cbytes)); } else if (s->type == TSDB_DATA_TYPE_NCHAR) { int32_t length = (s->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE; cJSON* cbytes = cJSON_CreateNumber(length); RAW_NULL_CHECK(cbytes); - RAW_FALSE_CHECK(cJSON_AddItemToObject(column, "length", cbytes)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(column, "length", cbytes)); } cJSON* isPk = cJSON_CreateBool(s->flags & COL_IS_KEY); RAW_NULL_CHECK(isPk); - RAW_FALSE_CHECK(cJSON_AddItemToObject(column, "isPrimarykey", isPk)); - RAW_FALSE_CHECK(cJSON_AddItemToArray(columns, column)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(column, "isPrimarykey", isPk)); if (pColCmprRow == NULL) { continue; @@ -124,44 +142,44 @@ static void buildCreateTableJson(SSchemaWrapper* schemaRow, SSchemaWrapper* sche cJSON* encodeJson = cJSON_CreateString(encode); RAW_NULL_CHECK(encodeJson); - RAW_FALSE_CHECK(cJSON_AddItemToObject(column, "encode", encodeJson)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(column, "encode", encodeJson)); cJSON* compressJson = cJSON_CreateString(compress); RAW_NULL_CHECK(compressJson); - RAW_FALSE_CHECK(cJSON_AddItemToObject(column, "compress", compressJson)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(column, "compress", compressJson)); cJSON* levelJson = cJSON_CreateString(level); RAW_NULL_CHECK(levelJson); - RAW_FALSE_CHECK(cJSON_AddItemToObject(column, "level", levelJson)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(column, "level", levelJson)); } - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "columns", columns)); cJSON* tags = cJSON_CreateArray(); RAW_NULL_CHECK(tags); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "tags", tags)); + for (int i = 0; schemaTag && i < schemaTag->nCols; i++) { cJSON* tag = cJSON_CreateObject(); RAW_NULL_CHECK(tag); + RAW_FALSE_CHECK(tmqAddJsonArrayItem(tags, tag)); SSchema* s = schemaTag->pSchema + i; cJSON* tname = cJSON_CreateString(s->name); RAW_NULL_CHECK(tname); - RAW_FALSE_CHECK(cJSON_AddItemToObject(tag, "name", tname)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(tag, "name", tname)); cJSON* ttype = cJSON_CreateNumber(s->type); RAW_NULL_CHECK(ttype); - 
RAW_FALSE_CHECK(cJSON_AddItemToObject(tag, "type", ttype)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(tag, "type", ttype)); if (s->type == TSDB_DATA_TYPE_BINARY || s->type == TSDB_DATA_TYPE_VARBINARY || s->type == TSDB_DATA_TYPE_GEOMETRY) { int32_t length = s->bytes - VARSTR_HEADER_SIZE; cJSON* cbytes = cJSON_CreateNumber(length); RAW_NULL_CHECK(cbytes); - RAW_FALSE_CHECK(cJSON_AddItemToObject(tag, "length", cbytes)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(tag, "length", cbytes)); } else if (s->type == TSDB_DATA_TYPE_NCHAR) { int32_t length = (s->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE; cJSON* cbytes = cJSON_CreateNumber(length); RAW_NULL_CHECK(cbytes); - RAW_FALSE_CHECK(cJSON_AddItemToObject(tag, "length", cbytes)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(tag, "length", cbytes)); } - RAW_FALSE_CHECK(cJSON_AddItemToArray(tags, tag)); } - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tags", tags)); end: *pJson = json; @@ -175,7 +193,7 @@ static int32_t setCompressOption(cJSON* json, uint32_t para) { RAW_NULL_CHECK(encodeStr); cJSON* encodeJson = cJSON_CreateString(encodeStr); RAW_NULL_CHECK(encodeJson); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "encode", encodeJson)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "encode", encodeJson)); return code; } uint8_t compress = COMPRESS_L2_TYPE_U32(para); @@ -184,7 +202,7 @@ static int32_t setCompressOption(cJSON* json, uint32_t para) { RAW_NULL_CHECK(compressStr); cJSON* compressJson = cJSON_CreateString(compressStr); RAW_NULL_CHECK(compressJson); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "compress", compressJson)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "compress", compressJson)); return code; } uint8_t level = COMPRESS_L2_TYPE_LEVEL_U32(para); @@ -193,7 +211,7 @@ static int32_t setCompressOption(cJSON* json, uint32_t para) { RAW_NULL_CHECK(levelStr); cJSON* levelJson = cJSON_CreateString(levelStr); RAW_NULL_CHECK(levelJson); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "level", levelJson)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "level", levelJson)); return code; } @@ -214,19 +232,19 @@ static void buildAlterSTableJson(void* alterData, int32_t alterDataLen, cJSON** RAW_NULL_CHECK(json); cJSON* type = cJSON_CreateString("alter"); RAW_NULL_CHECK(type); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "type", type)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "type", type)); SName name = {0}; RAW_RETURN_CHECK(tNameFromString(&name, req.name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE)); cJSON* tableType = cJSON_CreateString("super"); RAW_NULL_CHECK(tableType); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tableType", tableType)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "tableType", tableType)); cJSON* tableName = cJSON_CreateString(name.tname); RAW_NULL_CHECK(tableName); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tableName", tableName)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "tableName", tableName)); cJSON* alterType = cJSON_CreateNumber(req.alterType); RAW_NULL_CHECK(alterType); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "alterType", alterType)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "alterType", alterType)); switch (req.alterType) { case TSDB_ALTER_TABLE_ADD_TAG: case TSDB_ALTER_TABLE_ADD_COLUMN: { @@ -234,22 +252,22 @@ static void buildAlterSTableJson(void* alterData, int32_t alterDataLen, cJSON** RAW_NULL_CHECK(field); cJSON* colName = cJSON_CreateString(field->name); RAW_NULL_CHECK(colName); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colName", colName)); + 
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colName", colName)); cJSON* colType = cJSON_CreateNumber(field->type); RAW_NULL_CHECK(colType); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colType", colType)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colType", colType)); if (field->type == TSDB_DATA_TYPE_BINARY || field->type == TSDB_DATA_TYPE_VARBINARY || field->type == TSDB_DATA_TYPE_GEOMETRY) { int32_t length = field->bytes - VARSTR_HEADER_SIZE; cJSON* cbytes = cJSON_CreateNumber(length); RAW_NULL_CHECK(cbytes); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colLength", cbytes)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colLength", cbytes)); } else if (field->type == TSDB_DATA_TYPE_NCHAR) { int32_t length = (field->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE; cJSON* cbytes = cJSON_CreateNumber(length); RAW_NULL_CHECK(cbytes); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colLength", cbytes)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colLength", cbytes)); } break; } @@ -258,22 +276,22 @@ static void buildAlterSTableJson(void* alterData, int32_t alterDataLen, cJSON** RAW_NULL_CHECK(field); cJSON* colName = cJSON_CreateString(field->name); RAW_NULL_CHECK(colName); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colName", colName)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colName", colName)); cJSON* colType = cJSON_CreateNumber(field->type); RAW_NULL_CHECK(colType); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colType", colType)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colType", colType)); if (field->type == TSDB_DATA_TYPE_BINARY || field->type == TSDB_DATA_TYPE_VARBINARY || field->type == TSDB_DATA_TYPE_GEOMETRY) { int32_t length = field->bytes - VARSTR_HEADER_SIZE; cJSON* cbytes = cJSON_CreateNumber(length); RAW_NULL_CHECK(cbytes); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colLength", cbytes)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colLength", cbytes)); } else if (field->type == TSDB_DATA_TYPE_NCHAR) { int32_t length = (field->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE; cJSON* cbytes = cJSON_CreateNumber(length); RAW_NULL_CHECK(cbytes); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colLength", cbytes)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colLength", cbytes)); } RAW_RETURN_CHECK(setCompressOption(json, field->compress)); break; @@ -284,7 +302,7 @@ static void buildAlterSTableJson(void* alterData, int32_t alterDataLen, cJSON** RAW_NULL_CHECK(field); cJSON* colName = cJSON_CreateString(field->name); RAW_NULL_CHECK(colName); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colName", colName)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colName", colName)); break; } case TSDB_ALTER_TABLE_UPDATE_TAG_BYTES: @@ -293,21 +311,21 @@ static void buildAlterSTableJson(void* alterData, int32_t alterDataLen, cJSON** RAW_NULL_CHECK(field); cJSON* colName = cJSON_CreateString(field->name); RAW_NULL_CHECK(colName); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colName", colName)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colName", colName)); cJSON* colType = cJSON_CreateNumber(field->type); RAW_NULL_CHECK(colType); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colType", colType)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colType", colType)); if (field->type == TSDB_DATA_TYPE_BINARY || field->type == TSDB_DATA_TYPE_VARBINARY || field->type == TSDB_DATA_TYPE_GEOMETRY) { int32_t length = field->bytes - VARSTR_HEADER_SIZE; cJSON* cbytes = cJSON_CreateNumber(length); RAW_NULL_CHECK(cbytes); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, 
"colLength", cbytes)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colLength", cbytes)); } else if (field->type == TSDB_DATA_TYPE_NCHAR) { int32_t length = (field->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE; cJSON* cbytes = cJSON_CreateNumber(length); RAW_NULL_CHECK(cbytes); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colLength", cbytes)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colLength", cbytes)); } break; } @@ -319,10 +337,10 @@ static void buildAlterSTableJson(void* alterData, int32_t alterDataLen, cJSON** RAW_NULL_CHECK(newField); cJSON* colName = cJSON_CreateString(oldField->name); RAW_NULL_CHECK(colName); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colName", colName)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colName", colName)); cJSON* colNewName = cJSON_CreateString(newField->name); RAW_NULL_CHECK(colNewName); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colNewName", colNewName)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colNewName", colNewName)); break; } case TSDB_ALTER_TABLE_UPDATE_COLUMN_COMPRESS: { @@ -330,7 +348,7 @@ static void buildAlterSTableJson(void* alterData, int32_t alterDataLen, cJSON** RAW_NULL_CHECK(field); cJSON* colName = cJSON_CreateString(field->name); RAW_NULL_CHECK(colName); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colName", colName)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colName", colName)); RAW_RETURN_CHECK(setCompressOption(json, field->bytes)); break; } @@ -391,51 +409,47 @@ static void buildChildElement(cJSON* json, SVCreateTbReq* pCreateReq) { int64_t id = pCreateReq->uid; uint8_t tagNum = pCreateReq->ctb.tagNum; int32_t code = 0; - cJSON* tags = NULL; + SArray* pTagVals = NULL; + char* pJson = NULL; + cJSON* tableName = cJSON_CreateString(name); RAW_NULL_CHECK(tableName); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tableName", tableName)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "tableName", tableName)); cJSON* using = cJSON_CreateString(sname); RAW_NULL_CHECK(using); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "using", using)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "using", using)); cJSON* tagNumJson = cJSON_CreateNumber(tagNum); RAW_NULL_CHECK(tagNumJson); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tagNum", tagNumJson)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "tagNum", tagNumJson)); - tags = cJSON_CreateArray(); + cJSON* tags = cJSON_CreateArray(); RAW_NULL_CHECK(tags); - SArray* pTagVals = NULL; + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "tags", tags)); RAW_RETURN_CHECK(tTagToValArray(pTag, &pTagVals)); - if (tTagIsJson(pTag)) { STag* p = (STag*)pTag; if (p->nTag == 0) { uError("p->nTag == 0"); goto end; } - char* pJson = NULL; parseTagDatatoJson(pTag, &pJson); - if (pJson == NULL) { - uError("parseTagDatatoJson failed, pJson == NULL"); - goto end; - } + RAW_NULL_CHECK(pJson); cJSON* tag = cJSON_CreateObject(); RAW_NULL_CHECK(tag); + RAW_FALSE_CHECK(tmqAddJsonArrayItem(tags, tag)); STagVal* pTagVal = taosArrayGet(pTagVals, 0); RAW_NULL_CHECK(pTagVal); char* ptname = taosArrayGet(tagName, 0); RAW_NULL_CHECK(ptname); cJSON* tname = cJSON_CreateString(ptname); RAW_NULL_CHECK(tname); - RAW_FALSE_CHECK(cJSON_AddItemToObject(tag, "name", tname)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(tag, "name", tname)); cJSON* ttype = cJSON_CreateNumber(TSDB_DATA_TYPE_JSON); RAW_NULL_CHECK(ttype); - RAW_FALSE_CHECK(cJSON_AddItemToObject(tag, "type", ttype)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(tag, "type", ttype)); cJSON* tvalue = cJSON_CreateString(pJson); RAW_NULL_CHECK(tvalue); - 
RAW_FALSE_CHECK(cJSON_AddItemToObject(tag, "value", tvalue)); - RAW_FALSE_CHECK(cJSON_AddItemToArray(tags, tag)); - taosMemoryFree(pJson); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(tag, "value", tvalue)); goto end; } @@ -444,36 +458,34 @@ static void buildChildElement(cJSON* json, SVCreateTbReq* pCreateReq) { RAW_NULL_CHECK(pTagVal); cJSON* tag = cJSON_CreateObject(); RAW_NULL_CHECK(tag); + RAW_FALSE_CHECK(tmqAddJsonArrayItem(tags, tag)); char* ptname = taosArrayGet(tagName, i); RAW_NULL_CHECK(ptname); cJSON* tname = cJSON_CreateString(ptname); RAW_NULL_CHECK(tname); - RAW_FALSE_CHECK(cJSON_AddItemToObject(tag, "name", tname)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(tag, "name", tname)); cJSON* ttype = cJSON_CreateNumber(pTagVal->type); RAW_NULL_CHECK(ttype); - RAW_FALSE_CHECK(cJSON_AddItemToObject(tag, "type", ttype)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(tag, "type", ttype)); cJSON* tvalue = NULL; if (IS_VAR_DATA_TYPE(pTagVal->type)) { - char* buf = NULL; int64_t bufSize = 0; if (pTagVal->type == TSDB_DATA_TYPE_VARBINARY) { bufSize = pTagVal->nData * 2 + 2 + 3; } else { bufSize = pTagVal->nData + 3; } - buf = taosMemoryCalloc(bufSize, 1); - + char* buf = taosMemoryCalloc(bufSize, 1); RAW_NULL_CHECK(buf); - if (!buf) goto end; if (dataConverToStr(buf, bufSize, pTagVal->type, pTagVal->pData, pTagVal->nData, NULL) != TSDB_CODE_SUCCESS) { taosMemoryFree(buf); goto end; } tvalue = cJSON_CreateString(buf); - RAW_NULL_CHECK(tvalue); taosMemoryFree(buf); + RAW_NULL_CHECK(tvalue); } else { double val = 0; GET_TYPED_DATA(val, double, pTagVal->type, &pTagVal->i64); @@ -481,12 +493,11 @@ static void buildChildElement(cJSON* json, SVCreateTbReq* pCreateReq) { RAW_NULL_CHECK(tvalue); } - RAW_FALSE_CHECK(cJSON_AddItemToObject(tag, "value", tvalue)); - RAW_FALSE_CHECK(cJSON_AddItemToArray(tags, tag)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(tag, "value", tvalue)); } end: - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tags", tags)); + taosMemoryFree(pJson); taosArrayDestroy(pTagVals); } @@ -497,22 +508,23 @@ static void buildCreateCTableJson(SVCreateTbReq* pCreateReq, int32_t nReqs, cJSO RAW_NULL_CHECK(json); cJSON* type = cJSON_CreateString("create"); RAW_NULL_CHECK(type); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "type", type)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "type", type)); cJSON* tableType = cJSON_CreateString("child"); RAW_NULL_CHECK(tableType); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tableType", tableType)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "tableType", tableType)); buildChildElement(json, pCreateReq); cJSON* createList = cJSON_CreateArray(); RAW_NULL_CHECK(createList); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "createList", createList)); + for (int i = 0; nReqs > 1 && i < nReqs; i++) { cJSON* create = cJSON_CreateObject(); RAW_NULL_CHECK(create); buildChildElement(create, pCreateReq + i); - RAW_FALSE_CHECK(cJSON_AddItemToArray(createList, create)); + RAW_FALSE_CHECK(tmqAddJsonArrayItem(createList, create)); } - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "createList", createList)); end: *pJson = json; @@ -619,62 +631,62 @@ static void processAlterTable(SMqMetaRsp* metaRsp, cJSON** pJson) { RAW_NULL_CHECK(json); cJSON* type = cJSON_CreateString("alter"); RAW_NULL_CHECK(type); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "type", type)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "type", type)); cJSON* tableType = cJSON_CreateString(vAlterTbReq.action == TSDB_ALTER_TABLE_UPDATE_TAG_VAL || vAlterTbReq.action == TSDB_ALTER_TABLE_UPDATE_MULTI_TAG_VAL ? 
"child" : "normal"); RAW_NULL_CHECK(tableType); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tableType", tableType)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "tableType", tableType)); cJSON* tableName = cJSON_CreateString(vAlterTbReq.tbName); RAW_NULL_CHECK(tableName); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tableName", tableName)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "tableName", tableName)); cJSON* alterType = cJSON_CreateNumber(vAlterTbReq.action); RAW_NULL_CHECK(alterType); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "alterType", alterType)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "alterType", alterType)); switch (vAlterTbReq.action) { case TSDB_ALTER_TABLE_ADD_COLUMN: { cJSON* colName = cJSON_CreateString(vAlterTbReq.colName); RAW_NULL_CHECK(colName); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colName", colName)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colName", colName)); cJSON* colType = cJSON_CreateNumber(vAlterTbReq.type); RAW_NULL_CHECK(colType); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colType", colType)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colType", colType)); if (vAlterTbReq.type == TSDB_DATA_TYPE_BINARY || vAlterTbReq.type == TSDB_DATA_TYPE_VARBINARY || vAlterTbReq.type == TSDB_DATA_TYPE_GEOMETRY) { int32_t length = vAlterTbReq.bytes - VARSTR_HEADER_SIZE; cJSON* cbytes = cJSON_CreateNumber(length); RAW_NULL_CHECK(cbytes); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colLength", cbytes)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colLength", cbytes)); } else if (vAlterTbReq.type == TSDB_DATA_TYPE_NCHAR) { int32_t length = (vAlterTbReq.bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE; cJSON* cbytes = cJSON_CreateNumber(length); RAW_NULL_CHECK(cbytes); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colLength", cbytes)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colLength", cbytes)); } break; } case TSDB_ALTER_TABLE_ADD_COLUMN_WITH_COMPRESS_OPTION: { cJSON* colName = cJSON_CreateString(vAlterTbReq.colName); RAW_NULL_CHECK(colName); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colName", colName)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colName", colName)); cJSON* colType = cJSON_CreateNumber(vAlterTbReq.type); RAW_NULL_CHECK(colType); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colType", colType)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colType", colType)); if (vAlterTbReq.type == TSDB_DATA_TYPE_BINARY || vAlterTbReq.type == TSDB_DATA_TYPE_VARBINARY || vAlterTbReq.type == TSDB_DATA_TYPE_GEOMETRY) { int32_t length = vAlterTbReq.bytes - VARSTR_HEADER_SIZE; cJSON* cbytes = cJSON_CreateNumber(length); RAW_NULL_CHECK(cbytes); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colLength", cbytes)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colLength", cbytes)); } else if (vAlterTbReq.type == TSDB_DATA_TYPE_NCHAR) { int32_t length = (vAlterTbReq.bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE; cJSON* cbytes = cJSON_CreateNumber(length); RAW_NULL_CHECK(cbytes); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colLength", cbytes)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colLength", cbytes)); } RAW_RETURN_CHECK(setCompressOption(json, vAlterTbReq.compress)); break; @@ -682,43 +694,43 @@ static void processAlterTable(SMqMetaRsp* metaRsp, cJSON** pJson) { case TSDB_ALTER_TABLE_DROP_COLUMN: { cJSON* colName = cJSON_CreateString(vAlterTbReq.colName); RAW_NULL_CHECK(colName); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colName", colName)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colName", colName)); 
break; } case TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES: { cJSON* colName = cJSON_CreateString(vAlterTbReq.colName); RAW_NULL_CHECK(colName); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colName", colName)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colName", colName)); cJSON* colType = cJSON_CreateNumber(vAlterTbReq.colModType); RAW_NULL_CHECK(colType); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colType", colType)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colType", colType)); if (vAlterTbReq.colModType == TSDB_DATA_TYPE_BINARY || vAlterTbReq.colModType == TSDB_DATA_TYPE_VARBINARY || vAlterTbReq.colModType == TSDB_DATA_TYPE_GEOMETRY) { int32_t length = vAlterTbReq.colModBytes - VARSTR_HEADER_SIZE; cJSON* cbytes = cJSON_CreateNumber(length); RAW_NULL_CHECK(cbytes); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colLength", cbytes)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colLength", cbytes)); } else if (vAlterTbReq.colModType == TSDB_DATA_TYPE_NCHAR) { int32_t length = (vAlterTbReq.colModBytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE; cJSON* cbytes = cJSON_CreateNumber(length); RAW_NULL_CHECK(cbytes); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colLength", cbytes)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colLength", cbytes)); } break; } case TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME: { cJSON* colName = cJSON_CreateString(vAlterTbReq.colName); RAW_NULL_CHECK(colName); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colName", colName)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colName", colName)); cJSON* colNewName = cJSON_CreateString(vAlterTbReq.colNewName); RAW_NULL_CHECK(colNewName); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colNewName", colNewName)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colNewName", colNewName)); break; } case TSDB_ALTER_TABLE_UPDATE_TAG_VAL: { cJSON* tagName = cJSON_CreateString(vAlterTbReq.tagName); RAW_NULL_CHECK(tagName); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colName", tagName)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colName", tagName)); bool isNull = vAlterTbReq.isNull; if (vAlterTbReq.tagType == TSDB_DATA_TYPE_JSON) { @@ -757,12 +769,12 @@ static void processAlterTable(SMqMetaRsp* metaRsp, cJSON** pJson) { cJSON* colValue = cJSON_CreateString(buf); taosMemoryFree(buf); RAW_NULL_CHECK(colValue); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colValue", colValue)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colValue", colValue)); } cJSON* isNullCJson = cJSON_CreateBool(isNull); RAW_NULL_CHECK(isNullCJson); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colValueNull", isNullCJson)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colValueNull", isNullCJson)); break; } case TSDB_ALTER_TABLE_UPDATE_MULTI_TAG_VAL: { @@ -774,14 +786,17 @@ static void processAlterTable(SMqMetaRsp* metaRsp, cJSON** pJson) { cJSON* tags = cJSON_CreateArray(); RAW_NULL_CHECK(tags); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "tags", tags)); + for (int32_t i = 0; i < nTags; i++) { cJSON* member = cJSON_CreateObject(); RAW_NULL_CHECK(member); + RAW_FALSE_CHECK(tmqAddJsonArrayItem(tags, member)); SMultiTagUpateVal* pTagVal = taosArrayGet(vAlterTbReq.pMultiTag, i); cJSON* tagName = cJSON_CreateString(pTagVal->tagName); RAW_NULL_CHECK(tagName); - RAW_FALSE_CHECK(cJSON_AddItemToObject(member, "colName", tagName)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(member, "colName", tagName)); if (pTagVal->tagType == TSDB_DATA_TYPE_JSON) { uError("processAlterTable isJson false"); @@ -789,14 +804,13 @@ static void processAlterTable(SMqMetaRsp* metaRsp, 
cJSON** pJson) { } bool isNull = pTagVal->isNull; if (!isNull) { - char* buf = NULL; int64_t bufSize = 0; if (pTagVal->tagType == TSDB_DATA_TYPE_VARBINARY) { bufSize = pTagVal->nTagVal * 2 + 2 + 3; } else { bufSize = pTagVal->nTagVal + 3; } - buf = taosMemoryCalloc(bufSize, 1); + char* buf = taosMemoryCalloc(bufSize, 1); RAW_NULL_CHECK(buf); if (dataConverToStr(buf, bufSize, pTagVal->tagType, pTagVal->pTagVal, pTagVal->nTagVal, NULL) != TSDB_CODE_SUCCESS) { @@ -806,21 +820,19 @@ static void processAlterTable(SMqMetaRsp* metaRsp, cJSON** pJson) { cJSON* colValue = cJSON_CreateString(buf); taosMemoryFree(buf); RAW_NULL_CHECK(colValue); - RAW_FALSE_CHECK(cJSON_AddItemToObject(member, "colValue", colValue)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(member, "colValue", colValue)); } cJSON* isNullCJson = cJSON_CreateBool(isNull); RAW_NULL_CHECK(isNullCJson); - RAW_FALSE_CHECK(cJSON_AddItemToObject(member, "colValueNull", isNullCJson)); - RAW_FALSE_CHECK(cJSON_AddItemToArray(tags, member)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(member, "colValueNull", isNullCJson)); } - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tags", tags)); break; } case TSDB_ALTER_TABLE_UPDATE_COLUMN_COMPRESS: { cJSON* colName = cJSON_CreateString(vAlterTbReq.colName); RAW_NULL_CHECK(colName); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colName", colName)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colName", colName)); RAW_RETURN_CHECK(setCompressOption(json, vAlterTbReq.compress)); break; } @@ -858,13 +870,13 @@ static void processDropSTable(SMqMetaRsp* metaRsp, cJSON** pJson) { RAW_NULL_CHECK(json); cJSON* type = cJSON_CreateString("drop"); RAW_NULL_CHECK(type); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "type", type)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "type", type)); cJSON* tableType = cJSON_CreateString("super"); RAW_NULL_CHECK(tableType); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tableType", tableType)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "tableType", tableType)); cJSON* tableName = cJSON_CreateString(req.name); RAW_NULL_CHECK(tableName); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tableName", tableName)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "tableName", tableName)); end: uDebug("processDropSTable return"); @@ -897,10 +909,10 @@ static void processDeleteTable(SMqMetaRsp* metaRsp, cJSON** pJson) { RAW_NULL_CHECK(json); cJSON* type = cJSON_CreateString("delete"); RAW_NULL_CHECK(type); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "type", type)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "type", type)); cJSON* sqlJson = cJSON_CreateString(sql); RAW_NULL_CHECK(sqlJson); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "sql", sqlJson)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "sql", sqlJson)); end: uDebug("processDeleteTable return"); @@ -928,16 +940,17 @@ static void processDropTable(SMqMetaRsp* metaRsp, cJSON** pJson) { RAW_NULL_CHECK(json); cJSON* type = cJSON_CreateString("drop"); RAW_NULL_CHECK(type); - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "type", type)); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "type", type)); cJSON* tableNameList = cJSON_CreateArray(); RAW_NULL_CHECK(tableNameList); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "tableNameList", tableNameList)); + for (int32_t iReq = 0; iReq < req.nReqs; iReq++) { SVDropTbReq* pDropTbReq = req.pReqs + iReq; cJSON* tableName = cJSON_CreateString(pDropTbReq->name); RAW_NULL_CHECK(tableName); - RAW_FALSE_CHECK(cJSON_AddItemToArray(tableNameList, tableName)); + 
RAW_FALSE_CHECK(tmqAddJsonArrayItem(tableNameList, tableName)); } - RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tableNameList", tableNameList)); end: uDebug("processDropTable return"); @@ -2183,6 +2196,8 @@ static void processBatchMetaToJson(SMqBatchMetaRsp* pMsgRsp, char** string) { RAW_FALSE_CHECK(cJSON_AddStringToObject(pJson, "tmq_meta_version", TMQ_META_VERSION)); cJSON* pMetaArr = cJSON_CreateArray(); RAW_NULL_CHECK(pMetaArr); + RAW_FALSE_CHECK(tmqAddJsonObjectItem(pJson, "metas", pMetaArr)); + int32_t num = taosArrayGetSize(rsp.batchMetaReq); for (int32_t i = 0; i < num; i++) { int32_t* len = taosArrayGet(rsp.batchMetaLen, i); @@ -2198,10 +2213,9 @@ static void processBatchMetaToJson(SMqBatchMetaRsp* pMsgRsp, char** string) { cJSON* pItem = NULL; processSimpleMeta(&metaRsp, &pItem); tDeleteMqMetaRsp(&metaRsp); - RAW_FALSE_CHECK(cJSON_AddItemToArray(pMetaArr, pItem)); + RAW_FALSE_CHECK(tmqAddJsonArrayItem(pMetaArr, pItem)); } - RAW_FALSE_CHECK(cJSON_AddItemToObject(pJson, "metas", pMetaArr)); tDeleteMqBatchMetaRsp(&rsp); char* fullStr = cJSON_PrintUnformatted(pJson); cJSON_Delete(pJson); diff --git a/source/client/test/clientTests.cpp b/source/client/test/clientTests.cpp index 307ef7e06f..7cb7640907 100644 --- a/source/client/test/clientTests.cpp +++ b/source/client/test/clientTests.cpp @@ -300,7 +300,13 @@ void* doConsumeData(void* param) { int main(int argc, char** argv) { testing::InitGoogleTest(&argc, argv); if (argc > 1) { - numOfThreads = atoi(argv[1]); + //numOfThreads = atoi(argv[1]); + int32_t code = taosStr2int32(argv[1], &numOfThreads); + if (code != 0) { + return code; + } + + } numOfThreads = TMAX(numOfThreads, 1); diff --git a/source/common/src/cos.c b/source/common/src/cos.c index a7e69ddc4c..3c72e21a4f 100644 --- a/source/common/src/cos.c +++ b/source/common/src/cos.c @@ -261,7 +261,7 @@ static void responseCompleteCallback(S3Status status, const S3ErrorDetails *erro for (int i = 0; i < error->extraDetailsCount; i++) { if (elen - len > 0) { len += tsnprintf(&(cbd->err_msg[len]), elen - len, " %s: %s\n", error->extraDetails[i].name, - error->extraDetails[i].value); + error->extraDetails[i].value); } } } @@ -742,9 +742,9 @@ upload: TAOS_CHECK_GOTO(TAOS_SYSTEM_ERROR(EIO), &lino, _exit); } n = tsnprintf(buf, sizeof(buf), - "%d" - "%s", - i + 1, manager.etags[i]); + "%d" + "%s", + i + 1, manager.etags[i]); size += growbuffer_append(&(manager.gb), buf, n); } size += growbuffer_append(&(manager.gb), "", strlen("")); @@ -908,10 +908,10 @@ upload: int n; for (int i = 0; i < cp.part_num; ++i) { n = tsnprintf(buf, sizeof(buf), - "%d" - "%s", - // i + 1, manager.etags[i]); - cp.parts[i].index + 1, cp.parts[i].etag); + "%d" + "%s", + // i + 1, manager.etags[i]); + cp.parts[i].index + 1, cp.parts[i].etag); size += growbuffer_append(&(manager.gb), buf, n); } size += growbuffer_append(&(manager.gb), "", strlen("")); @@ -1916,7 +1916,8 @@ void s3EvictCache(const char *path, long object_size) { } long s3Size(const char *object_name) { - long size = 0; + int32_t code = 0; + long size = 0; cos_pool_t *p = NULL; int is_cname = 0; @@ -1941,7 +1942,10 @@ long s3Size(const char *object_name) { if (cos_status_is_ok(s)) { char *content_length_str = (char *)apr_table_get(resp_headers, COS_CONTENT_LENGTH); if (content_length_str != NULL) { - size = atol(content_length_str); + code = taosStr2Int64(content_length_str, &size); + if (code != 0) { + cos_warn_log("parse content length failed since %s\n", tstrerror(code)); + } } cos_warn_log("head object succeeded: %ld\n", size); } else { diff --git 
a/source/common/src/tcol.c b/source/common/src/tcol.c index a23385aba0..6a8fca0c48 100644 --- a/source/common/src/tcol.c +++ b/source/common/src/tcol.c @@ -251,7 +251,7 @@ bool checkColumnEncode(char encode[TSDB_CL_COMPRESS_OPTION_LEN]) { } bool checkColumnEncodeOrSetDefault(uint8_t type, char encode[TSDB_CL_COMPRESS_OPTION_LEN]) { if (0 == strlen(encode)) { - strncpy(encode, getDefaultEncodeStr(type), TSDB_CL_COMPRESS_OPTION_LEN); + tstrncpy(encode, getDefaultEncodeStr(type), TSDB_CL_COMPRESS_OPTION_LEN); return true; } return checkColumnEncode(encode) && validColEncode(type, columnEncodeVal(encode)); @@ -268,7 +268,7 @@ bool checkColumnCompress(char compress[TSDB_CL_COMPRESS_OPTION_LEN]) { } bool checkColumnCompressOrSetDefault(uint8_t type, char compress[TSDB_CL_COMPRESS_OPTION_LEN]) { if (0 == strlen(compress)) { - strncpy(compress, getDefaultCompressStr(type), TSDB_CL_COMPRESS_OPTION_LEN); + tstrncpy(compress, getDefaultCompressStr(type), TSDB_CL_COMPRESS_OPTION_LEN); return true; } @@ -290,7 +290,7 @@ bool checkColumnLevel(char level[TSDB_CL_COMPRESS_OPTION_LEN]) { } bool checkColumnLevelOrSetDefault(uint8_t type, char level[TSDB_CL_COMPRESS_OPTION_LEN]) { if (0 == strlen(level)) { - strncpy(level, getDefaultLevelStr(type), TSDB_CL_COMPRESS_OPTION_LEN); + tstrncpy(level, getDefaultLevelStr(type), TSDB_CL_COMPRESS_OPTION_LEN); return true; } return checkColumnLevel(level) && validColCompressLevel(type, columnLevelVal(level)); @@ -314,7 +314,7 @@ void setColLevel(uint32_t* compress, uint8_t level) { int32_t setColCompressByOption(uint8_t type, uint8_t encode, uint16_t compressType, uint8_t level, bool check, uint32_t* compress) { - if(compress == NULL) return TSDB_CODE_TSC_ENCODE_PARAM_ERROR; + if (compress == NULL) return TSDB_CODE_TSC_ENCODE_PARAM_ERROR; if (check && !validColEncode(type, encode)) return TSDB_CODE_TSC_ENCODE_PARAM_ERROR; setColEncode(compress, encode); diff --git a/source/common/src/tmisce.c b/source/common/src/tmisce.c index 8988fab56a..5ed0c78569 100644 --- a/source/common/src/tmisce.c +++ b/source/common/src/tmisce.c @@ -22,12 +22,15 @@ int32_t taosGetFqdnPortFromEp(const char* ep, SEp* pEp) { pEp->port = 0; memset(pEp->fqdn, 0, TSDB_FQDN_LEN); - strncpy(pEp->fqdn, ep, TSDB_FQDN_LEN - 1); + tstrncpy(pEp->fqdn, ep, TSDB_FQDN_LEN); char* temp = strchr(pEp->fqdn, ':'); if (temp) { *temp = 0; - pEp->port = atoi(temp + 1); + pEp->port = taosStr2UInt16(temp + 1, NULL, 10); + if (pEp->port < 0) { + return TSDB_CODE_INVALID_PARA; + } } if (pEp->port == 0) { @@ -282,7 +285,7 @@ int32_t dumpConfToDataBlock(SSDataBlock* pBlock, int32_t startCol) { locked = 1; while ((pItem = cfgNextIter(pIter)) != NULL) { -_start: + _start: col = startCol; // GRANT_CFG_SKIP; @@ -297,11 +300,11 @@ _start: TAOS_CHECK_GOTO(colDataSetVal(pColInfo, numOfRows, name, false), NULL, _exit); - char value[TSDB_CONFIG_PATH_LEN + VARSTR_HEADER_SIZE] = {0}; - int32_t valueLen = 0; + char value[TSDB_CONFIG_PATH_LEN + VARSTR_HEADER_SIZE] = {0}; + int32_t valueLen = 0; SDiskCfg* pDiskCfg = NULL; if (strcasecmp(pItem->name, "dataDir") == 0 && exSize > 0) { - char* buf = &value[VARSTR_HEADER_SIZE]; + char* buf = &value[VARSTR_HEADER_SIZE]; pDiskCfg = taosArrayGet(pItem->array, index); valueLen = tsnprintf(buf, TSDB_CONFIG_PATH_LEN, "%s", pDiskCfg->dir); index++; @@ -334,7 +337,7 @@ _start: if (strcasecmp(pItem->name, "dataDir") == 0 && pDiskCfg) { char* buf = &info[VARSTR_HEADER_SIZE]; valueLen = tsnprintf(buf, TSDB_CONFIG_INFO_LEN, "level %d primary %d disabled %" PRIi8, pDiskCfg->level, - pDiskCfg->primary, 
pDiskCfg->disable); + pDiskCfg->primary, pDiskCfg->disable); } else { valueLen = 0; } @@ -351,7 +354,7 @@ _start: if (index > 0 && index <= exSize) { goto _start; } -} + } pBlock->info.rows = numOfRows; _exit: if (locked) cfgUnLock(pConf); diff --git a/source/dnode/mgmt/exe/dmMain.c b/source/dnode/mgmt/exe/dmMain.c index ade5e16894..a30a79fa73 100644 --- a/source/dnode/mgmt/exe/dmMain.c +++ b/source/dnode/mgmt/exe/dmMain.c @@ -251,7 +251,7 @@ static int32_t dmParseArgs(int32_t argc, char const *argv[]) { printf("ERROR: Encrypt key overflow, it should be at most %d characters\n", ENCRYPT_KEY_LEN); return TSDB_CODE_INVALID_CFG; } - tstrncpy(global.encryptKey, argv[i], ENCRYPT_KEY_LEN); + tstrncpy(global.encryptKey, argv[i], ENCRYPT_KEY_LEN + 1); } else { printf("'-y' requires a parameter\n"); return TSDB_CODE_INVALID_CFG; diff --git a/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c b/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c index 78cc35a62c..82f29ac40b 100644 --- a/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c +++ b/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c @@ -114,7 +114,7 @@ static void dmMayShouldUpdateAnalFunc(SDnodeMgmt *pMgmt, int64_t newVer) { .pCont = pHead, .contLen = contLen, .msgType = TDMT_MND_RETRIEVE_ANAL_ALGO, - .info.ahandle = (void *)0x9527, + .info.ahandle = 0, .info.refId = 0, .info.noResp = 0, .info.handle = 0, diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c index 90b3f0025d..3b399e10d4 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c @@ -186,7 +186,7 @@ static void vmGenerateVnodeCfg(SCreateVnodeReq *pCreate, SVnodeCfg *pCfg) { #if defined(TD_ENTERPRISE) pCfg->tsdbCfg.encryptAlgorithm = pCreate->encryptAlgorithm; if (pCfg->tsdbCfg.encryptAlgorithm == DND_CA_SM4) { - strncpy(pCfg->tsdbCfg.encryptKey, tsEncryptKey, ENCRYPT_KEY_LEN); + tstrncpy(pCfg->tsdbCfg.encryptKey, tsEncryptKey, ENCRYPT_KEY_LEN + 1); } #else pCfg->tsdbCfg.encryptAlgorithm = 0; @@ -202,7 +202,7 @@ static void vmGenerateVnodeCfg(SCreateVnodeReq *pCreate, SVnodeCfg *pCfg) { #if defined(TD_ENTERPRISE) pCfg->walCfg.encryptAlgorithm = pCreate->encryptAlgorithm; if (pCfg->walCfg.encryptAlgorithm == DND_CA_SM4) { - strncpy(pCfg->walCfg.encryptKey, tsEncryptKey, ENCRYPT_KEY_LEN); + tstrncpy(pCfg->walCfg.encryptKey, tsEncryptKey, ENCRYPT_KEY_LEN + 1); } #else pCfg->walCfg.encryptAlgorithm = 0; @@ -898,7 +898,7 @@ int32_t vmProcessArbHeartBeatReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { size_t size = taosArrayGetSize(arbHbReq.hbMembers); arbHbRsp.dnodeId = pMgmt->pData->dnodeId; - strncpy(arbHbRsp.arbToken, arbHbReq.arbToken, TSDB_ARB_TOKEN_SIZE); + tstrncpy(arbHbRsp.arbToken, arbHbReq.arbToken, TSDB_ARB_TOKEN_SIZE); arbHbRsp.hbMembers = taosArrayInit(size, sizeof(SVArbHbRspMember)); if (arbHbRsp.hbMembers == NULL) { goto _OVER; diff --git a/source/dnode/mgmt/node_mgmt/src/dmMgmt.c b/source/dnode/mgmt/node_mgmt/src/dmMgmt.c index 9f1c292a90..02cfa2d43b 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmMgmt.c +++ b/source/dnode/mgmt/node_mgmt/src/dmMgmt.c @@ -179,7 +179,7 @@ int32_t dmInitVars(SDnode *pDnode) { //code = taosGetCryptKey(tsAuthCode, pData->machineId, tsCryptKey); code = 0; - strncpy(tsEncryptKey, tsAuthCode, 16); + tstrncpy(tsEncryptKey, tsAuthCode, 16); if (code != 0) { if(code == -1){ @@ -220,6 +220,7 @@ int32_t dmInitVars(SDnode *pDnode) { } extern SMonVloadInfo tsVinfo; + void dmClearVars(SDnode *pDnode) { for (EDndNodeType ntype = DNODE; ntype < NODE_END; ++ntype) { SMgmtWrapper *pWrapper = 
&pDnode->wrappers[ntype]; diff --git a/source/dnode/mgmt/node_util/src/dmFile.c b/source/dnode/mgmt/node_util/src/dmFile.c index 1da13f72cd..26f45d2fb8 100644 --- a/source/dnode/mgmt/node_util/src/dmFile.c +++ b/source/dnode/mgmt/node_util/src/dmFile.c @@ -230,7 +230,7 @@ static int32_t dmWriteCheckCodeFile(char *file, char *realfile, char *key, bool } SCryptOpts opts; - strncpy(opts.key, key, ENCRYPT_KEY_LEN); + tstrncpy(opts.key, key, ENCRYPT_KEY_LEN + 1); opts.len = len; opts.source = DM_KEY_INDICATOR; opts.result = result; @@ -349,7 +349,7 @@ static int32_t dmCompareEncryptKey(char *file, char *key, bool toLogFile) { } SCryptOpts opts = {0}; - strncpy(opts.key, key, ENCRYPT_KEY_LEN); + tstrncpy(opts.key, key, ENCRYPT_KEY_LEN + 1); opts.len = len; opts.source = content; opts.result = result; @@ -551,7 +551,7 @@ int32_t dmGetEncryptKey() { goto _OVER; } - strncpy(tsEncryptKey, encryptKey, ENCRYPT_KEY_LEN); + strncpy(tsEncryptKey, encryptKey, ENCRYPT_KEY_LEN + 1); taosMemoryFreeClear(encryptKey); tsEncryptionKeyChksum = taosCalcChecksum(0, tsEncryptKey, strlen(tsEncryptKey)); tsEncryptionKeyStat = ENCRYPT_KEY_STAT_LOADED; diff --git a/source/dnode/mnode/impl/src/mndArbGroup.c b/source/dnode/mnode/impl/src/mndArbGroup.c index 0192044e67..3879e8e6e2 100644 --- a/source/dnode/mnode/impl/src/mndArbGroup.c +++ b/source/dnode/mnode/impl/src/mndArbGroup.c @@ -1315,7 +1315,7 @@ static int32_t mndRetrieveArbGroups(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock continue; } char dbNameInGroup[TSDB_DB_FNAME_LEN]; - strncpy(dbNameInGroup, pVgObj->dbName, TSDB_DB_FNAME_LEN); + tstrncpy(dbNameInGroup, pVgObj->dbName, TSDB_DB_FNAME_LEN); sdbRelease(pSdb, pVgObj); char dbname[TSDB_DB_NAME_LEN + VARSTR_HEADER_SIZE] = {0}; diff --git a/source/dnode/mnode/impl/src/mndCompact.c b/source/dnode/mnode/impl/src/mndCompact.c index 106680da7f..4c2ff9befc 100644 --- a/source/dnode/mnode/impl/src/mndCompact.c +++ b/source/dnode/mnode/impl/src/mndCompact.c @@ -244,7 +244,7 @@ int32_t mndCompactGetDbName(SMnode *pMnode, int32_t compactId, char *dbname, int TAOS_RETURN(code); } - (void)strncpy(dbname, pCompact->dbname, len); + tstrncpy(dbname, pCompact->dbname, len); mndReleaseCompact(pMnode, pCompact); TAOS_RETURN(code); } @@ -321,7 +321,7 @@ int32_t mndRetrieveCompact(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, TAOS_CHECK_GOTO(tNameFromString(&name, pCompact->dbname, T_NAME_ACCT | T_NAME_DB), &lino, _OVER); (void)tNameGetDbName(&name, varDataVal(tmpBuf)); } else { - (void)strncpy(varDataVal(tmpBuf), pCompact->dbname, TSDB_SHOW_SQL_LEN); + tstrncpy(varDataVal(tmpBuf), pCompact->dbname, TSDB_SHOW_SQL_LEN); } varDataSetLen(tmpBuf, strlen(varDataVal(tmpBuf))); RETRIEVE_CHECK_GOTO(colDataSetVal(pColInfo, numOfRows, (const char *)tmpBuf, false), pCompact, &lino, _OVER); @@ -516,11 +516,14 @@ int32_t mndProcessKillCompactReq(SRpcMsg *pReq) { code = TSDB_CODE_ACTION_IN_PROGRESS; - char obj[TSDB_INT32_ID_LEN] = {0}; - (void)sprintf(obj, "%d", pCompact->compactId); - - auditRecord(pReq, pMnode->clusterId, "killCompact", pCompact->dbname, obj, killCompactReq.sql, killCompactReq.sqlLen); - + char obj[TSDB_INT32_ID_LEN] = {0}; + int32_t nBytes = snprintf(obj, sizeof(obj), "%d", pCompact->compactId); + if ((uint32_t)nBytes < sizeof(obj)) { + auditRecord(pReq, pMnode->clusterId, "killCompact", pCompact->dbname, obj, killCompactReq.sql, + killCompactReq.sqlLen); + } else { + mError("compact:%" PRId32 " failed to audit since %s", pCompact->compactId, tstrerror(TSDB_CODE_OUT_OF_RANGE)); + } _OVER: if (code != 0 && code != 
TSDB_CODE_ACTION_IN_PROGRESS) { mError("failed to kill compact %" PRId32 " since %s", killCompactReq.compactId, terrstr()); @@ -641,7 +644,7 @@ void mndCompactSendProgressReq(SMnode *pMnode, SCompactObj *pCompact) { char detail[1024] = {0}; int32_t len = tsnprintf(detail, sizeof(detail), "msgType:%s numOfEps:%d inUse:%d", - TMSG_INFO(TDMT_VND_QUERY_COMPACT_PROGRESS), epSet.numOfEps, epSet.inUse); + TMSG_INFO(TDMT_VND_QUERY_COMPACT_PROGRESS), epSet.numOfEps, epSet.inUse); for (int32_t i = 0; i < epSet.numOfEps; ++i) { len += tsnprintf(detail + len, sizeof(detail) - len, " ep:%d-%s:%u", i, epSet.eps[i].fqdn, epSet.eps[i].port); } diff --git a/source/dnode/mnode/impl/src/mndDb.c b/source/dnode/mnode/impl/src/mndDb.c index 0d17ccd0b0..cff5bd4321 100644 --- a/source/dnode/mnode/impl/src/mndDb.c +++ b/source/dnode/mnode/impl/src/mndDb.c @@ -16,6 +16,7 @@ #define _DEFAULT_SOURCE #include "mndDb.h" #include "audit.h" +#include "command.h" #include "mndArbGroup.h" #include "mndCluster.h" #include "mndDnode.h" @@ -34,7 +35,6 @@ #include "systable.h" #include "thttp.h" #include "tjson.h" -#include "command.h" #define DB_VER_NUMBER 1 #define DB_RESERVE_SIZE 27 @@ -416,7 +416,12 @@ static int32_t mndCheckDbName(const char *dbName, SUserObj *pUser) { return TSDB_CODE_MND_INVALID_DB; } - int32_t acctId = atoi(dbName); + int32_t acctId; + int32_t code = taosStr2int32(dbName, &acctId); + if (code != 0) { + return code; + } + if (acctId != pUser->acctId) { return TSDB_CODE_MND_INVALID_DB_ACCT; } @@ -1560,8 +1565,8 @@ static int32_t mndBuildDropVgroupAction(SMnode *pMnode, STrans *pTrans, SDbObj * static int32_t mndSetDropDbRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj *pDb) { int32_t code = 0; - SSdb *pSdb = pMnode->pSdb; - void *pIter = NULL; + SSdb *pSdb = pMnode->pSdb; + void *pIter = NULL; while (1) { SVgObj *pVgroup = NULL; @@ -1941,9 +1946,9 @@ int32_t mndValidateDbInfo(SMnode *pMnode, SDbCacheInfo *pDbs, int32_t numOfDbs, continue; } else { mTrace("db:%s, valid dbinfo, vgVersion:%d cfgVersion:%d stateTs:%" PRId64 - " numOfTables:%d, changed to vgVersion:%d cfgVersion:%d stateTs:%" PRId64 " numOfTables:%d", - pDbCacheInfo->dbFName, pDbCacheInfo->vgVersion, pDbCacheInfo->cfgVersion, pDbCacheInfo->stateTs, - pDbCacheInfo->numOfTable, pDb->vgVersion, pDb->cfgVersion, pDb->stateTs, numOfTable); + " numOfTables:%d, changed to vgVersion:%d cfgVersion:%d stateTs:%" PRId64 " numOfTables:%d", + pDbCacheInfo->dbFName, pDbCacheInfo->vgVersion, pDbCacheInfo->cfgVersion, pDbCacheInfo->stateTs, + pDbCacheInfo->numOfTable, pDb->vgVersion, pDb->cfgVersion, pDb->stateTs, numOfTable); } if (pDbCacheInfo->cfgVersion < pDb->cfgVersion) { @@ -1955,7 +1960,7 @@ int32_t mndValidateDbInfo(SMnode *pMnode, SDbCacheInfo *pDbs, int32_t numOfDbs, rsp.pTsmaRsp = taosMemoryCalloc(1, sizeof(STableTSMAInfoRsp)); if (rsp.pTsmaRsp) rsp.pTsmaRsp->pTsmas = taosArrayInit(4, POINTER_BYTES); if (rsp.pTsmaRsp && rsp.pTsmaRsp->pTsmas) { - bool exist = false; + bool exist = false; int32_t code = mndGetDbTsmas(pMnode, 0, pDb->uid, rsp.pTsmaRsp, &exist); if (TSDB_CODE_SUCCESS != code) { mndReleaseDb(pMnode, pDb); @@ -2386,7 +2391,8 @@ static void mndDumpDbInfoData(SMnode *pMnode, SSDataBlock *pBlock, SDbObj *pDb, TAOS_CHECK_GOTO(colDataSetVal(pColInfo, rows, (const char *)strictVstr, false), &lino, _OVER); char durationVstr[128] = {0}; - int32_t len = formatDurationOrKeep(&durationVstr[VARSTR_HEADER_SIZE], sizeof(durationVstr) - VARSTR_HEADER_SIZE, pDb->cfg.daysPerFile); + int32_t len = 
formatDurationOrKeep(&durationVstr[VARSTR_HEADER_SIZE], sizeof(durationVstr) - VARSTR_HEADER_SIZE, + pDb->cfg.daysPerFile); varDataSetLen(durationVstr, len); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); @@ -2402,9 +2408,9 @@ static void mndDumpDbInfoData(SMnode *pMnode, SSDataBlock *pBlock, SDbObj *pDb, int32_t lenKeep2 = formatDurationOrKeep(keep2Str, sizeof(keep2Str), pDb->cfg.daysToKeep2); if (pDb->cfg.daysToKeep0 > pDb->cfg.daysToKeep1 || pDb->cfg.daysToKeep0 > pDb->cfg.daysToKeep2) { - len = sprintf(&keepVstr[VARSTR_HEADER_SIZE], "%s,%s,%s", keep1Str, keep2Str, keep0Str); + len = sprintf(&keepVstr[VARSTR_HEADER_SIZE], "%s,%s,%s", keep1Str, keep2Str, keep0Str); } else { - len = sprintf(&keepVstr[VARSTR_HEADER_SIZE], "%s,%s,%s", keep0Str, keep1Str, keep2Str); + len = sprintf(&keepVstr[VARSTR_HEADER_SIZE], "%s,%s,%s", keep0Str, keep1Str, keep2Str); } varDataSetLen(keepVstr, len); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); diff --git a/source/dnode/mnode/impl/src/mndDnode.c b/source/dnode/mnode/impl/src/mndDnode.c index 8931558874..9ed5861d50 100644 --- a/source/dnode/mnode/impl/src/mndDnode.c +++ b/source/dnode/mnode/impl/src/mndDnode.c @@ -462,7 +462,7 @@ int32_t mndGetDnodeData(SMnode *pMnode, SArray *pDnodeInfo) { dInfo.isMnode = 0; } - if(taosArrayPush(pDnodeInfo, &dInfo) == NULL){ + if (taosArrayPush(pDnodeInfo, &dInfo) == NULL) { code = terrno; sdbCancelFetch(pSdb, pIter); break; @@ -471,12 +471,12 @@ int32_t mndGetDnodeData(SMnode *pMnode, SArray *pDnodeInfo) { TAOS_RETURN(code); } -#define CHECK_MONITOR_PARA(para,err) \ -if (pCfg->monitorParas.para != para) { \ - mError("dnode:%d, para:%d inconsistent with cluster:%d", pDnode->id, pCfg->monitorParas.para, para); \ - terrno = err; \ - return err;\ -} +#define CHECK_MONITOR_PARA(para, err) \ + if (pCfg->monitorParas.para != para) { \ + mError("dnode:%d, para:%d inconsistent with cluster:%d", pDnode->id, pCfg->monitorParas.para, para); \ + terrno = err; \ + return err; \ + } static int32_t mndCheckClusterCfgPara(SMnode *pMnode, SDnodeObj *pDnode, const SClusterCfg *pCfg) { CHECK_MONITOR_PARA(tsEnableMonitor, DND_REASON_STATUS_MONITOR_SWITCH_NOT_MATCH); @@ -487,7 +487,8 @@ static int32_t mndCheckClusterCfgPara(SMnode *pMnode, SDnodeObj *pDnode, const S CHECK_MONITOR_PARA(tsSlowLogScope, DND_REASON_STATUS_MONITOR_SLOW_LOG_SCOPE_NOT_MATCH); if (0 != strcasecmp(pCfg->monitorParas.tsSlowLogExceptDb, tsSlowLogExceptDb)) { - mError("dnode:%d, tsSlowLogExceptDb:%s inconsistent with cluster:%s", pDnode->id, pCfg->monitorParas.tsSlowLogExceptDb, tsSlowLogExceptDb); + mError("dnode:%d, tsSlowLogExceptDb:%s inconsistent with cluster:%s", pDnode->id, + pCfg->monitorParas.tsSlowLogExceptDb, tsSlowLogExceptDb); terrno = TSDB_CODE_DNODE_INVALID_MONITOR_PARAS; return DND_REASON_STATUS_MONITOR_NOT_MATCH; } @@ -583,8 +584,8 @@ static bool mndUpdateMnodeState(SMnodeObj *pObj, SMnodeLoad *pMload) { return stateChanged; } -extern char* tsMonFwUri; -extern char* tsMonSlowLogUri; +extern char *tsMonFwUri; +extern char *tsMonSlowLogUri; static int32_t mndProcessStatisReq(SRpcMsg *pReq) { SMnode *pMnode = pReq->info.node; SStatisReq statisReq = {0}; @@ -596,9 +597,9 @@ static int32_t mndProcessStatisReq(SRpcMsg *pReq) { mInfo("process statis req,\n %s", statisReq.pCont); } - if (statisReq.type == MONITOR_TYPE_COUNTER){ + if (statisReq.type == MONITOR_TYPE_COUNTER) { monSendContent(statisReq.pCont, tsMonFwUri); - }else if(statisReq.type == MONITOR_TYPE_SLOW_LOG){ + } else if (statisReq.type == MONITOR_TYPE_SLOW_LOG) { 
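Two bounded-string patterns recur in the hunks above and below this point. Most `strncpy` calls are replaced with `tstrncpy`, which (judging by the `ENCRYPT_KEY_LEN + 1` sizes now being passed) takes the full destination size and guarantees NUL-termination, and the audit paths (`killCompact`, `createMnode`, login) now format their id strings with `snprintf` and call `auditRecord` only when the result was not truncated. The helpers themselves are not defined in this diff; a generic sketch of both ideas looks like this.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

// strncpy() neither guarantees a terminating NUL nor reports truncation; a
// tstrncpy-style copy always terminates within `size` bytes of the destination.
static void boundedCopy(char *dst, const char *src, size_t size) {
  if (dst == NULL || src == NULL || size == 0) return;
  strncpy(dst, src, size - 1);
  dst[size - 1] = '\0';
}

// Format an id into a fixed buffer and report whether it fit, mirroring the
// `(uint32_t)nBytes < sizeof(obj)` guard the diff adds before auditRecord().
static bool formatId(char *buf, size_t size, int32_t id) {
  int n = snprintf(buf, size, "%d", id);
  return n >= 0 && (size_t)n < size;  // false means the value was truncated
}
```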
monSendContent(statisReq.pCont, tsMonSlowLogUri); } @@ -1058,27 +1059,27 @@ _OVER: TAOS_RETURN(code); } -static void getSlowLogScopeString(int32_t scope, char* result){ - if(scope == SLOW_LOG_TYPE_NULL) { +static void getSlowLogScopeString(int32_t scope, char *result) { + if (scope == SLOW_LOG_TYPE_NULL) { (void)strcat(result, "NONE"); return; } - while(scope > 0){ - if(scope & SLOW_LOG_TYPE_QUERY) { + while (scope > 0) { + if (scope & SLOW_LOG_TYPE_QUERY) { (void)strcat(result, "QUERY"); scope &= ~SLOW_LOG_TYPE_QUERY; - } else if(scope & SLOW_LOG_TYPE_INSERT) { + } else if (scope & SLOW_LOG_TYPE_INSERT) { (void)strcat(result, "INSERT"); scope &= ~SLOW_LOG_TYPE_INSERT; - } else if(scope & SLOW_LOG_TYPE_OTHERS) { + } else if (scope & SLOW_LOG_TYPE_OTHERS) { (void)strcat(result, "OTHERS"); scope &= ~SLOW_LOG_TYPE_OTHERS; - } else{ + } else { (void)printf("invalid slow log scope:%d", scope); return; } - if(scope > 0) { + if (scope > 0) { (void)strcat(result, "|"); } } @@ -1440,7 +1441,7 @@ _OVER: static int32_t mndMCfg2DCfg(SMCfgDnodeReq *pMCfgReq, SDCfgDnodeReq *pDCfgReq) { int32_t code = 0; - char *p = pMCfgReq->config; + char *p = pMCfgReq->config; while (*p) { if (*p == ' ') { break; @@ -1449,7 +1450,7 @@ static int32_t mndMCfg2DCfg(SMCfgDnodeReq *pMCfgReq, SDCfgDnodeReq *pDCfgReq) { } size_t optLen = p - pMCfgReq->config; - (void)strncpy(pDCfgReq->config, pMCfgReq->config, optLen); + tstrncpy(pDCfgReq->config, pMCfgReq->config, optLen + 1); pDCfgReq->config[optLen] = 0; if (' ' == pMCfgReq->config[optLen]) { @@ -1538,7 +1539,7 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) { snprintf(dcfgReq.value, TSDB_DNODE_VALUE_LEN, "%d", flag); #endif } else { - TAOS_CHECK_GOTO (mndMCfg2DCfg(&cfgReq, &dcfgReq), NULL, _err_out); + TAOS_CHECK_GOTO(mndMCfg2DCfg(&cfgReq, &dcfgReq), NULL, _err_out); if (strlen(dcfgReq.config) > TSDB_DNODE_CONFIG_LEN) { mError("dnode:%d, failed to config since config is too long", cfgReq.dnodeId); code = TSDB_CODE_INVALID_CFG; @@ -1881,11 +1882,19 @@ static int32_t mndMCfgGetValInt32(SMCfgDnodeReq *pMCfgReq, int32_t optLen, int32 if (' ' == pMCfgReq->config[optLen]) { // 'key value' if (strlen(pMCfgReq->value) != 0) goto _err; - *pOutValue = atoi(pMCfgReq->config + optLen + 1); + code = taosStr2int32(pMCfgReq->config + optLen + 1, pOutValue); + if (code != 0) { + mError("dnode:%d, failed to get cfg since %s", pMCfgReq->dnodeId, tstrerror(code)); + goto _err; + } } else { // 'key' 'value' if (strlen(pMCfgReq->value) == 0) goto _err; - *pOutValue = atoi(pMCfgReq->value); + code = taosStr2int32(pMCfgReq->value, pOutValue); + if (code != 0) { + mError("dnode:%d, failed to get cfg since %s", pMCfgReq->dnodeId, tstrerror(code)); + goto _err; + } } TAOS_RETURN(code); diff --git a/source/dnode/mnode/impl/src/mndMain.c b/source/dnode/mnode/impl/src/mndMain.c index 9dd43225b1..83e8abf05e 100644 --- a/source/dnode/mnode/impl/src/mndMain.c +++ b/source/dnode/mnode/impl/src/mndMain.c @@ -15,8 +15,8 @@ #define _DEFAULT_SOURCE #include "mndAcct.h" -#include "mndArbGroup.h" #include "mndAnode.h" +#include "mndArbGroup.h" #include "mndCluster.h" #include "mndCompact.h" #include "mndCompactDetail.h" @@ -254,7 +254,7 @@ static void mndIncreaseUpTime(SMnode *pMnode) { .pCont = pReq, .contLen = contLen, .info.notFreeAhandle = 1, - .info.ahandle = (void *)0x9527}; + .info.ahandle = 0}; // TODO check return value if (tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg) < 0) { mError("failed to put into write-queue since %s, line:%d", terrstr(), __LINE__); @@ -530,7 +530,7 @@ static 
int32_t mndInitWal(SMnode *pMnode) { code = TSDB_CODE_DNODE_INVALID_ENCRYPTKEY; TAOS_RETURN(code); } else { - (void)strncpy(cfg.encryptKey, tsEncryptKey, ENCRYPT_KEY_LEN); + tstrncpy(cfg.encryptKey, tsEncryptKey, ENCRYPT_KEY_LEN + 1); } } #endif diff --git a/source/dnode/mnode/impl/src/mndMnode.c b/source/dnode/mnode/impl/src/mndMnode.c index 6b1c97b399..6ab3f3bb5c 100644 --- a/source/dnode/mnode/impl/src/mndMnode.c +++ b/source/dnode/mnode/impl/src/mndMnode.c @@ -14,10 +14,10 @@ */ #define _DEFAULT_SOURCE +#include "mndMnode.h" #include "audit.h" #include "mndCluster.h" #include "mndDnode.h" -#include "mndMnode.h" #include "mndPrivilege.h" #include "mndShow.h" #include "mndSync.h" @@ -722,10 +722,13 @@ static int32_t mndProcessCreateMnodeReq(SRpcMsg *pReq) { code = mndCreateMnode(pMnode, pReq, pDnode, &createReq); if (code == 0) code = TSDB_CODE_ACTION_IN_PROGRESS; - char obj[40] = {0}; - sprintf(obj, "%d", createReq.dnodeId); - - auditRecord(pReq, pMnode->clusterId, "createMnode", "", obj, createReq.sql, createReq.sqlLen); + char obj[40] = {0}; + int32_t bytes = snprintf(obj, sizeof(obj), "%d", createReq.dnodeId); + if ((uint32_t)bytes < sizeof(obj)) { + auditRecord(pReq, pMnode->clusterId, "createMnode", "", obj, createReq.sql, createReq.sqlLen); + } else { + mError("mnode:%d, failed to audit create req since %s", createReq.dnodeId, tstrerror(TSDB_CODE_OUT_OF_RANGE)); + } _OVER: if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) { @@ -917,7 +920,9 @@ static int32_t mndRetrieveMnodes(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); code = colDataSetVal(pColInfo, numOfRows, (const char *)&pObj->id, false); if (code != 0) { - mError("mnode:%d, failed to set col data val since %s", pObj->id, terrstr()); + mError("mnode:%d, failed to set col data val since %s", pObj->id, tstrerror(code)); + sdbCancelFetch(pSdb, pShow->pIter); + sdbRelease(pSdb, pObj); goto _out; } @@ -927,7 +932,9 @@ static int32_t mndRetrieveMnodes(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); code = colDataSetVal(pColInfo, numOfRows, b1, false); if (code != 0) { - mError("mnode:%d, failed to set col data val since %s", pObj->id, terrstr()); + mError("mnode:%d, failed to set col data val since %s", pObj->id, tstrerror(code)); + sdbCancelFetch(pSdb, pShow->pIter); + sdbRelease(pSdb, pObj); goto _out; } @@ -947,10 +954,8 @@ static int32_t mndRetrieveMnodes(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB STR_WITH_MAXSIZE_TO_VARSTR(b2, role, pShow->pMeta->pSchemas[cols].bytes); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); code = colDataSetVal(pColInfo, numOfRows, (const char *)b2, false); - if (code != 0) { - mError("mnode:%d, failed to set col data val since %s", pObj->id, terrstr()); - goto _out; - } + if (code != 0) goto _err; + const char *status = "ready"; if (objStatus == SDB_STATUS_CREATING) status = "creating"; if (objStatus == SDB_STATUS_DROPPING) status = "dropping"; @@ -959,25 +964,16 @@ static int32_t mndRetrieveMnodes(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB STR_WITH_MAXSIZE_TO_VARSTR(b3, status, pShow->pMeta->pSchemas[cols].bytes); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); code = colDataSetVal(pColInfo, numOfRows, (const char *)b3, false); - if (code != 0) { - mError("mnode:%d, failed to set col data val since %s", pObj->id, terrstr()); - goto _out; - } + if (code != 0) goto _err; pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); code = colDataSetVal(pColInfo, 
numOfRows, (const char *)&pObj->createdTime, false); - if (code != 0) { - mError("mnode:%d, failed to set col data val since %s", pObj->id, terrstr()); - goto _out; - } + if (code != 0) goto _err; int64_t roleTimeMs = (isDnodeOnline) ? pObj->roleTimeMs : 0; pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); code = colDataSetVal(pColInfo, numOfRows, (const char *)&roleTimeMs, false); - if (code != 0) { - mError("mnode:%d, failed to set col data val since %s", pObj->id, terrstr()); - goto _out; - } + if (code != 0) goto _err; numOfRows++; sdbRelease(pSdb, pObj); @@ -988,6 +984,13 @@ static int32_t mndRetrieveMnodes(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB _out: sdbRelease(pSdb, pSelfObj); return numOfRows; + +_err: + mError("mnode:%d, failed to set col data val since %s", pObj->id, tstrerror(code)); + sdbCancelFetch(pSdb, pShow->pIter); + sdbRelease(pSdb, pObj); + sdbRelease(pSdb, pSelfObj); + return numOfRows; } static void mndCancelGetNextMnode(SMnode *pMnode, void *pIter) { diff --git a/source/dnode/mnode/impl/src/mndProfile.c b/source/dnode/mnode/impl/src/mndProfile.c index 21aba8df10..f7e0f5349f 100644 --- a/source/dnode/mnode/impl/src/mndProfile.c +++ b/source/dnode/mnode/impl/src/mndProfile.c @@ -14,12 +14,12 @@ */ #define _DEFAULT_SOURCE +#include "mndProfile.h" #include "audit.h" #include "mndDb.h" #include "mndDnode.h" #include "mndMnode.h" #include "mndPrivilege.h" -#include "mndProfile.h" #include "mndQnode.h" #include "mndShow.h" #include "mndSma.h" @@ -336,10 +336,13 @@ static int32_t mndProcessConnectReq(SRpcMsg *pReq) { code = 0; - char detail[1000] = {0}; - (void)sprintf(detail, "app:%s", connReq.app); - - auditRecord(pReq, pMnode->clusterId, "login", "", "", detail, strlen(detail)); + char detail[1000] = {0}; + int32_t nBytes = snprintf(detail, sizeof(detail), "app:%s", connReq.app); + if ((uint32_t)nBytes < sizeof(detail)) { + auditRecord(pReq, pMnode->clusterId, "login", "", "", detail, strlen(detail)); + } else { + mError("failed to audit logic since %s", tstrerror(TSDB_CODE_OUT_OF_RANGE)); + } _OVER: diff --git a/source/dnode/mnode/impl/src/mndSma.c b/source/dnode/mnode/impl/src/mndSma.c index aadc67c41f..2b5cd684c5 100644 --- a/source/dnode/mnode/impl/src/mndSma.c +++ b/source/dnode/mnode/impl/src/mndSma.c @@ -15,6 +15,7 @@ #define _DEFAULT_SOURCE #include "mndSma.h" +#include "functionMgt.h" #include "mndDb.h" #include "mndDnode.h" #include "mndIndex.h" @@ -31,7 +32,6 @@ #include "mndVgroup.h" #include "parser.h" #include "tname.h" -#include "functionMgt.h" #define TSDB_SMA_VER_NUMBER 1 #define TSDB_SMA_RESERVE_SIZE 64 @@ -48,8 +48,8 @@ static int32_t mndProcessGetTbSmaReq(SRpcMsg *pReq); static int32_t mndRetrieveSma(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows); static void mndDestroySmaObj(SSmaObj *pSmaObj); -static int32_t mndProcessCreateTSMAReq(SRpcMsg* pReq); -static int32_t mndProcessDropTSMAReq(SRpcMsg* pReq); +static int32_t mndProcessCreateTSMAReq(SRpcMsg *pReq); +static int32_t mndProcessDropTSMAReq(SRpcMsg *pReq); // sma and tag index comm func static int32_t mndProcessDropIdxReq(SRpcMsg *pReq); @@ -61,11 +61,11 @@ static void mndCancelRetrieveTSMA(SMnode *pMnode, void *pIter); static int32_t mndProcessGetTbTSMAReq(SRpcMsg *pReq); typedef struct SCreateTSMACxt { - SMnode * pMnode; + SMnode *pMnode; const SRpcMsg *pRpcReq; union { const SMCreateSmaReq *pCreateSmaReq; - const SMDropSmaReq * pDropSmaReq; + const SMDropSmaReq *pDropSmaReq; }; SDbObj *pDb; SStbObj *pSrcStb; @@ -298,16 +298,13 @@ void mndReleaseSma(SMnode *pMnode, 
SSmaObj *pSma) { sdbRelease(pSdb, pSma); } -SDbObj *mndAcquireDbBySma(SMnode *pMnode, const char *db) { - - return mndAcquireDb(pMnode, db); -} +SDbObj *mndAcquireDbBySma(SMnode *pMnode, const char *db) { return mndAcquireDb(pMnode, db); } static void *mndBuildVCreateSmaReq(SMnode *pMnode, SVgObj *pVgroup, SSmaObj *pSma, int32_t *pContLen) { SEncoder encoder = {0}; int32_t contLen = 0; SName name = {0}; - int32_t code = tNameFromString(&name, pSma->name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE); + int32_t code = tNameFromString(&name, pSma->name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE); if (TSDB_CODE_SUCCESS != code) { return NULL; } @@ -368,7 +365,7 @@ static void *mndBuildVDropSmaReq(SMnode *pMnode, SVgObj *pVgroup, SSmaObj *pSma, SEncoder encoder = {0}; int32_t contLen; SName name = {0}; - int32_t code = tNameFromString(&name, pSma->name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE); + int32_t code = tNameFromString(&name, pSma->name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE); if (TSDB_CODE_SUCCESS != code) { terrno = code; return NULL; @@ -424,9 +421,9 @@ static int32_t mndSetCreateSmaRedoLogs(SMnode *pMnode, STrans *pTrans, SSmaObj * TAOS_RETURN(code); } -static int32_t mndSetCreateSmaUndoLogs(SMnode* pMnode, STrans* pTrans, SSmaObj* pSma) { - int32_t code = 0; - SSdbRaw * pUndoRaw = mndSmaActionEncode(pSma); +static int32_t mndSetCreateSmaUndoLogs(SMnode *pMnode, STrans *pTrans, SSmaObj *pSma) { + int32_t code = 0; + SSdbRaw *pUndoRaw = mndSmaActionEncode(pSma); if (!pUndoRaw) { code = TSDB_CODE_MND_RETURN_VALUE_NULL; if (terrno != 0) code = terrno; @@ -670,7 +667,8 @@ static int32_t mndCreateSma(SMnode *pMnode, SRpcMsg *pReq, SMCreateSmaReq *pCrea // check the maxDelay if (streamObj.conf.triggerParam < TSDB_MIN_ROLLUP_MAX_DELAY) { int64_t msInterval = -1; - int32_t code = convertTimeFromPrecisionToUnit(pCreate->interval, pDb->cfg.precision, TIME_UNIT_MILLISECOND, &msInterval); + int32_t code = + convertTimeFromPrecisionToUnit(pCreate->interval, pDb->cfg.precision, TIME_UNIT_MILLISECOND, &msInterval); if (TSDB_CODE_SUCCESS != code) { mError("sma:%s, failed to create since convert time failed: %s", smaObj.name, tstrerror(code)); return code; @@ -792,7 +790,7 @@ static int32_t mndCheckCreateSmaReq(SMCreateSmaReq *pCreate) { } static int32_t mndGetStreamNameFromSmaName(char *streamName, char *smaName) { - SName n; + SName n; int32_t code = tNameFromString(&n, smaName, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE); if (TSDB_CODE_SUCCESS != code) { return code; @@ -984,10 +982,10 @@ static int32_t mndSetDropSmaVgroupRedoActions(SMnode *pMnode, STrans *pTrans, SD } static int32_t mndDropSma(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SSmaObj *pSma) { - int32_t code = -1; - SVgObj *pVgroup = NULL; - SStbObj *pStb = NULL; - STrans *pTrans = NULL; + int32_t code = -1; + SVgObj *pVgroup = NULL; + SStbObj *pStb = NULL; + STrans *pTrans = NULL; SStreamObj *pStream = NULL; pVgroup = mndAcquireVgroup(pMnode, pSma->dstVgId); @@ -1023,7 +1021,6 @@ static int32_t mndDropSma(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SSmaObj *p goto _OVER; } - code = mndAcquireStream(pMnode, streamName, &pStream); if (pStream == NULL || pStream->smaId != pSma->uid || code != 0) { sdbRelease(pMnode->pSdb, pStream); @@ -1124,8 +1121,8 @@ _OVER: int32_t mndDropSmasByDb(SMnode *pMnode, STrans *pTrans, SDbObj *pDb) { int32_t code = 0; - SSdb *pSdb = pMnode->pSdb; - void *pIter = NULL; + SSdb *pSdb = pMnode->pSdb; + void *pIter = NULL; while (1) { SSmaObj *pSma = NULL; @@ -1232,7 +1229,7 @@ static int32_t mndGetSma(SMnode *pMnode, 
SUserIndexReq *indexReq, SUserIndexRsp FOREACH(node, pList) { SFunctionNode *pFunc = (SFunctionNode *)node; extOffset += tsnprintf(rsp->indexExts + extOffset, sizeof(rsp->indexExts) - extOffset - 1, "%s%s", - (extOffset ? "," : ""), pFunc->functionName); + (extOffset ? "," : ""), pFunc->functionName); } *exist = true; @@ -1439,8 +1436,8 @@ static int32_t mndRetrieveSma(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBloc SName smaName = {0}; SName stbName = {0}; - char n2[TSDB_DB_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; - char n3[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; + char n2[TSDB_DB_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; + char n3[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; code = tNameFromString(&smaName, pSma->name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE); char n1[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; if (TSDB_CODE_SUCCESS == code) { @@ -1448,7 +1445,7 @@ static int32_t mndRetrieveSma(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBloc STR_TO_VARSTR(n2, (char *)mndGetDbStr(pSma->db)); code = tNameFromString(&stbName, pSma->stb, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE); } - SColumnInfoData* pColInfo = NULL; + SColumnInfoData *pColInfo = NULL; if (TSDB_CODE_SUCCESS == code) { STR_TO_VARSTR(n3, (char *)tNameGetTableName(&stbName)); @@ -1540,7 +1537,7 @@ static void mndCancelRetrieveIdx(SMnode *pMnode, void *pIter) { taosMemoryFree(p); } -static void initSMAObj(SCreateTSMACxt* pCxt) { +static void initSMAObj(SCreateTSMACxt *pCxt) { memcpy(pCxt->pSma->name, pCxt->pCreateSmaReq->name, TSDB_TABLE_FNAME_LEN); memcpy(pCxt->pSma->stb, pCxt->pCreateSmaReq->stb, TSDB_TABLE_FNAME_LEN); memcpy(pCxt->pSma->db, pCxt->pDb->name, TSDB_DB_FNAME_LEN); @@ -1549,7 +1546,7 @@ static void initSMAObj(SCreateTSMACxt* pCxt) { pCxt->pSma->uid = mndGenerateUid(pCxt->pCreateSmaReq->name, TSDB_TABLE_FNAME_LEN); memcpy(pCxt->pSma->dstTbName, pCxt->targetStbFullName, TSDB_TABLE_FNAME_LEN); - pCxt->pSma->dstTbUid = 0; // not used + pCxt->pSma->dstTbUid = 0; // not used pCxt->pSma->stbUid = pCxt->pSrcStb ? 
pCxt->pSrcStb->uid : pCxt->pCreateSmaReq->normSourceTbUid; pCxt->pSma->dbUid = pCxt->pDb->uid; pCxt->pSma->interval = pCxt->pCreateSmaReq->interval; @@ -1598,7 +1595,7 @@ static int32_t mndCreateTSMABuildCreateStreamReq(SCreateTSMACxt *pCxt) { if (!pCxt->pCreateStreamReq->pTags) { return terrno; } - SField f = {0}; + SField f = {0}; int32_t code = 0; if (pCxt->pSrcStb) { for (int32_t idx = 0; idx < pCxt->pCreateStreamReq->numOfTags - 1; ++idx) { @@ -1626,9 +1623,9 @@ static int32_t mndCreateTSMABuildCreateStreamReq(SCreateTSMACxt *pCxt) { if (TSDB_CODE_SUCCESS == code) { // construct output cols - SNode* pNode; + SNode *pNode; FOREACH(pNode, pCxt->pProjects) { - SExprNode* pExprNode = (SExprNode*)pNode; + SExprNode *pExprNode = (SExprNode *)pNode; f.bytes = pExprNode->resType.bytes; f.type = pExprNode->resType.type; f.flags = COL_SMA_ON; @@ -1642,7 +1639,7 @@ static int32_t mndCreateTSMABuildCreateStreamReq(SCreateTSMACxt *pCxt) { return code; } -static int32_t mndCreateTSMABuildDropStreamReq(SCreateTSMACxt* pCxt) { +static int32_t mndCreateTSMABuildDropStreamReq(SCreateTSMACxt *pCxt) { tstrncpy(pCxt->pDropStreamReq->name, pCxt->streamName, TSDB_STREAM_FNAME_LEN); pCxt->pDropStreamReq->igNotExists = false; pCxt->pDropStreamReq->sql = taosStrdup(pCxt->pDropSmaReq->name); @@ -1685,14 +1682,13 @@ static int32_t mndSetUpdateDbTsmaVersionCommitLogs(SMnode *pMnode, STrans *pTran TAOS_RETURN(sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY)); } -static int32_t mndCreateTSMATxnPrepare(SCreateTSMACxt* pCxt) { +static int32_t mndCreateTSMATxnPrepare(SCreateTSMACxt *pCxt) { int32_t code = -1; STransAction createStreamRedoAction = {0}; STransAction createStreamUndoAction = {0}; STransAction dropStbUndoAction = {0}; SMDropStbReq dropStbReq = {0}; - STrans *pTrans = - mndTransCreate(pCxt->pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_TSMA, pCxt->pRpcReq, "create-tsma"); + STrans *pTrans = mndTransCreate(pCxt->pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_TSMA, pCxt->pRpcReq, "create-tsma"); if (!pTrans) { code = terrno; goto _OVER; @@ -1713,7 +1709,9 @@ static int32_t mndCreateTSMATxnPrepare(SCreateTSMACxt* pCxt) { code = terrno; goto _OVER; } - if (createStreamRedoAction.contLen != tSerializeSCMCreateStreamReq(createStreamRedoAction.pCont, createStreamRedoAction.contLen, pCxt->pCreateStreamReq)) { + if (createStreamRedoAction.contLen != tSerializeSCMCreateStreamReq(createStreamRedoAction.pCont, + createStreamRedoAction.contLen, + pCxt->pCreateStreamReq)) { mError("sma: %s, failed to create due to create stream req encode failure", pCxt->pCreateSmaReq->name); code = TSDB_CODE_INVALID_MSG; goto _OVER; @@ -1728,7 +1726,8 @@ static int32_t mndCreateTSMATxnPrepare(SCreateTSMACxt* pCxt) { code = terrno; goto _OVER; } - if (createStreamUndoAction.contLen != tSerializeSMDropStreamReq(createStreamUndoAction.pCont, createStreamUndoAction.contLen, pCxt->pDropStreamReq)) { + if (createStreamUndoAction.contLen != + tSerializeSMDropStreamReq(createStreamUndoAction.pCont, createStreamUndoAction.contLen, pCxt->pDropStreamReq)) { mError("sma: %s, failed to create due to drop stream req encode failure", pCxt->pCreateSmaReq->name); code = TSDB_CODE_INVALID_MSG; goto _OVER; @@ -1746,7 +1745,8 @@ static int32_t mndCreateTSMATxnPrepare(SCreateTSMACxt* pCxt) { code = terrno; goto _OVER; } - if (dropStbUndoAction.contLen != tSerializeSMDropStbReq(dropStbUndoAction.pCont, dropStbUndoAction.contLen, &dropStbReq)) { + if (dropStbUndoAction.contLen != + tSerializeSMDropStbReq(dropStbUndoAction.pCont, dropStbUndoAction.contLen, &dropStbReq)) 
{ mError("sma: %s, failed to create due to drop stb req encode failure", pCxt->pCreateSmaReq->name); code = TSDB_CODE_INVALID_MSG; goto _OVER; @@ -1781,7 +1781,7 @@ static int32_t mndCreateTSMA(SCreateTSMACxt *pCxt) { pCxt->pSma = &sma; initSMAObj(pCxt); - SNodeList* pProjects = NULL; + SNodeList *pProjects = NULL; code = nodesStringToList(pCxt->pCreateSmaReq->expr, &pProjects); if (TSDB_CODE_SUCCESS != code) { goto _OVER; @@ -1830,8 +1830,8 @@ _OVER: TAOS_RETURN(code); } -static int32_t mndTSMAGenerateOutputName(const char* tsmaName, char* streamName, char* targetStbName) { - SName smaName; +static int32_t mndTSMAGenerateOutputName(const char *tsmaName, char *streamName, char *targetStbName) { + SName smaName; int32_t code = tNameFromString(&smaName, tsmaName, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE); if (TSDB_CODE_SUCCESS != code) { return code; @@ -1841,17 +1841,17 @@ static int32_t mndTSMAGenerateOutputName(const char* tsmaName, char* streamName, return TSDB_CODE_SUCCESS; } -static int32_t mndProcessCreateTSMAReq(SRpcMsg* pReq) { +static int32_t mndProcessCreateTSMAReq(SRpcMsg *pReq) { #ifdef WINDOWS TAOS_RETURN(TSDB_CODE_MND_INVALID_PLATFORM); #endif - SMnode * pMnode = pReq->info.node; + SMnode *pMnode = pReq->info.node; int32_t code = -1; - SDbObj * pDb = NULL; - SStbObj * pStb = NULL; - SSmaObj * pSma = NULL; - SSmaObj * pBaseTsma = NULL; - SStreamObj * pStream = NULL; + SDbObj *pDb = NULL; + SStbObj *pStb = NULL; + SSmaObj *pSma = NULL; + SSmaObj *pBaseTsma = NULL; + SStreamObj *pStream = NULL; int64_t mTraceId = TRACE_GET_ROOTID(&pReq->info.traceId); SMCreateSmaReq createReq = {0}; @@ -1941,16 +1941,16 @@ static int32_t mndProcessCreateTSMAReq(SRpcMsg* pReq) { } SCreateTSMACxt cxt = { - .pMnode = pMnode, - .pCreateSmaReq = &createReq, - .pCreateStreamReq = NULL, - .streamName = streamName, - .targetStbFullName = streamTargetStbFullName, - .pDb = pDb, - .pRpcReq = pReq, - .pSma = NULL, - .pBaseSma = pBaseTsma, - .pSrcStb = pStb, + .pMnode = pMnode, + .pCreateSmaReq = &createReq, + .pCreateStreamReq = NULL, + .streamName = streamName, + .targetStbFullName = streamTargetStbFullName, + .pDb = pDb, + .pRpcReq = pReq, + .pSma = NULL, + .pBaseSma = pBaseTsma, + .pSrcStb = pStb, }; code = mndCreateTSMA(&cxt); @@ -1971,10 +1971,10 @@ _OVER: TAOS_RETURN(code); } -static int32_t mndDropTSMA(SCreateTSMACxt* pCxt) { - int32_t code = -1; +static int32_t mndDropTSMA(SCreateTSMACxt *pCxt) { + int32_t code = -1; STransAction dropStreamRedoAction = {0}; - STrans *pTrans = mndTransCreate(pCxt->pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_TSMA, pCxt->pRpcReq, "drop-tsma"); + STrans *pTrans = mndTransCreate(pCxt->pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_TSMA, pCxt->pRpcReq, "drop-tsma"); if (!pTrans) { code = terrno; goto _OVER; @@ -2021,7 +2021,8 @@ static int32_t mndDropTSMA(SCreateTSMACxt* pCxt) { code = terrno; goto _OVER; } - if (dropStbRedoAction.contLen != tSerializeSMDropStbReq(dropStbRedoAction.pCont, dropStbRedoAction.contLen, &dropStbReq)) { + if (dropStbRedoAction.contLen != + tSerializeSMDropStbReq(dropStbRedoAction.pCont, dropStbRedoAction.contLen, &dropStbReq)) { mError("tsma: %s, failedto drop due to drop stb req encode failure", pCxt->pDropSmaReq->name); code = TSDB_CODE_INVALID_MSG; goto _OVER; @@ -2044,9 +2045,9 @@ _OVER: TAOS_RETURN(code); } -static bool hasRecursiveTsmasBasedOnMe(SMnode* pMnode, const SSmaObj* pSma) { +static bool hasRecursiveTsmasBasedOnMe(SMnode *pMnode, const SSmaObj *pSma) { SSmaObj *pSmaObj = NULL; - void * pIter = NULL; + void *pIter = NULL; while (1) { pIter = 
sdbFetch(pMnode->pSdb, SDB_SMA, pIter, (void **)&pSmaObj); if (pIter == NULL) break; @@ -2060,25 +2061,25 @@ static bool hasRecursiveTsmasBasedOnMe(SMnode* pMnode, const SSmaObj* pSma) { return false; } -static int32_t mndProcessDropTSMAReq(SRpcMsg* pReq) { +static int32_t mndProcessDropTSMAReq(SRpcMsg *pReq) { int32_t code = -1; SMDropSmaReq dropReq = {0}; - SSmaObj * pSma = NULL; - SDbObj * pDb = NULL; - SMnode * pMnode = pReq->info.node; + SSmaObj *pSma = NULL; + SDbObj *pDb = NULL; + SMnode *pMnode = pReq->info.node; if (tDeserializeSMDropSmaReq(pReq->pCont, pReq->contLen, &dropReq) != TSDB_CODE_SUCCESS) { code = TSDB_CODE_INVALID_MSG; goto _OVER; } - char streamName[TSDB_TABLE_FNAME_LEN] = {0}; - char streamTargetStbFullName[TSDB_TABLE_FNAME_LEN] = {0}; + char streamName[TSDB_TABLE_FNAME_LEN] = {0}; + char streamTargetStbFullName[TSDB_TABLE_FNAME_LEN] = {0}; code = mndTSMAGenerateOutputName(dropReq.name, streamName, streamTargetStbFullName); if (TSDB_CODE_SUCCESS != code) { goto _OVER; } - SStbObj* pStb = mndAcquireStb(pMnode, streamTargetStbFullName); + SStbObj *pStb = mndAcquireStb(pMnode, streamTargetStbFullName); pSma = mndAcquireSma(pMnode, dropReq.name); if (!pSma && dropReq.igNotExists) { @@ -2113,13 +2114,13 @@ static int32_t mndProcessDropTSMAReq(SRpcMsg* pReq) { } SCreateTSMACxt cxt = { - .pDb = pDb, - .pMnode = pMnode, - .pRpcReq = pReq, - .pSma = pSma, - .streamName = streamName, - .targetStbFullName = streamTargetStbFullName, - .pDropSmaReq = &dropReq, + .pDb = pDb, + .pMnode = pMnode, + .pRpcReq = pReq, + .pSma = pSma, + .streamName = streamName, + .targetStbFullName = streamTargetStbFullName, + .pDropSmaReq = &dropReq, }; code = mndDropTSMA(&cxt); @@ -2134,10 +2135,10 @@ _OVER: } static int32_t mndRetrieveTSMA(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows) { - SDbObj * pDb = NULL; + SDbObj *pDb = NULL; int32_t numOfRows = 0; - SSmaObj * pSma = NULL; - SMnode * pMnode = pReq->info.node; + SSmaObj *pSma = NULL; + SMnode *pMnode = pReq->info.node; int32_t code = 0; SColumnInfoData *pColInfo; if (pShow->pIter == NULL) { @@ -2153,7 +2154,7 @@ static int32_t mndRetrieveTSMA(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlo while (numOfRows < rows) { pIter->pSmaIter = sdbFetch(pMnode->pSdb, SDB_SMA, pIter->pSmaIter, (void **)&pSma); if (pIter->pSmaIter == NULL) break; - SDbObj* pSrcDb = mndAcquireDb(pMnode, pSma->db); + SDbObj *pSrcDb = mndAcquireDb(pMnode, pSma->db); if ((pDb && pSma->dbUid != pDb->uid) || !pSrcDb) { sdbRelease(pMnode->pSdb, pSma); @@ -2176,7 +2177,7 @@ static int32_t mndRetrieveTSMA(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlo if (TSDB_CODE_SUCCESS == code) { STR_TO_VARSTR(db, (char *)mndGetDbStr(pSma->db)); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - code = colDataSetVal(pColInfo, numOfRows, (const char*)db, false); + code = colDataSetVal(pColInfo, numOfRows, (const char *)db, false); } if (TSDB_CODE_SUCCESS == code) { @@ -2186,12 +2187,12 @@ static int32_t mndRetrieveTSMA(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlo if (TSDB_CODE_SUCCESS == code) { STR_TO_VARSTR(srcTb, (char *)tNameGetTableName(&n)); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - code = colDataSetVal(pColInfo, numOfRows, (const char*)srcTb, false); + code = colDataSetVal(pColInfo, numOfRows, (const char *)srcTb, false); } if (TSDB_CODE_SUCCESS == code) { pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - code = colDataSetVal(pColInfo, numOfRows, (const char*)db, false); + code = colDataSetVal(pColInfo, numOfRows, (const char *)db, 
false); } if (TSDB_CODE_SUCCESS == code) { @@ -2200,29 +2201,29 @@ static int32_t mndRetrieveTSMA(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlo if (TSDB_CODE_SUCCESS == code) { char targetTb[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; - STR_TO_VARSTR(targetTb, (char*)tNameGetTableName(&n)); + STR_TO_VARSTR(targetTb, (char *)tNameGetTableName(&n)); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - code = colDataSetVal(pColInfo, numOfRows, (const char*)targetTb, false); + code = colDataSetVal(pColInfo, numOfRows, (const char *)targetTb, false); } if (TSDB_CODE_SUCCESS == code) { // stream name pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - code = colDataSetVal(pColInfo, numOfRows, (const char*)smaName, false); + code = colDataSetVal(pColInfo, numOfRows, (const char *)smaName, false); } if (TSDB_CODE_SUCCESS == code) { pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - code = colDataSetVal(pColInfo, numOfRows, (const char*)(&pSma->createdTime), false); + code = colDataSetVal(pColInfo, numOfRows, (const char *)(&pSma->createdTime), false); } // interval - char interval[64 + VARSTR_HEADER_SIZE] = {0}; + char interval[64 + VARSTR_HEADER_SIZE] = {0}; int32_t len = 0; if (TSDB_CODE_SUCCESS == code) { if (!IS_CALENDAR_TIME_DURATION(pSma->intervalUnit)) { len = tsnprintf(interval + VARSTR_HEADER_SIZE, 64, "%" PRId64 "%c", pSma->interval, - getPrecisionUnit(pSrcDb->cfg.precision)); + getPrecisionUnit(pSrcDb->cfg.precision)); } else { len = tsnprintf(interval + VARSTR_HEADER_SIZE, 64, "%" PRId64 "%c", pSma->interval, pSma->intervalUnit); } @@ -2243,17 +2244,17 @@ static int32_t mndRetrieveTSMA(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlo // func list len = 0; SNode *pNode = NULL, *pFunc = NULL; - if (TSDB_CODE_SUCCESS == code) { + if (TSDB_CODE_SUCCESS == code) { code = nodesStringToNode(pSma->ast, &pNode); } if (TSDB_CODE_SUCCESS == code) { - char * start = buf + VARSTR_HEADER_SIZE; + char *start = buf + VARSTR_HEADER_SIZE; FOREACH(pFunc, ((SSelectStmt *)pNode)->pProjectionList) { if (nodeType(pFunc) == QUERY_NODE_FUNCTION) { SFunctionNode *pFuncNode = (SFunctionNode *)pFunc; if (!fmIsTSMASupportedFunc(pFuncNode->funcId)) continue; len += tsnprintf(start, TSDB_MAX_SAVED_SQL_LEN - len, "%s%s", start != buf + VARSTR_HEADER_SIZE ? 
"," : "", - ((SExprNode *)pFunc)->userAlias); + ((SExprNode *)pFunc)->userAlias); if (len >= TSDB_MAX_SAVED_SQL_LEN) { len = TSDB_MAX_SAVED_SQL_LEN; break; @@ -2297,7 +2298,8 @@ static void mndCancelRetrieveTSMA(SMnode *pMnode, void *pIter) { taosMemoryFree(p); } -int32_t dumpTSMAInfoFromSmaObj(const SSmaObj* pSma, const SStbObj* pDestStb, STableTSMAInfo* pInfo, const SSmaObj* pBaseTsma) { +int32_t dumpTSMAInfoFromSmaObj(const SSmaObj *pSma, const SStbObj *pDestStb, STableTSMAInfo *pInfo, + const SSmaObj *pBaseTsma) { int32_t code = 0; pInfo->interval = pSma->interval; pInfo->unit = pSma->intervalUnit; @@ -2336,7 +2338,7 @@ int32_t dumpTSMAInfoFromSmaObj(const SSmaObj* pSma, const SStbObj* pDestStb, STa SSelectStmt *pSelect = (SSelectStmt *)pNode; FOREACH(pFunc, pSelect->pProjectionList) { STableTSMAFuncInfo funcInfo = {0}; - SFunctionNode * pFuncNode = (SFunctionNode *)pFunc; + SFunctionNode *pFuncNode = (SFunctionNode *)pFunc; if (!fmIsTSMASupportedFunc(pFuncNode->funcId)) continue; funcInfo.funcId = pFuncNode->funcId; funcInfo.colId = ((SColumnNode *)pFuncNode->pParameterList->pHead->pNode)->colId; @@ -2383,9 +2385,9 @@ int32_t dumpTSMAInfoFromSmaObj(const SSmaObj* pSma, const SStbObj* pDestStb, STa } // @note remember to mndReleaseSma(*ppOut) -static int32_t mndGetDeepestBaseForTsma(SMnode* pMnode, SSmaObj* pSma, SSmaObj** ppOut) { - int32_t code = 0; - SSmaObj* pRecursiveTsma = NULL; +static int32_t mndGetDeepestBaseForTsma(SMnode *pMnode, SSmaObj *pSma, SSmaObj **ppOut) { + int32_t code = 0; + SSmaObj *pRecursiveTsma = NULL; if (pSma->baseSmaName[0]) { pRecursiveTsma = mndAcquireSma(pMnode, pSma->baseSmaName); if (!pRecursiveTsma) { @@ -2393,7 +2395,7 @@ static int32_t mndGetDeepestBaseForTsma(SMnode* pMnode, SSmaObj* pSma, SSmaObj** return TSDB_CODE_MND_SMA_NOT_EXIST; } while (pRecursiveTsma->baseSmaName[0]) { - SSmaObj* pTmpSma = pRecursiveTsma; + SSmaObj *pTmpSma = pRecursiveTsma; pRecursiveTsma = mndAcquireSma(pMnode, pTmpSma->baseSmaName); if (!pRecursiveTsma) { mError("base tsma: %s for tsma: %s not found", pTmpSma->baseSmaName, pTmpSma->name); @@ -2407,7 +2409,6 @@ static int32_t mndGetDeepestBaseForTsma(SMnode* pMnode, SSmaObj* pSma, SSmaObj** return code; } - static int32_t mndGetTSMA(SMnode *pMnode, char *tsmaFName, STableTSMAInfoRsp *rsp, bool *exist) { int32_t code = -1; SSmaObj *pSma = NULL; @@ -2450,17 +2451,17 @@ static int32_t mndGetTSMA(SMnode *pMnode, char *tsmaFName, STableTSMAInfoRsp *rs TAOS_RETURN(code); } -typedef bool (*tsmaFilter)(const SSmaObj* pSma, void* param); +typedef bool (*tsmaFilter)(const SSmaObj *pSma, void *param); -static int32_t mndGetSomeTsmas(SMnode* pMnode, STableTSMAInfoRsp* pRsp, tsmaFilter filtered, void* param, bool* exist) { - int32_t code = 0; - SSmaObj * pSma = NULL; - SSmaObj * pBaseTsma = NULL; - SSdb * pSdb = pMnode->pSdb; - void * pIter = NULL; - SStreamObj * pStream = NULL; - SStbObj * pStb = NULL; - bool shouldRetry = false; +static int32_t mndGetSomeTsmas(SMnode *pMnode, STableTSMAInfoRsp *pRsp, tsmaFilter filtered, void *param, bool *exist) { + int32_t code = 0; + SSmaObj *pSma = NULL; + SSmaObj *pBaseTsma = NULL; + SSdb *pSdb = pMnode->pSdb; + void *pIter = NULL; + SStreamObj *pStream = NULL; + SStbObj *pStb = NULL; + bool shouldRetry = false; while (1) { pIter = sdbFetch(pSdb, SDB_SMA, pIter, (void **)&pSma); @@ -2479,7 +2480,7 @@ static int32_t mndGetSomeTsmas(SMnode* pMnode, STableTSMAInfoRsp* pRsp, tsmaFilt } SName smaName; - char streamName[TSDB_TABLE_FNAME_LEN] = {0}; + char streamName[TSDB_TABLE_FNAME_LEN] = {0}; code = 
tNameFromString(&smaName, pSma->name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE); if (TSDB_CODE_SUCCESS != code) { sdbRelease(pSdb, pSma); @@ -2541,8 +2542,8 @@ static int32_t mndGetSomeTsmas(SMnode* pMnode, STableTSMAInfoRsp* pRsp, tsmaFilt return TSDB_CODE_SUCCESS; } -static bool tsmaTbFilter(const SSmaObj* pSma, void* param) { - const char* tbFName = param; +static bool tsmaTbFilter(const SSmaObj *pSma, void *param) { + const char *tbFName = param; return pSma->stb[0] != tbFName[0] || strcmp(pSma->stb, tbFName) != 0; } @@ -2550,7 +2551,7 @@ static int32_t mndGetTableTSMA(SMnode *pMnode, char *tbFName, STableTSMAInfoRsp return mndGetSomeTsmas(pMnode, pRsp, tsmaTbFilter, tbFName, exist); } -static bool tsmaDbFilter(const SSmaObj* pSma, void* param) { +static bool tsmaDbFilter(const SSmaObj *pSma, void *param) { uint64_t *dbUid = param; return pSma->dbUid != *dbUid; } @@ -2564,7 +2565,7 @@ static int32_t mndProcessGetTbTSMAReq(SRpcMsg *pReq) { int32_t code = -1; STableTSMAInfoReq tsmaReq = {0}; bool exist = false; - SMnode * pMnode = pReq->info.node; + SMnode *pMnode = pReq->info.node; TAOS_CHECK_GOTO(tDeserializeTableTSMAInfoReq(pReq->pCont, pReq->contLen, &tsmaReq), NULL, _OVER); @@ -2635,12 +2636,12 @@ static int32_t mkNonExistTSMAInfo(const STSMAVersion *pTsmaVer, STableTSMAInfo * int32_t mndValidateTSMAInfo(SMnode *pMnode, STSMAVersion *pTsmaVersions, int32_t numOfTsmas, void **ppRsp, int32_t *pRspLen) { - int32_t code = -1; - STSMAHbRsp hbRsp = {0}; - int32_t rspLen = 0; - void * pRsp = NULL; - char tsmaFName[TSDB_TABLE_FNAME_LEN] = {0}; - STableTSMAInfo * pTsmaInfo = NULL; + int32_t code = -1; + STSMAHbRsp hbRsp = {0}; + int32_t rspLen = 0; + void *pRsp = NULL; + char tsmaFName[TSDB_TABLE_FNAME_LEN] = {0}; + STableTSMAInfo *pTsmaInfo = NULL; hbRsp.pTsmas = taosArrayInit(numOfTsmas, POINTER_BYTES); if (!hbRsp.pTsmas) { @@ -2649,13 +2650,13 @@ int32_t mndValidateTSMAInfo(SMnode *pMnode, STSMAVersion *pTsmaVersions, int32_t } for (int32_t i = 0; i < numOfTsmas; ++i) { - STSMAVersion* pTsmaVer = &pTsmaVersions[i]; + STSMAVersion *pTsmaVer = &pTsmaVersions[i]; pTsmaVer->dbId = be64toh(pTsmaVer->dbId); pTsmaVer->tsmaId = be64toh(pTsmaVer->tsmaId); pTsmaVer->version = ntohl(pTsmaVer->version); snprintf(tsmaFName, sizeof(tsmaFName), "%s.%s", pTsmaVer->dbFName, pTsmaVer->name); - SSmaObj* pSma = mndAcquireSma(pMnode, tsmaFName); + SSmaObj *pSma = mndAcquireSma(pMnode, tsmaFName); if (!pSma) { code = mkNonExistTSMAInfo(pTsmaVer, &pTsmaInfo); if (code) goto _OVER; @@ -2683,7 +2684,7 @@ int32_t mndValidateTSMAInfo(SMnode *pMnode, STSMAVersion *pTsmaVersions, int32_t continue; } - SStbObj* pDestStb = mndAcquireStb(pMnode, pSma->dstTbName); + SStbObj *pDestStb = mndAcquireStb(pMnode, pSma->dstTbName); if (!pDestStb) { mInfo("tsma: %s.%" PRIx64 " dest stb: %s not found, maybe dropped", tsmaFName, pTsmaVer->tsmaId, pSma->dstTbName); code = mkNonExistTSMAInfo(pTsmaVer, &pTsmaInfo); @@ -2698,7 +2699,7 @@ int32_t mndValidateTSMAInfo(SMnode *pMnode, STSMAVersion *pTsmaVersions, int32_t } // dump smaObj into rsp - STableTSMAInfo * pInfo = NULL; + STableTSMAInfo *pInfo = NULL; pInfo = taosMemoryCalloc(1, sizeof(STableTSMAInfo)); if (!pInfo) { code = terrno; @@ -2707,7 +2708,7 @@ int32_t mndValidateTSMAInfo(SMnode *pMnode, STSMAVersion *pTsmaVersions, int32_t goto _OVER; } - SSmaObj* pBaseSma = NULL; + SSmaObj *pBaseSma = NULL; code = mndGetDeepestBaseForTsma(pMnode, pSma, &pBaseSma); if (code == 0) code = dumpTSMAInfoFromSmaObj(pSma, pDestStb, pInfo, pBaseSma); diff --git 
a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index 5d41e1506c..0a107518df 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -1931,11 +1931,6 @@ static int32_t mndProcessVgroupChange(SMnode *pMnode, SVgroupChangeInfo *pChange sdbCancelFetch(pSdb, pIter); return terrno = code; } - - code = mndStreamRegisterTrans(pTrans, MND_STREAM_TASK_UPDATE_NAME, pStream->uid); - if (code) { - mError("failed to register trans, transId:%d, and continue", pTrans->id); - } } if (!includeAllNodes) { @@ -1951,6 +1946,12 @@ static int32_t mndProcessVgroupChange(SMnode *pMnode, SVgroupChangeInfo *pChange mDebug("stream:0x%" PRIx64 " %s involved node changed, create update trans, transId:%d", pStream->uid, pStream->name, pTrans->id); + // NOTE: for each stream, we register one trans entry for task update + code = mndStreamRegisterTrans(pTrans, MND_STREAM_TASK_UPDATE_NAME, pStream->uid); + if (code) { + mError("failed to register trans, transId:%d, and continue", pTrans->id); + } + code = mndStreamSetUpdateEpsetAction(pMnode, pStream, pChangeInfo, pTrans); // todo: not continue, drop all and retry again diff --git a/source/dnode/mnode/impl/src/mndStreamTrans.c b/source/dnode/mnode/impl/src/mndStreamTrans.c index 905a73ad48..a1e104aeca 100644 --- a/source/dnode/mnode/impl/src/mndStreamTrans.c +++ b/source/dnode/mnode/impl/src/mndStreamTrans.c @@ -35,7 +35,11 @@ int32_t mndStreamClearFinishedTrans(SMnode *pMnode, int32_t *pNumOfActiveChkpt) size_t keyLen = 0; void *pIter = NULL; SArray *pList = taosArrayInit(4, sizeof(SKeyInfo)); - int32_t num = 0; + int32_t numOfChkpt = 0; + + if (pNumOfActiveChkpt != NULL) { + *pNumOfActiveChkpt = 0; + } if (pList == NULL) { return terrno; @@ -50,15 +54,15 @@ int32_t mndStreamClearFinishedTrans(SMnode *pMnode, int32_t *pNumOfActiveChkpt) void *pKey = taosHashGetKey(pEntry, &keyLen); // key is the name of src/dst db name SKeyInfo info = {.pKey = pKey, .keyLen = keyLen}; - mDebug("transId:%d %s startTs:%" PRId64 " cleared since finished", pEntry->transId, pEntry->name, - pEntry->startTime); + mDebug("transId:%d stream:0x%" PRIx64 " %s startTs:%" PRId64 " cleared since finished", pEntry->transId, + pEntry->streamId, pEntry->name, pEntry->startTime); void* p = taosArrayPush(pList, &info); if (p == NULL) { return terrno; } } else { if (strcmp(pEntry->name, MND_STREAM_CHECKPOINT_NAME) == 0) { - num++; + numOfChkpt++; } mndReleaseTrans(pMnode, pTrans); } @@ -78,48 +82,34 @@ int32_t mndStreamClearFinishedTrans(SMnode *pMnode, int32_t *pNumOfActiveChkpt) } } - mDebug("clear %d finished stream-trans, remained:%d, active checkpoint trans:%d", size, - taosHashGetSize(execInfo.transMgmt.pDBTrans), num); + mDebug("clear %d finished stream-trans, active trans:%d, active checkpoint trans:%d", size, + taosHashGetSize(execInfo.transMgmt.pDBTrans), numOfChkpt); taosArrayDestroy(pList); if (pNumOfActiveChkpt != NULL) { - *pNumOfActiveChkpt = num; + *pNumOfActiveChkpt = numOfChkpt; } return 0; } -// * Transactions of different streams are not related. Here only check the conflict of transaction for a given stream. -// For a given stream: -// 1. checkpoint trans is conflict with any other trans except for the drop and reset trans. -// 2. create/drop/reset/update trans are conflict with any other trans. 
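The comment block above spells out the conflict rule that the reworked check enforces, and the surviving code additionally lets a restart transaction proceed while a checkpoint is running. Reduced to a pure predicate it reads as below; the literal names are placeholders, since the real code compares against the `MND_STREAM_*_NAME` macros defined elsewhere in the module.

```c
#include <stdbool.h>
#include <string.h>

// Placeholder trans names standing in for the MND_STREAM_*_NAME macros.
// Rule from the comment above: an in-flight checkpoint trans only tolerates a
// new drop/reset/restart trans; any other in-flight trans conflicts with all
// newcomers for the same stream.
static bool streamTransConflicts(const char *existingTrans, const char *newTrans) {
  if (strcmp(existingTrans, "stream-checkpoint") == 0) {
    return strcmp(newTrans, "stream-drop") != 0 &&
           strcmp(newTrans, "stream-task-reset") != 0 &&
           strcmp(newTrans, "stream-restart") != 0;
  }
  return true;  // create/drop/reset/update in flight: nothing else may start
}
```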
-int32_t mndStreamTransConflictCheck(SMnode *pMnode, int64_t streamId, const char *pTransName, bool lock) { - if (lock) { - streamMutexLock(&execInfo.lock); - } - +static int32_t doStreamTransConflictCheck(SMnode *pMnode, int64_t streamId, const char *pTransName) { int32_t num = taosHashGetSize(execInfo.transMgmt.pDBTrans); if (num <= 0) { - if (lock) { - streamMutexUnlock(&execInfo.lock); - } return 0; } + // if any task updates exist, any other stream trans are not allowed to be created int32_t code = mndStreamClearFinishedTrans(pMnode, NULL); if (code) { - mError("failed to clear finish trans, code:%s", tstrerror(code)); + mError("failed to clear finish trans, code:%s, and continue", tstrerror(code)); } SStreamTransInfo *pEntry = taosHashGet(execInfo.transMgmt.pDBTrans, &streamId, sizeof(streamId)); if (pEntry != NULL) { SStreamTransInfo tInfo = *pEntry; - if (lock) { - streamMutexUnlock(&execInfo.lock); - } - if (strcmp(tInfo.name, MND_STREAM_CHECKPOINT_NAME) == 0) { if ((strcmp(pTransName, MND_STREAM_DROP_NAME) != 0) && (strcmp(pTransName, MND_STREAM_TASK_RESET_NAME) != 0) && (strcmp(pTransName, MND_STREAM_RESTART_NAME) != 0)) { @@ -141,11 +131,25 @@ int32_t mndStreamTransConflictCheck(SMnode *pMnode, int64_t streamId, const char mDebug("stream:0x%" PRIx64 " no conflict trans existed, continue create trans", streamId); } + return TSDB_CODE_SUCCESS; +} + +// * Transactions of different streams are not related. Here only check the conflict of transaction for a given stream. +// For a given stream: +// 1. checkpoint trans is conflict with any other trans except for the drop and reset trans. +// 2. create/drop/reset/update trans are conflict with any other trans. +int32_t mndStreamTransConflictCheck(SMnode *pMnode, int64_t streamId, const char *pTransName, bool lock) { + if (lock) { + streamMutexLock(&execInfo.lock); + } + + int32_t code = doStreamTransConflictCheck(pMnode, streamId, pTransName); + if (lock) { streamMutexUnlock(&execInfo.lock); } - return 0; + return code; } int32_t mndStreamGetRelTrans(SMnode *pMnode, int64_t streamId) { diff --git a/source/dnode/mnode/impl/src/mndStreamUtil.c b/source/dnode/mnode/impl/src/mndStreamUtil.c index bb666eb6dd..00b36977b6 100644 --- a/source/dnode/mnode/impl/src/mndStreamUtil.c +++ b/source/dnode/mnode/impl/src/mndStreamUtil.c @@ -108,30 +108,94 @@ static bool checkStatusForEachReplica(SVgObj *pVgroup) { return true; } -int32_t mndTakeVgroupSnapshot(SMnode *pMnode, bool *allReady, SArray **pList) { +static int32_t mndAddSnodeInfo(SMnode *pMnode, SArray *pVgroupList) { + SSnodeObj *pObj = NULL; + void *pIter = NULL; + int32_t code = 0; + + while (1) { + pIter = sdbFetch(pMnode->pSdb, SDB_SNODE, pIter, (void **)&pObj); + if (pIter == NULL) { + break; + } + + SNodeEntry entry = {.nodeId = SNODE_HANDLE}; + code = addEpIntoEpSet(&entry.epset, pObj->pDnode->fqdn, pObj->pDnode->port); + if (code) { + sdbRelease(pMnode->pSdb, pObj); + sdbCancelFetch(pMnode->pSdb, pIter); + mError("failed to extract epset for fqdn:%s during task vgroup snapshot", pObj->pDnode->fqdn); + return code; + } + + char buf[256] = {0}; + code = epsetToStr(&entry.epset, buf, tListLen(buf)); + if (code != 0) { // print error and continue + mError("failed to convert epset to str, code:%s", tstrerror(code)); + } + + void *p = taosArrayPush(pVgroupList, &entry); + if (p == NULL) { + code = terrno; + sdbRelease(pMnode->pSdb, pObj); + sdbCancelFetch(pMnode->pSdb, pIter); + mError("failed to put entry in vgroup list, nodeId:%d code:%s", entry.nodeId, tstrerror(code)); + return code; + 
} else { + mDebug("take snode snapshot, nodeId:%d %s", entry.nodeId, buf); + } + + sdbRelease(pMnode->pSdb, pObj); + } + + return code; +} + +static int32_t mndCheckMnodeStatus(SMnode* pMnode) { + int32_t code = 0; + ESdbStatus objStatus; + void *pIter = NULL; + SMnodeObj *pObj = NULL; + + while (1) { + pIter = sdbFetchAll(pMnode->pSdb, SDB_MNODE, pIter, (void **)&pObj, &objStatus, true); + if (pIter == NULL) { + break; + } + + if (pObj->syncState != TAOS_SYNC_STATE_LEADER && pObj->syncState != TAOS_SYNC_STATE_FOLLOWER) { + mDebug("mnode sync state:%d not leader/follower", pObj->syncState); + sdbRelease(pMnode->pSdb, pObj); + sdbCancelFetch(pMnode->pSdb, pIter); + return TSDB_CODE_FAILED; + } + + if (objStatus != SDB_STATUS_READY) { + mWarn("mnode status:%d not ready", objStatus); + sdbRelease(pMnode->pSdb, pObj); + sdbCancelFetch(pMnode->pSdb, pIter); + return TSDB_CODE_FAILED; + } + + sdbRelease(pMnode->pSdb, pObj); + } + + return TSDB_CODE_SUCCESS; +} + +static int32_t mndCheckAndAddVgroupsInfo(SMnode *pMnode, SArray *pVgroupList, bool* allReady) { SSdb *pSdb = pMnode->pSdb; void *pIter = NULL; SVgObj *pVgroup = NULL; int32_t code = 0; - SArray *pVgroupList = NULL; SHashObj *pHash = NULL; - pVgroupList = taosArrayInit(4, sizeof(SNodeEntry)); - if (pVgroupList == NULL) { - mError("failed to prepare arraylist during take vgroup snapshot, code:%s", tstrerror(terrno)); - code = terrno; - goto _err; - } - pHash = taosHashInit(10, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); if (pHash == NULL) { mError("failed to prepare hashmap during take vgroup snapshot, code:%s", tstrerror(terrno)); - code = terrno; - goto _err; + return terrno; } - *allReady = true; - while (1) { pIter = sdbFetch(pSdb, SDB_VGROUP, pIter, (void **)&pVgroup); if (pIter == NULL) { @@ -148,7 +212,7 @@ int32_t mndTakeVgroupSnapshot(SMnode *pMnode, bool *allReady, SArray **pList) { mError("failed to put info into hashmap during task vgroup snapshot, code:%s", tstrerror(code)); sdbRelease(pSdb, pVgroup); sdbCancelFetch(pSdb, pIter); - goto _err; // take snapshot failed, and not all ready + goto _end; // take snapshot failed, and not all ready } } else { if (*pReplica != pVgroup->replica) { @@ -158,7 +222,7 @@ int32_t mndTakeVgroupSnapshot(SMnode *pMnode, bool *allReady, SArray **pList) { } } - // if not all ready till now, no need to check the remaining vgroups. 
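The helpers introduced above (`mndAddSnodeInfo`, `mndCheckMnodeStatus`) and the vgroup loop continuing below all follow the same fetch-loop discipline: on any early exit, release the object currently held and cancel the fetch cursor before returning. The sketch below is a hedged, self-contained model of that pattern with invented stand-ins (`iter_next`, `iter_cancel`, `obj_release`); it is not the sdb API itself.

```c
// Minimal sketch of the "release + cancel on early exit" iteration pattern, assumed names only.
#include <stdint.h>
#include <stdio.h>

typedef struct { int32_t id; int32_t ready; } Obj;
typedef struct { Obj *items; int32_t count; int32_t pos; } Iter;

static Obj *iter_next(Iter *it)   { return (it->pos < it->count) ? &it->items[it->pos++] : NULL; }
static void iter_cancel(Iter *it) { it->pos = it->count; }   // stand-in for sdbCancelFetch
static void obj_release(Obj *obj) { (void)obj; }             // stand-in for sdbRelease

// returns 0 only if every object is ready; bails out on the first failure
static int32_t check_all_ready(Iter *it) {
  Obj *obj = NULL;
  while ((obj = iter_next(it)) != NULL) {
    if (!obj->ready) {
      fprintf(stderr, "object %d not ready, abort scan\n", obj->id);
      obj_release(obj);   // drop the reference held for this object
      iter_cancel(it);    // and close the cursor before returning early
      return -1;
    }
    obj_release(obj);
  }
  return 0;
}

int main(void) {
  Obj  objs[] = {{1, 1}, {2, 0}, {3, 1}};
  Iter it = {.items = objs, .count = 3, .pos = 0};
  printf("check: %d\n", check_all_ready(&it));  // -1, stops at object 2
  return 0;
}
```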
+ // if not all ready till now, no need to check the remaining vgroups, // but still we need to put the info of the existed vgroups into the snapshot list if (*allReady) { *allReady = checkStatusForEachReplica(pVgroup); @@ -176,7 +240,7 @@ int32_t mndTakeVgroupSnapshot(SMnode *pMnode, bool *allReady, SArray **pList) { code = terrno; sdbRelease(pSdb, pVgroup); sdbCancelFetch(pSdb, pIter); - goto _err; + goto _end; } else { mDebug("take node snapshot, nodeId:%d %s", entry.nodeId, buf); } @@ -184,51 +248,49 @@ int32_t mndTakeVgroupSnapshot(SMnode *pMnode, bool *allReady, SArray **pList) { sdbRelease(pSdb, pVgroup); } - SSnodeObj *pObj = NULL; - while (1) { - pIter = sdbFetch(pSdb, SDB_SNODE, pIter, (void **)&pObj); - if (pIter == NULL) { - break; - } +_end: + taosHashCleanup(pHash); + return code; +} - SNodeEntry entry = {.nodeId = SNODE_HANDLE}; - code = addEpIntoEpSet(&entry.epset, pObj->pDnode->fqdn, pObj->pDnode->port); - if (code) { - sdbRelease(pSdb, pObj); - sdbCancelFetch(pSdb, pIter); - mError("failed to extract epset for fqdn:%s during task vgroup snapshot", pObj->pDnode->fqdn); - goto _err; - } +int32_t mndTakeVgroupSnapshot(SMnode *pMnode, bool *allReady, SArray **pList) { + int32_t code = 0; + SArray *pVgroupList = NULL; - char buf[256] = {0}; - code = epsetToStr(&entry.epset, buf, tListLen(buf)); - if (code != 0) { // print error and continue - mError("failed to convert epset to str, code:%s", tstrerror(code)); - } + *pList = NULL; + *allReady = true; - void *p = taosArrayPush(pVgroupList, &entry); - if (p == NULL) { - code = terrno; - sdbRelease(pSdb, pObj); - sdbCancelFetch(pSdb, pIter); - mError("failed to put entry in vgroup list, nodeId:%d code:%s", entry.nodeId, tstrerror(code)); - goto _err; - } else { - mDebug("take snode snapshot, nodeId:%d %s", entry.nodeId, buf); - } + pVgroupList = taosArrayInit(4, sizeof(SNodeEntry)); + if (pVgroupList == NULL) { + mError("failed to prepare arraylist during take vgroup snapshot, code:%s", tstrerror(terrno)); + code = terrno; + goto _err; + } - sdbRelease(pSdb, pObj); + // 1. check for all vnodes status + code = mndCheckAndAddVgroupsInfo(pMnode, pVgroupList, allReady); + if (code) { + goto _err; + } + + // 2. add snode info + code = mndAddSnodeInfo(pMnode, pVgroupList); + if (code) { + goto _err; + } + + // 3. 
check for mnode status + code = mndCheckMnodeStatus(pMnode); + if (code != TSDB_CODE_SUCCESS) { + *allReady = false; } *pList = pVgroupList; - taosHashCleanup(pHash); return code; _err: *allReady = false; taosArrayDestroy(pVgroupList); - taosHashCleanup(pHash); - return code; } diff --git a/source/dnode/mnode/impl/src/mndTrans.c b/source/dnode/mnode/impl/src/mndTrans.c index 4268d73746..ae5da415a2 100644 --- a/source/dnode/mnode/impl/src/mndTrans.c +++ b/source/dnode/mnode/impl/src/mndTrans.c @@ -52,10 +52,17 @@ static bool mndTransPerformCommitStage(SMnode *pMnode, STrans *pTrans, bool t static bool mndTransPerformRollbackStage(SMnode *pMnode, STrans *pTrans, bool topHalf); static bool mndTransPerformFinishStage(SMnode *pMnode, STrans *pTrans, bool topHalf); -static bool mndCannotExecuteTransAction(SMnode *pMnode, bool topHalf) { - return (!pMnode->deploy && !mndIsLeader(pMnode)) || !topHalf; +static inline bool mndTransIsInSyncContext(bool topHalf) { return !topHalf; } + +static bool mndCannotExecuteTrans(SMnode *pMnode, bool topHalf) { + bool isLeader = mndIsLeader(pMnode); + bool ret = (!pMnode->deploy && !isLeader) || mndTransIsInSyncContext(topHalf); + if (ret) mDebug("cannot execute trans action, deploy:%d, isLeader:%d, topHalf:%d", pMnode->deploy, isLeader, topHalf); + return ret; } +static inline char *mndStrExecutionContext(bool topHalf) { return topHalf ? "transContext" : "syncContext"; } + static void mndTransSendRpcRsp(SMnode *pMnode, STrans *pTrans); static int32_t mndProcessTransTimer(SRpcMsg *pReq); static int32_t mndProcessTtl(SRpcMsg *pReq); @@ -1339,7 +1346,7 @@ static int32_t mndTransWriteSingleLog(SMnode *pMnode, STrans *pTrans, STransActi // execute in trans context static int32_t mndTransSendSingleMsg(SMnode *pMnode, STrans *pTrans, STransAction *pAction, bool topHalf) { if (pAction->msgSent) return 0; - if (mndCannotExecuteTransAction(pMnode, topHalf)) { + if (mndCannotExecuteTrans(pMnode, topHalf)) { TAOS_RETURN(TSDB_CODE_MND_TRANS_CTX_SWITCH); } @@ -1485,8 +1492,8 @@ static int32_t mndTransExecuteActions(SMnode *pMnode, STrans *pTrans, SArray *pA static int32_t mndTransExecuteRedoActions(SMnode *pMnode, STrans *pTrans, bool topHalf) { int32_t code = mndTransExecuteActions(pMnode, pTrans, pTrans->redoActions, topHalf); if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS && code != TSDB_CODE_MND_TRANS_CTX_SWITCH) { - mError("trans:%d, failed to execute redoActions since:%s, code:0x%x, topHalf(TransContext):%d", pTrans->id, - terrstr(), terrno, topHalf); + mError("trans:%d, failed to execute redoActions since:%s, code:0x%x, in %s", pTrans->id, terrstr(), terrno, + mndStrExecutionContext(topHalf)); } return code; } @@ -1494,8 +1501,8 @@ static int32_t mndTransExecuteRedoActions(SMnode *pMnode, STrans *pTrans, bool t static int32_t mndTransExecuteUndoActions(SMnode *pMnode, STrans *pTrans, bool topHalf) { int32_t code = mndTransExecuteActions(pMnode, pTrans, pTrans->undoActions, topHalf); if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS && code != TSDB_CODE_MND_TRANS_CTX_SWITCH) { - mError("trans:%d, failed to execute undoActions since %s. topHalf(TransContext):%d", pTrans->id, terrstr(), - topHalf); + mError("trans:%d, failed to execute undoActions since %s. 
in %s", pTrans->id, terrstr(), + mndStrExecutionContext(topHalf)); } return code; } @@ -1503,8 +1510,8 @@ static int32_t mndTransExecuteUndoActions(SMnode *pMnode, STrans *pTrans, bool t static int32_t mndTransExecuteCommitActions(SMnode *pMnode, STrans *pTrans, bool topHalf) { int32_t code = mndTransExecuteActions(pMnode, pTrans, pTrans->commitActions, topHalf); if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS && code != TSDB_CODE_MND_TRANS_CTX_SWITCH) { - mError("trans:%d, failed to execute commitActions since %s. topHalf(TransContext):%d", pTrans->id, terrstr(), - topHalf); + mError("trans:%d, failed to execute commitActions since %s. in %s", pTrans->id, terrstr(), + mndStrExecutionContext(topHalf)); } return code; } @@ -1524,7 +1531,7 @@ static int32_t mndTransExecuteActionsSerial(SMnode *pMnode, STrans *pTrans, SArr for (int32_t action = pTrans->actionPos; action < numOfActions; ++action) { STransAction *pAction = taosArrayGet(pActions, action); - mInfo("trans:%d, current action:%d, stage:%s, actionType(0:log,1:msg):%d", pTrans->id, pTrans->actionPos, + mInfo("trans:%d, current action:%d, stage:%s, actionType(1:msg,2:log):%d", pTrans->id, pTrans->actionPos, mndTransStr(pAction->stage), pAction->actionType); code = mndTransExecSingleAction(pMnode, pTrans, pAction, topHalf); @@ -1555,11 +1562,11 @@ static int32_t mndTransExecuteActionsSerial(SMnode *pMnode, STrans *pTrans, SArr } mndSetTransLastAction(pTrans, pAction); - if (mndCannotExecuteTransAction(pMnode, topHalf)) { + if (mndCannotExecuteTrans(pMnode, topHalf)) { pTrans->lastErrorNo = code; pTrans->code = code; - mInfo("trans:%d, %s:%d, topHalf(TransContext):%d, not execute next action, code:%s", pTrans->id, - mndTransStr(pAction->stage), action, topHalf, tstrerror(code)); + mInfo("trans:%d, %s:%d, cannot execute next action in %s, code:%s", pTrans->id, mndTransStr(pAction->stage), + action, mndStrExecutionContext(topHalf), tstrerror(code)); break; } @@ -1660,20 +1667,21 @@ static bool mndTransPerformRedoActionStage(SMnode *pMnode, STrans *pTrans, bool code = mndTransExecuteRedoActions(pMnode, pTrans, topHalf); } - if (mndCannotExecuteTransAction(pMnode, topHalf)) { + if (code != 0 && code != TSDB_CODE_MND_TRANS_CTX_SWITCH && mndTransIsInSyncContext(topHalf)) { pTrans->lastErrorNo = code; pTrans->code = code; - bool continueExec = true; - if (code != 0 && code != TSDB_CODE_MND_TRANS_CTX_SWITCH) { - taosMsleep(100); - continueExec = true; - } else { - continueExec = false; + mInfo( + "trans:%d, failed to execute, will retry redo action stage in 100 ms , in %s, " + "continueExec:%d, code:%s", + pTrans->id, mndStrExecutionContext(topHalf), continueExec, tstrerror(code)); + taosMsleep(100); + return true; + } else { + if (mndCannotExecuteTrans(pMnode, topHalf)) { + mInfo("trans:%d, cannot continue to execute redo action stage in %s, continueExec:%d, code:%s", pTrans->id, + mndStrExecutionContext(topHalf), continueExec, tstrerror(code)); + return false; } - mInfo("trans:%d, cannot execute redo action stage, topHalf(TransContext):%d, continueExec:%d, code:%s", pTrans->id, - topHalf, continueExec, tstrerror(code)); - - return continueExec; } terrno = code; @@ -1716,9 +1724,9 @@ static bool mndTransPerformRedoActionStage(SMnode *pMnode, STrans *pTrans, bool return continueExec; } -// in trans context +// execute in trans context static bool mndTransPerformCommitStage(SMnode *pMnode, STrans *pTrans, bool topHalf) { - if (mndCannotExecuteTransAction(pMnode, topHalf)) return false; + if (mndCannotExecuteTrans(pMnode, topHalf)) return 
false; bool continueExec = true; int32_t code = mndTransCommit(pMnode, pTrans); @@ -1772,7 +1780,7 @@ static bool mndTransPerformUndoActionStage(SMnode *pMnode, STrans *pTrans, bool code = mndTransExecuteUndoActions(pMnode, pTrans, topHalf); } - if (mndCannotExecuteTransAction(pMnode, topHalf)) return false; + if (mndCannotExecuteTrans(pMnode, topHalf)) return false; terrno = code; if (code == 0) { @@ -1793,7 +1801,7 @@ static bool mndTransPerformUndoActionStage(SMnode *pMnode, STrans *pTrans, bool // in trans context static bool mndTransPerformRollbackStage(SMnode *pMnode, STrans *pTrans, bool topHalf) { - if (mndCannotExecuteTransAction(pMnode, topHalf)) return false; + if (mndCannotExecuteTrans(pMnode, topHalf)) return false; bool continueExec = true; int32_t code = mndTransRollback(pMnode, pTrans); @@ -1810,8 +1818,9 @@ static bool mndTransPerformRollbackStage(SMnode *pMnode, STrans *pTrans, bool to return continueExec; } +// excute in trans context static bool mndTransPerformPreFinishStage(SMnode *pMnode, STrans *pTrans, bool topHalf) { - if (mndCannotExecuteTransAction(pMnode, topHalf)) return false; + if (mndCannotExecuteTrans(pMnode, topHalf)) return false; bool continueExec = true; int32_t code = mndTransPreFinish(pMnode, pTrans); @@ -1854,8 +1863,8 @@ void mndTransExecuteImp(SMnode *pMnode, STrans *pTrans, bool topHalf) { bool continueExec = true; while (continueExec) { - mInfo("trans:%d, continue to execute, stage:%s createTime:%" PRId64 " topHalf(TransContext):%d", pTrans->id, - mndTransStr(pTrans->stage), pTrans->createdTime, topHalf); + mInfo("trans:%d, continue to execute stage:%s in %s, createTime:%" PRId64 "", pTrans->id, + mndTransStr(pTrans->stage), mndStrExecutionContext(topHalf), pTrans->createdTime); pTrans->lastExecTime = taosGetTimestampMs(); switch (pTrans->stage) { case TRN_STAGE_PREPARE: diff --git a/source/dnode/mnode/impl/src/mndUser.c b/source/dnode/mnode/impl/src/mndUser.c index 63390d4772..6a9d67fc83 100644 --- a/source/dnode/mnode/impl/src/mndUser.c +++ b/source/dnode/mnode/impl/src/mndUser.c @@ -1706,8 +1706,8 @@ static int32_t mndCreateUser(SMnode *pMnode, char *acct, SCreateUserReq *pCreate if (pCreate->isImport != 1) { taosEncryptPass_c((uint8_t *)pCreate->pass, strlen(pCreate->pass), userObj.pass); } else { - // mInfo("pCreate->pass:%s", pCreate->pass) - strncpy(userObj.pass, pCreate->pass, TSDB_PASSWORD_LEN); + // mInfo("pCreate->pass:%s", pCreate->eass) + memcpy(userObj.pass, pCreate->pass, TSDB_PASSWORD_LEN); } tstrncpy(userObj.user, pCreate->user, TSDB_USER_LEN); tstrncpy(userObj.acct, acct, TSDB_USER_LEN); @@ -1803,6 +1803,43 @@ _OVER: TAOS_RETURN(code); } +static int32_t mndCheckPasswordFmt(const char *pwd) { + int32_t len = strlen(pwd); + if (len < TSDB_PASSWORD_MIN_LEN || len > TSDB_PASSWORD_MAX_LEN) { + return -1; + } + + if (strcmp(pwd, "taosdata") == 0) { + return 0; + } + + bool charTypes[4] = {0}; + for (int32_t i = 0; i < len; ++i) { + if (taosIsBigChar(pwd[i])) { + charTypes[0] = true; + } else if (taosIsSmallChar(pwd[i])) { + charTypes[1] = true; + } else if (taosIsNumberChar(pwd[i])) { + charTypes[2] = true; + } else if (taosIsSpecialChar(pwd[i])) { + charTypes[3] = true; + } else { + return -1; + } + } + + int32_t numOfTypes = 0; + for (int32_t i = 0; i < 4; ++i) { + numOfTypes += charTypes[i]; + } + + if (numOfTypes < 3) { + return -1; + } + + return 0; +} + static int32_t mndProcessCreateUserReq(SRpcMsg *pReq) { SMnode *pMnode = pReq->info.node; int32_t code = 0; @@ -1836,7 +1873,7 @@ static int32_t mndProcessCreateUserReq(SRpcMsg 
*pReq) { TAOS_CHECK_GOTO(TSDB_CODE_MND_INVALID_USER_FORMAT, &lino, _OVER); } - if (createReq.pass[0] == 0) { + if (mndCheckPasswordFmt(createReq.pass) != 0) { TAOS_CHECK_GOTO(TSDB_CODE_MND_INVALID_PASS_FORMAT, &lino, _OVER); } @@ -2325,8 +2362,7 @@ static int32_t mndProcessAlterUserReq(SRpcMsg *pReq) { TAOS_CHECK_GOTO(TSDB_CODE_MND_INVALID_USER_FORMAT, &lino, _OVER); } - if (TSDB_ALTER_USER_PASSWD == alterReq.alterType && - (alterReq.pass[0] == 0 || strlen(alterReq.pass) >= TSDB_PASSWORD_LEN)) { + if (TSDB_ALTER_USER_PASSWD == alterReq.alterType && mndCheckPasswordFmt(alterReq.pass) != 0) { TAOS_CHECK_GOTO(TSDB_CODE_MND_INVALID_PASS_FORMAT, &lino, _OVER); } diff --git a/source/dnode/mnode/sdb/src/sdbFile.c b/source/dnode/mnode/sdb/src/sdbFile.c index 474b22cca0..948aa870bd 100644 --- a/source/dnode/mnode/sdb/src/sdbFile.c +++ b/source/dnode/mnode/sdb/src/sdbFile.c @@ -370,7 +370,7 @@ static int32_t sdbReadFileImp(SSdb *pSdb) { opts.source = pRaw->pData; opts.result = plantContent; opts.unitLen = 16; - strncpy(opts.key, tsEncryptKey, ENCRYPT_KEY_LEN); + tstrncpy(opts.key, tsEncryptKey, ENCRYPT_KEY_LEN + 1); count = CBC_Decrypt(&opts); @@ -510,7 +510,7 @@ static int32_t sdbWriteFileImp(SSdb *pSdb, int32_t skip_type) { opts.source = pRaw->pData; opts.result = newData; opts.unitLen = 16; - strncpy(opts.key, tsEncryptKey, ENCRYPT_KEY_LEN); + tstrncpy(opts.key, tsEncryptKey, ENCRYPT_KEY_LEN + 1); int32_t count = CBC_Encrypt(&opts); diff --git a/source/dnode/vnode/src/sma/smaRollup.c b/source/dnode/vnode/src/sma/smaRollup.c index 80c04a3276..bcacac20a2 100644 --- a/source/dnode/vnode/src/sma/smaRollup.c +++ b/source/dnode/vnode/src/sma/smaRollup.c @@ -254,11 +254,12 @@ static void tdRSmaTaskInit(SStreamMeta *pMeta, SRSmaInfoItem *pItem, SStreamTask } static void tdRSmaTaskRemove(SStreamMeta *pMeta, int64_t streamId, int32_t taskId) { + streamMetaWLock(pMeta); + int32_t code = streamMetaUnregisterTask(pMeta, streamId, taskId); if (code != 0) { smaError("vgId:%d, rsma task:%" PRIi64 ",%d drop failed since %s", pMeta->vgId, streamId, taskId, tstrerror(code)); } - streamMetaWLock(pMeta); int32_t numOfTasks = streamMetaGetNumOfTasks(pMeta); if (streamMetaCommit(pMeta) < 0) { // persist to disk diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index bd78f62cae..937bc6e268 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -1108,91 +1108,76 @@ _OVER: return code; } +// always return success to mnode +//todo: handle failure of build and send msg to mnode +static void doSendChkptSourceRsp(SStreamCheckpointSourceReq* pReq, SRpcHandleInfo* pRpcInfo, int32_t code, + int32_t taskId) { + SRpcMsg rsp = {0}; + int32_t ret = streamTaskBuildCheckpointSourceRsp(pReq, pRpcInfo, &rsp, code); + if (ret) { // suppress the error in build checkpoint source rsp + tqError("s-task:0x%x failed to build checkpoint-source rsp, code:%s", taskId, tstrerror(ret)); + } + tmsgSendRsp(&rsp); // error occurs +} + // no matter what kinds of error happened, make sure the mnode will receive the success execution code. 
int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp) { - int32_t vgId = TD_VID(pTq->pVnode); - SStreamMeta* pMeta = pTq->pStreamMeta; - char* msg = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)); - int32_t len = pMsg->contLen - sizeof(SMsgHead); - int32_t code = 0; + int32_t vgId = TD_VID(pTq->pVnode); + SStreamMeta* pMeta = pTq->pStreamMeta; + char* msg = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)); + int32_t len = pMsg->contLen - sizeof(SMsgHead); + int32_t code = 0; + SStreamCheckpointSourceReq req = {0}; + SDecoder decoder = {0}; + SStreamTask* pTask = NULL; + int64_t checkpointId = 0; // disable auto rsp to mnode pRsp->info.handle = NULL; - SStreamCheckpointSourceReq req = {0}; - SDecoder decoder; tDecoderInit(&decoder, (uint8_t*)msg, len); if (tDecodeStreamCheckpointSourceReq(&decoder, &req) < 0) { code = TSDB_CODE_MSG_DECODE_ERROR; tDecoderClear(&decoder); tqError("vgId:%d failed to decode checkpoint-source msg, code:%s", vgId, tstrerror(code)); - - SRpcMsg rsp = {0}; - int32_t ret = streamTaskBuildCheckpointSourceRsp(&req, &pMsg->info, &rsp, TSDB_CODE_SUCCESS); - if (ret) { // suppress the error in build checkpointsource rsp - tqError("s-task:0x%x failed to build checkpoint-source rsp, code:%s", req.taskId, tstrerror(code)); - } - - tmsgSendRsp(&rsp); // error occurs - return TSDB_CODE_SUCCESS; // always return success to mnode, todo: handle failure of build and send msg to mnode + doSendChkptSourceRsp(&req, &pMsg->info, TSDB_CODE_SUCCESS, req.taskId); + return TSDB_CODE_SUCCESS; // always return success to mnode, } + tDecoderClear(&decoder); if (!vnodeIsRoleLeader(pTq->pVnode)) { tqDebug("vgId:%d not leader, ignore checkpoint-source msg, s-task:0x%x", vgId, req.taskId); - SRpcMsg rsp = {0}; - int32_t ret = streamTaskBuildCheckpointSourceRsp(&req, &pMsg->info, &rsp, TSDB_CODE_SUCCESS); - if (ret) { // suppress the error in build checkpointsource rsp - tqError("s-task:0x%x failed to build checkpoint-source rsp, code:%s", req.taskId, tstrerror(code)); - } - - tmsgSendRsp(&rsp); // error occurs - return TSDB_CODE_SUCCESS; // always return success to mnode, todo: handle failure of build and send msg to mnode + doSendChkptSourceRsp(&req, &pMsg->info, TSDB_CODE_SUCCESS, req.taskId); + return TSDB_CODE_SUCCESS; // always return success to mnode } if (!pTq->pVnode->restored) { tqDebug("vgId:%d checkpoint-source msg received during restoring, checkpointId:%" PRId64 ", transId:%d s-task:0x%x ignore it", vgId, req.checkpointId, req.transId, req.taskId); - SRpcMsg rsp = {0}; - int32_t ret = streamTaskBuildCheckpointSourceRsp(&req, &pMsg->info, &rsp, TSDB_CODE_SUCCESS); - if (ret) { // suppress the error in build checkpointsource rsp - tqError("s-task:0x%x failed to build checkpoint-source rsp, code:%s", req.taskId, tstrerror(code)); - } - - tmsgSendRsp(&rsp); // error occurs - return TSDB_CODE_SUCCESS; // always return success to mnode, , todo: handle failure of build and send msg to mnode + doSendChkptSourceRsp(&req, &pMsg->info, TSDB_CODE_SUCCESS, req.taskId); + return TSDB_CODE_SUCCESS; // always return success to mnode } - SStreamTask* pTask = NULL; code = streamMetaAcquireTask(pMeta, req.streamId, req.taskId, &pTask); if (pTask == NULL || code != 0) { tqError("vgId:%d failed to find s-task:0x%x, ignore checkpoint msg. 
checkpointId:%" PRId64 " transId:%d it may have been destroyed", vgId, req.taskId, req.checkpointId, req.transId); - SRpcMsg rsp = {0}; - int32_t ret = streamTaskBuildCheckpointSourceRsp(&req, &pMsg->info, &rsp, TSDB_CODE_SUCCESS); - if (ret) { // suppress the error in build checkpointsource rsp - tqError("s-task:%s failed to build checkpoint-source rsp, code:%s", pTask->id.idStr, tstrerror(code)); - } - tmsgSendRsp(&rsp); // error occurs + doSendChkptSourceRsp(&req, &pMsg->info, TSDB_CODE_SUCCESS, req.taskId); return TSDB_CODE_SUCCESS; } if (pTask->status.downstreamReady != 1) { - streamTaskSetFailedChkptInfo(pTask, req.transId, req.checkpointId); // record the latest failed checkpoint id + // record the latest failed checkpoint id + streamTaskSetFailedChkptInfo(pTask, req.transId, req.checkpointId); tqError("s-task:%s not ready for checkpoint, since downstream not ready, ignore this checkpointId:%" PRId64 ", transId:%d set it failed", pTask->id.idStr, req.checkpointId, req.transId); + streamMetaReleaseTask(pMeta, pTask); - - SRpcMsg rsp = {0}; - int32_t ret = streamTaskBuildCheckpointSourceRsp(&req, &pMsg->info, &rsp, TSDB_CODE_SUCCESS); - if (ret) { // suppress the error in build checkpointsource rsp - tqError("s-task:%s failed to build checkpoint-source rsp, code:%s", pTask->id.idStr, tstrerror(code)); - } - - tmsgSendRsp(&rsp); // error occurs + doSendChkptSourceRsp(&req, &pMsg->info, TSDB_CODE_SUCCESS, req.taskId); return TSDB_CODE_SUCCESS; // todo retry handle error } @@ -1207,14 +1192,7 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp) streamMutexUnlock(&pTask->lock); streamMetaReleaseTask(pMeta, pTask); - - SRpcMsg rsp = {0}; - int32_t ret = streamTaskBuildCheckpointSourceRsp(&req, &pMsg->info, &rsp, TSDB_CODE_SUCCESS); - if (ret) { // suppress the error in build checkpointsource rsp - tqError("s-task:%s failed to build checkpoint-source rsp, code:%s", pTask->id.idStr, tstrerror(code)); - } - - tmsgSendRsp(&rsp); // error occurs + doSendChkptSourceRsp(&req, &pMsg->info, TSDB_CODE_SUCCESS, req.taskId); return TSDB_CODE_SUCCESS; } } else { @@ -1226,7 +1204,6 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp) // check if the checkpoint msg already sent or not. 
if (status == TASK_STATUS__CK) { - int64_t checkpointId = 0; streamTaskGetActiveCheckpointInfo(pTask, NULL, &checkpointId); tqWarn("s-task:%s repeatly recv checkpoint-source msg checkpointId:%" PRId64 @@ -1235,7 +1212,7 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp) streamMutexUnlock(&pTask->lock); streamMetaReleaseTask(pMeta, pTask); - + doSendChkptSourceRsp(&req, &pMsg->info, TSDB_CODE_SYN_PROPOSE_NOT_READY, req.taskId); return TSDB_CODE_SUCCESS; } else { // checkpoint already finished, and not in checkpoint status if (req.checkpointId <= pTask->chkInfo.checkpointId) { @@ -1245,15 +1222,7 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp) streamMutexUnlock(&pTask->lock); streamMetaReleaseTask(pMeta, pTask); - - SRpcMsg rsp = {0}; - int32_t ret = streamTaskBuildCheckpointSourceRsp(&req, &pMsg->info, &rsp, TSDB_CODE_SUCCESS); - if (ret) { // suppress the error in build checkpointsource rsp - tqError("s-task:%s failed to build checkpoint-source rsp, code:%s", pTask->id.idStr, tstrerror(code)); - } - - tmsgSendRsp(&rsp); // error occurs - + doSendChkptSourceRsp(&req, &pMsg->info, TSDB_CODE_SUCCESS, req.taskId); return TSDB_CODE_SUCCESS; } } @@ -1264,7 +1233,9 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp) if (code) { qError("s-task:%s (vgId:%d) failed to process checkpoint-source req, code:%s", pTask->id.idStr, vgId, tstrerror(code)); - return code; + streamMetaReleaseTask(pMeta, pTask); + doSendChkptSourceRsp(&req, &pMsg->info, TSDB_CODE_SUCCESS, req.taskId); + return TSDB_CODE_SUCCESS; } if (req.mndTrigger) { @@ -1279,13 +1250,8 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp) code = streamAddCheckpointSourceRspMsg(&req, &pMsg->info, pTask); if (code != TSDB_CODE_SUCCESS) { - SRpcMsg rsp = {0}; - int32_t ret = streamTaskBuildCheckpointSourceRsp(&req, &pMsg->info, &rsp, TSDB_CODE_SUCCESS); - if (ret) { // suppress the error in build checkpointsource rsp - tqError("s-task:%s failed to build checkpoint-source rsp, code:%s", pTask->id.idStr, tstrerror(code)); - } - tmsgSendRsp(&rsp); // error occurs - return TSDB_CODE_SUCCESS; + streamTaskSetCheckpointFailed(pTask); // set the checkpoint failed + doSendChkptSourceRsp(&req, &pMsg->info, TSDB_CODE_SUCCESS, req.taskId); } streamMetaReleaseTask(pMeta, pTask); diff --git a/source/dnode/vnode/src/tq/tqSink.c b/source/dnode/vnode/src/tq/tqSink.c index be41f7e99e..51ef02de56 100644 --- a/source/dnode/vnode/src/tq/tqSink.c +++ b/source/dnode/vnode/src/tq/tqSink.c @@ -13,9 +13,7 @@ * along with this program. If not, see . 
*/ -#include #include "tcommon.h" -#include "tmsg.h" #include "tq.h" #define IS_NEW_SUBTB_RULE(_t) (((_t)->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER) && ((_t)->subtableWithoutMd5 != 1)) @@ -50,7 +48,7 @@ static int32_t doPutSinkTableInfoIntoCache(SSHashObj* pSinkTableMap, STableSinkI static bool doGetSinkTableInfoFromCache(SSHashObj* pTableInfoMap, uint64_t groupId, STableSinkInfo** pInfo); static int32_t doRemoveSinkTableInfoInCache(SSHashObj* pSinkTableMap, uint64_t groupId, const char* id); static int32_t checkTagSchema(SStreamTask* pTask, SVnode* pVnode); -static void reubuildAndSendMultiResBlock(SStreamTask* pTask, const SArray* pBlocks, SVnode* pVnode, int64_t earlyTs); +static void rebuildAndSendMultiResBlock(SStreamTask* pTask, const SArray* pBlocks, SVnode* pVnode, int64_t earlyTs); static int32_t handleResultBlockMsg(SStreamTask* pTask, SSDataBlock* pDataBlock, int32_t index, SVnode* pVnode, int64_t earlyTs); @@ -1062,7 +1060,7 @@ void tqSinkDataIntoDstTable(SStreamTask* pTask, void* vnode, void* data) { return; } - reubuildAndSendMultiResBlock(pTask, pBlocks, pVnode, earlyTs); + rebuildAndSendMultiResBlock(pTask, pBlocks, pVnode, earlyTs); } } @@ -1165,7 +1163,7 @@ int32_t doBuildAndSendDeleteMsg(SVnode* pVnode, char* stbFullName, SSDataBlock* return TSDB_CODE_SUCCESS; } -void reubuildAndSendMultiResBlock(SStreamTask* pTask, const SArray* pBlocks, SVnode* pVnode, int64_t earlyTs) { +void rebuildAndSendMultiResBlock(SStreamTask* pTask, const SArray* pBlocks, SVnode* pVnode, int64_t earlyTs) { int32_t code = 0; const char* id = pTask->id.idStr; int32_t vgId = pTask->pMeta->vgId; diff --git a/source/dnode/vnode/src/tq/tqStreamTask.c b/source/dnode/vnode/src/tq/tqStreamTask.c index 29372c5da7..bc7e2e28e3 100644 --- a/source/dnode/vnode/src/tq/tqStreamTask.c +++ b/source/dnode/vnode/src/tq/tqStreamTask.c @@ -17,19 +17,20 @@ #include "vnd.h" #define MAX_REPEAT_SCAN_THRESHOLD 3 -#define SCAN_WAL_IDLE_DURATION 100 +#define SCAN_WAL_IDLE_DURATION 500 // idle for 500ms to do next wal scan typedef struct SBuildScanWalMsgParam { int64_t metaId; int32_t numOfTasks; } SBuildScanWalMsgParam; -static int32_t doScanWalForAllTasks(SStreamMeta* pStreamMeta, bool* pScanIdle); +static int32_t doScanWalForAllTasks(SStreamMeta* pStreamMeta); static int32_t setWalReaderStartOffset(SStreamTask* pTask, int32_t vgId); static bool handleFillhistoryScanComplete(SStreamTask* pTask, int64_t ver); static bool taskReadyForDataFromWal(SStreamTask* pTask); static int32_t doPutDataIntoInputQ(SStreamTask* pTask, int64_t maxVer, int32_t* numOfItems, bool* pSucc); static int32_t tqScanWalInFuture(STQ* pTq, int32_t numOfTasks, int32_t idleDuration); +static int32_t doScanWalAsync(STQ* pTq, bool ckPause); // extract data blocks(submit/delete) from WAL, and add them into the input queue for all the sources tasks. 
int32_t tqScanWal(STQ* pTq) { @@ -37,12 +38,11 @@ int32_t tqScanWal(STQ* pTq) { int32_t vgId = pMeta->vgId; int64_t st = taosGetTimestampMs(); int32_t numOfTasks = 0; - bool shouldIdle = true; tqDebug("vgId:%d continue to check if data in wal are available, scanCounter:%d", vgId, pMeta->scanInfo.scanCounter); // check all tasks - int32_t code = doScanWalForAllTasks(pMeta, &shouldIdle); + int32_t code = doScanWalForAllTasks(pMeta); if (code) { tqError("vgId:%d failed to start all tasks, try next time, code:%s", vgId, tstrerror(code)); return code; @@ -133,10 +133,9 @@ int32_t tqScanWalInFuture(STQ* pTq, int32_t numOfTasks, int32_t idleDuration) { } int32_t tqScanWalAsync(STQ* pTq, bool ckPause) { - int32_t vgId = TD_VID(pTq->pVnode); SStreamMeta* pMeta = pTq->pStreamMeta; bool alreadyRestored = pTq->pVnode->restored; - int32_t numOfTasks = 0; + int32_t code = 0; // do not launch the stream tasks, if it is a follower or not restored vnode. if (!(vnodeIsRoleLeader(pTq->pVnode) && alreadyRestored)) { @@ -144,47 +143,8 @@ int32_t tqScanWalAsync(STQ* pTq, bool ckPause) { } streamMetaWLock(pMeta); - - numOfTasks = taosArrayGetSize(pMeta->pTaskList); - if (numOfTasks == 0) { - tqDebug("vgId:%d no stream tasks existed to run", vgId); - streamMetaWUnLock(pMeta); - return 0; - } - - if (pMeta->startInfo.startAllTasks) { - tqTrace("vgId:%d in restart procedure, not scan wal", vgId); - streamMetaWUnLock(pMeta); - return 0; - } - - pMeta->scanInfo.scanCounter += 1; - if (pMeta->scanInfo.scanCounter > MAX_REPEAT_SCAN_THRESHOLD) { - pMeta->scanInfo.scanCounter = MAX_REPEAT_SCAN_THRESHOLD; - } - - if (pMeta->scanInfo.scanCounter > 1) { - tqDebug("vgId:%d wal read task has been launched, remain scan times:%d", vgId, pMeta->scanInfo.scanCounter); - streamMetaWUnLock(pMeta); - return 0; - } - - int32_t numOfPauseTasks = pMeta->numOfPausedTasks; - if (ckPause && numOfTasks == numOfPauseTasks) { - tqDebug("vgId:%d ignore all submit, all streams had been paused, reset the walScanCounter", vgId); - - // reset the counter value, since we do not launch the scan wal operation. 
- pMeta->scanInfo.scanCounter = 0; - streamMetaWUnLock(pMeta); - return 0; - } - - tqDebug("vgId:%d create msg to start wal scan to launch stream tasks, numOfTasks:%d, vnd restored:%d", vgId, - numOfTasks, alreadyRestored); - - int32_t code = streamTaskSchedTask(&pTq->pVnode->msgCb, vgId, 0, 0, STREAM_EXEC_T_EXTRACT_WAL_DATA); + code = doScanWalAsync(pTq, ckPause); streamMetaWUnLock(pMeta); - return code; } @@ -368,11 +328,8 @@ int32_t doPutDataIntoInputQ(SStreamTask* pTask, int64_t maxVer, int32_t* numOfIt return code; } -int32_t doScanWalForAllTasks(SStreamMeta* pStreamMeta, bool* pScanIdle) { - *pScanIdle = true; - bool noDataInWal = true; +int32_t doScanWalForAllTasks(SStreamMeta* pStreamMeta) { int32_t vgId = pStreamMeta->vgId; - int32_t numOfTasks = taosArrayGetSize(pStreamMeta->pTaskList); if (numOfTasks == 0) { return TSDB_CODE_SUCCESS; @@ -410,8 +367,6 @@ int32_t doScanWalForAllTasks(SStreamMeta* pStreamMeta, bool* pScanIdle) { continue; } - *pScanIdle = false; - // seek the stored version and extract data from WAL code = setWalReaderStartOffset(pTask, vgId); if (code != TSDB_CODE_SUCCESS) { @@ -437,7 +392,6 @@ int32_t doScanWalForAllTasks(SStreamMeta* pStreamMeta, bool* pScanIdle) { streamMutexUnlock(&pTask->lock); if ((numOfItems > 0) || hasNewData) { - noDataInWal = false; code = streamTrySchedExec(pTask); if (code != TSDB_CODE_SUCCESS) { streamMetaReleaseTask(pStreamMeta, pTask); @@ -449,11 +403,47 @@ int32_t doScanWalForAllTasks(SStreamMeta* pStreamMeta, bool* pScanIdle) { streamMetaReleaseTask(pStreamMeta, pTask); } - // all wal are checked, and no new data available in wal. - if (noDataInWal) { - *pScanIdle = true; - } - taosArrayDestroy(pTaskList); return TSDB_CODE_SUCCESS; } + +int32_t doScanWalAsync(STQ* pTq, bool ckPause) { + SStreamMeta* pMeta = pTq->pStreamMeta; + bool alreadyRestored = pTq->pVnode->restored; + int32_t vgId = pMeta->vgId; + int32_t numOfTasks = taosArrayGetSize(pMeta->pTaskList); + + if (numOfTasks == 0) { + tqDebug("vgId:%d no stream tasks existed to run", vgId); + return 0; + } + + if (pMeta->startInfo.startAllTasks) { + tqTrace("vgId:%d in restart procedure, not scan wal", vgId); + return 0; + } + + pMeta->scanInfo.scanCounter += 1; + if (pMeta->scanInfo.scanCounter > MAX_REPEAT_SCAN_THRESHOLD) { + pMeta->scanInfo.scanCounter = MAX_REPEAT_SCAN_THRESHOLD; + } + + if (pMeta->scanInfo.scanCounter > 1) { + tqDebug("vgId:%d wal read task has been launched, remain scan times:%d", vgId, pMeta->scanInfo.scanCounter); + return 0; + } + + int32_t numOfPauseTasks = pMeta->numOfPausedTasks; + if (ckPause && numOfTasks == numOfPauseTasks) { + tqDebug("vgId:%d ignore all submit, all streams had been paused, reset the walScanCounter", vgId); + + // reset the counter value, since we do not launch the scan wal operation. 
+ pMeta->scanInfo.scanCounter = 0; + return 0; + } + + tqDebug("vgId:%d create msg to start wal scan to launch stream tasks, numOfTasks:%d, vnd restored:%d", vgId, + numOfTasks, alreadyRestored); + + return streamTaskSchedTask(&pTq->pVnode->msgCb, vgId, 0, 0, STREAM_EXEC_T_EXTRACT_WAL_DATA); +} diff --git a/source/dnode/vnode/src/tqCommon/tqCommon.c b/source/dnode/vnode/src/tqCommon/tqCommon.c index 1ea524dc78..68fb651566 100644 --- a/source/dnode/vnode/src/tqCommon/tqCommon.c +++ b/source/dnode/vnode/src/tqCommon/tqCommon.c @@ -718,8 +718,6 @@ int32_t tqStreamTaskProcessDropReq(SStreamMeta* pMeta, char* msg, int32_t msgLen } } - streamMetaWUnLock(pMeta); - // drop the related fill-history task firstly if (hTaskId.taskId != 0 && hTaskId.streamId != 0) { tqDebug("s-task:0x%x vgId:%d drop rel fill-history task:0x%x firstly", pReq->taskId, vgId, (int32_t)hTaskId.taskId); @@ -737,7 +735,6 @@ int32_t tqStreamTaskProcessDropReq(SStreamMeta* pMeta, char* msg, int32_t msgLen } // commit the update - streamMetaWLock(pMeta); int32_t numOfTasks = streamMetaGetNumOfTasks(pMeta); tqDebug("vgId:%d task:0x%x dropped, remain tasks:%d", vgId, pReq->taskId, numOfTasks); diff --git a/source/dnode/vnode/src/tsdb/tsdbFS2.c b/source/dnode/vnode/src/tsdb/tsdbFS2.c index 0c9d9e56cf..d3495422b0 100644 --- a/source/dnode/vnode/src/tsdb/tsdbFS2.c +++ b/source/dnode/vnode/src/tsdb/tsdbFS2.c @@ -396,7 +396,7 @@ static int32_t tsdbFSAddEntryToFileObjHash(STFileHash *hash, const char *fname) STFileHashEntry *entry = taosMemoryMalloc(sizeof(*entry)); if (entry == NULL) return terrno; - strncpy(entry->fname, fname, TSDB_FILENAME_LEN); + tstrncpy(entry->fname, fname, TSDB_FILENAME_LEN); uint32_t idx = MurmurHash3_32(fname, strlen(fname)) % hash->numBucket; diff --git a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c index 53e1c57f14..6dba1825ad 100644 --- a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c +++ b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c @@ -175,7 +175,7 @@ static int32_t tsdbWriteFilePage(STsdbFD *pFD, int32_t encryptAlgorithm, char *e opts.result = PacketData; opts.unitLen = 128; // strncpy(opts.key, tsEncryptKey, 16); - strncpy(opts.key, encryptKey, ENCRYPT_KEY_LEN); + tstrncpy(opts.key, encryptKey, ENCRYPT_KEY_LEN + 1); NewLen = CBC_Encrypt(&opts); @@ -249,7 +249,7 @@ static int32_t tsdbReadFilePage(STsdbFD *pFD, int64_t pgno, int32_t encryptAlgor opts.result = PacketData; opts.unitLen = 128; // strncpy(opts.key, tsEncryptKey, 16); - strncpy(opts.key, encryptKey, ENCRYPT_KEY_LEN); + tstrncpy(opts.key, encryptKey, ENCRYPT_KEY_LEN + 1); NewLen = CBC_Decrypt(&opts); diff --git a/source/dnode/vnode/src/vnd/vnodeCfg.c b/source/dnode/vnode/src/vnd/vnodeCfg.c index 7c789e84ae..2ceeeca160 100644 --- a/source/dnode/vnode/src/vnd/vnodeCfg.c +++ b/source/dnode/vnode/src/vnd/vnodeCfg.c @@ -265,7 +265,7 @@ int vnodeDecodeConfig(const SJson *pJson, void *pObj) { if (tsEncryptKey[0] == 0) { return terrno = TSDB_CODE_DNODE_INVALID_ENCRYPTKEY; } else { - strncpy(pCfg->tsdbCfg.encryptKey, tsEncryptKey, ENCRYPT_KEY_LEN); + tstrncpy(pCfg->tsdbCfg.encryptKey, tsEncryptKey, ENCRYPT_KEY_LEN + 1); } } #endif @@ -292,7 +292,7 @@ int vnodeDecodeConfig(const SJson *pJson, void *pObj) { if (tsEncryptKey[0] == 0) { return terrno = TSDB_CODE_DNODE_INVALID_ENCRYPTKEY; } else { - strncpy(pCfg->walCfg.encryptKey, tsEncryptKey, ENCRYPT_KEY_LEN); + tstrncpy(pCfg->walCfg.encryptKey, tsEncryptKey, ENCRYPT_KEY_LEN + 1); } } #endif diff --git a/source/dnode/vnode/src/vnd/vnodeCommit.c 
b/source/dnode/vnode/src/vnd/vnodeCommit.c index 3ebcf50858..28d27b8893 100644 --- a/source/dnode/vnode/src/vnd/vnodeCommit.c +++ b/source/dnode/vnode/src/vnd/vnodeCommit.c @@ -103,9 +103,11 @@ static int32_t vnodeGetBufPoolToUse(SVnode *pVnode) { } code = taosThreadCondTimedWait(&pVnode->poolNotEmpty, &pVnode->mutex, &ts); - if (code && code != TSDB_CODE_TIMEOUT_ERROR) { - TSDB_CHECK_CODE(code, lino, _exit); + // ignore timeout error and retry + if (code == TSDB_CODE_TIMEOUT_ERROR) { + code = TSDB_CODE_SUCCESS; } + TSDB_CHECK_CODE(code, lino, _exit); } } } diff --git a/source/dnode/vnode/src/vnd/vnodeOpen.c b/source/dnode/vnode/src/vnd/vnodeOpen.c index 53365303b0..3eb4c89dc1 100644 --- a/source/dnode/vnode/src/vnd/vnodeOpen.c +++ b/source/dnode/vnode/src/vnd/vnodeOpen.c @@ -190,7 +190,14 @@ int32_t vnodeRenameVgroupId(const char *srcPath, const char *dstPath, int32_t sr char *tsdbFilePrefixPos = strstr(oldRname, tsdbFilePrefix); if (tsdbFilePrefixPos == NULL) continue; - int32_t tsdbFileVgId = atoi(tsdbFilePrefixPos + prefixLen); + int32_t tsdbFileVgId = 0; // atoi(tsdbFilePrefixPos + prefixLen); + ret = taosStr2int32(tsdbFilePrefixPos + prefixLen, &tsdbFileVgId); + if (ret != 0) { + vError("vgId:%d, failed to get tsdb file vgid since %s", dstVgId, tstrerror(ret)); + tfsClosedir(tsdbDir); + return ret; + } + if (tsdbFileVgId == srcVgId) { char *tsdbFileSurfixPos = tsdbFilePrefixPos + prefixLen + vnodeVgroupIdLen(srcVgId); diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index 546cd6c3ae..aece557091 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -1024,7 +1024,7 @@ static int32_t vnodeProcessFetchTtlExpiredTbs(SVnode *pVnode, int64_t ver, void expiredTb.suid = *uid; terrno = metaReaderGetTableEntryByUid(&mr, *uid); if (terrno < 0) goto _end; - strncpy(buf, mr.me.name, TSDB_TABLE_NAME_LEN); + tstrncpy(buf, mr.me.name, TSDB_TABLE_NAME_LEN); void *p = taosArrayPush(pNames, buf); if (p == NULL) { goto _end; diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index 83227dea9e..aa0711f421 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -2449,13 +2449,21 @@ static int32_t doSaveCurrentVal(SqlFunctionCtx* pCtx, int32_t rowIndex, int64_t SFirstLastRes* pInfo = GET_ROWCELL_INTERBUF(pResInfo); if (IS_VAR_DATA_TYPE(type)) { - pInfo->bytes = varDataTLen(pData); + if (type == TSDB_DATA_TYPE_JSON) { + pInfo->bytes = getJsonValueLen(pData); + } else { + pInfo->bytes = varDataTLen(pData); + } } (void)memcpy(pInfo->buf, pData, pInfo->bytes); if (pkData != NULL) { if (IS_VAR_DATA_TYPE(pInfo->pkType)) { - pInfo->pkBytes = varDataTLen(pkData); + if (pInfo->pkType == TSDB_DATA_TYPE_JSON) { + pInfo->pkBytes = getJsonValueLen(pkData); + } else { + pInfo->pkBytes = varDataTLen(pkData); + } } (void)memcpy(pInfo->buf + pInfo->bytes, pkData, pInfo->pkBytes); pInfo->pkData = pInfo->buf + pInfo->bytes; @@ -2985,7 +2993,11 @@ static int32_t doSaveLastrow(SqlFunctionCtx* pCtx, char* pData, int32_t rowIndex pInfo->isNull = false; if (IS_VAR_DATA_TYPE(pInputCol->info.type)) { - pInfo->bytes = varDataTLen(pData); + if (pInputCol->info.type == TSDB_DATA_TYPE_JSON) { + pInfo->bytes = getJsonValueLen(pData); + } else { + pInfo->bytes = varDataTLen(pData); + } } (void)memcpy(pInfo->buf, pData, pInfo->bytes); @@ -2994,7 +3006,11 @@ static int32_t doSaveLastrow(SqlFunctionCtx* pCtx, char* pData, int32_t rowIndex if (pCtx->hasPrimaryKey && 
!colDataIsNull_s(pkCol, rowIndex)) { char* pkData = colDataGetData(pkCol, rowIndex); if (IS_VAR_DATA_TYPE(pInfo->pkType)) { - pInfo->pkBytes = varDataTLen(pkData); + if (pInfo->pkType == TSDB_DATA_TYPE_JSON) { + pInfo->pkBytes = getJsonValueLen(pkData); + } else { + pInfo->pkBytes = varDataTLen(pkData); + } } (void)memcpy(pInfo->buf + pInfo->bytes, pkData, pInfo->pkBytes); pInfo->pkData = pInfo->buf + pInfo->bytes; @@ -5872,7 +5888,11 @@ void modeFunctionCleanupExt(SqlFunctionCtx* pCtx) { static int32_t saveModeTupleData(SqlFunctionCtx* pCtx, char* data, SModeInfo *pInfo, STuplePos* pPos) { if (IS_VAR_DATA_TYPE(pInfo->colType)) { - (void)memcpy(pInfo->buf, data, varDataTLen(data)); + if (pInfo->colType == TSDB_DATA_TYPE_JSON) { + (void)memcpy(pInfo->buf, data, getJsonValueLen(data)); + } else { + (void)memcpy(pInfo->buf, data, varDataTLen(data)); + } } else { (void)memcpy(pInfo->buf, data, pInfo->colBytes); } @@ -5882,7 +5902,16 @@ static int32_t saveModeTupleData(SqlFunctionCtx* pCtx, char* data, SModeInfo *pI static int32_t doModeAdd(SModeInfo* pInfo, int32_t rowIndex, SqlFunctionCtx* pCtx, char* data) { int32_t code = TSDB_CODE_SUCCESS; - int32_t hashKeyBytes = IS_STR_DATA_TYPE(pInfo->colType) ? varDataTLen(data) : pInfo->colBytes; + int32_t hashKeyBytes; + if (IS_VAR_DATA_TYPE(pInfo->colType)) { + if (pInfo->colType == TSDB_DATA_TYPE_JSON) { + hashKeyBytes = getJsonValueLen(data); + } else { + hashKeyBytes = varDataTLen(data); + } + } else { + hashKeyBytes = pInfo->colBytes; + } SModeItem* pHashItem = (SModeItem *)taosHashGet(pInfo->pHash, data, hashKeyBytes); if (pHashItem == NULL) { @@ -6654,14 +6683,32 @@ static void doSaveRateInfo(SRateInfo* pRateInfo, bool isFirst, int64_t ts, char* pRateInfo->firstValue = v; pRateInfo->firstKey = ts; if (pRateInfo->firstPk) { - int32_t pkBytes = IS_VAR_DATA_TYPE(pRateInfo->pkType) ? varDataTLen(pk) : pRateInfo->pkBytes; + int32_t pkBytes; + if (IS_VAR_DATA_TYPE(pRateInfo->pkType)) { + if (pRateInfo->pkType == TSDB_DATA_TYPE_JSON) { + pkBytes = getJsonValueLen(pk); + } else { + pkBytes = varDataTLen(pk); + } + } else { + pkBytes = pRateInfo->pkBytes; + } (void)memcpy(pRateInfo->firstPk, pk, pkBytes); } } else { pRateInfo->lastValue = v; pRateInfo->lastKey = ts; if (pRateInfo->lastPk) { - int32_t pkBytes = IS_VAR_DATA_TYPE(pRateInfo->pkType) ? 
varDataTLen(pk) : pRateInfo->pkBytes; + int32_t pkBytes; + if (IS_VAR_DATA_TYPE(pRateInfo->pkType)) { + if (pRateInfo->pkType == TSDB_DATA_TYPE_JSON) { + pkBytes = getJsonValueLen(pk); + } else { + pkBytes = varDataTLen(pk); + } + } else { + pkBytes = pRateInfo->pkBytes; + } (void)memcpy(pRateInfo->lastPk, pk, pkBytes); } } diff --git a/source/libs/monitorfw/src/taos_monitor_util.c b/source/libs/monitorfw/src/taos_monitor_util.c index b0eff27507..1240526d48 100644 --- a/source/libs/monitorfw/src/taos_monitor_util.c +++ b/source/libs/monitorfw/src/taos_monitor_util.c @@ -41,10 +41,11 @@ void taos_monitor_split_str_metric(char** arr, taos_metric_t* metric, const char memset(name, 0, size + 1); memcpy(name, metric->name, size); - char* s = strtok(name, del); + char* saveptr; + char* s = strtok_r(name, del, &saveptr); while (s != NULL) { *arr++ = s; - s = strtok(NULL, del); + s = strtok_r(NULL, del, &saveptr); } *buf = name; diff --git a/source/libs/nodes/src/nodesUtilFuncs.c b/source/libs/nodes/src/nodesUtilFuncs.c index d75f39f376..08fa18ab4b 100644 --- a/source/libs/nodes/src/nodesUtilFuncs.c +++ b/source/libs/nodes/src/nodesUtilFuncs.c @@ -2950,17 +2950,16 @@ int32_t nodesValueNodeToVariant(const SValueNode* pNode, SVariant* pVal) { case TSDB_DATA_TYPE_VARCHAR: case TSDB_DATA_TYPE_VARBINARY: case TSDB_DATA_TYPE_GEOMETRY: - pVal->pz = taosMemoryMalloc(pVal->nLen + 1); + pVal->pz = taosMemoryCalloc(1, pVal->nLen + 1); if (pVal->pz) { - memcpy(pVal->pz, pNode->datum.p, pVal->nLen); - pVal->pz[pVal->nLen] = 0; + memcpy(pVal->pz, pNode->datum.p, varDataTLen(pNode->datum.p)); } else { code = terrno; } break; case TSDB_DATA_TYPE_JSON: pVal->nLen = getJsonValueLen(pNode->datum.p); - pVal->pz = taosMemoryMalloc(pVal->nLen); + pVal->pz = taosMemoryCalloc(1, pVal->nLen); if (pVal->pz) { memcpy(pVal->pz, pNode->datum.p, pVal->nLen); } else { diff --git a/source/libs/parser/src/parAstCreater.c b/source/libs/parser/src/parAstCreater.c index 531c6afc73..e2e6ef23eb 100644 --- a/source/libs/parser/src/parAstCreater.c +++ b/source/libs/parser/src/parAstCreater.c @@ -2872,13 +2872,16 @@ static int32_t getIpV4RangeFromWhitelistItem(char* ipRange, SIpV4Range* pIpRange *slash = '\0'; struct in_addr addr; if (uv_inet_pton(AF_INET, ipCopy, &addr) == 0) { - int prefix = atoi(slash + 1); - if (prefix < 0 || prefix > 32) { - code = TSDB_CODE_PAR_INVALID_IP_RANGE; - } else { - pIpRange->ip = addr.s_addr; - pIpRange->mask = prefix; - code = TSDB_CODE_SUCCESS; + int32_t prefix = 0; + code = taosStr2int32(slash + 1, &prefix); + if (code == 0) { + if (prefix < 0 || prefix > 32) { + code = TSDB_CODE_PAR_INVALID_IP_RANGE; + } else { + pIpRange->ip = addr.s_addr; + pIpRange->mask = prefix; + code = TSDB_CODE_SUCCESS; + } } } else { code = TSDB_CODE_PAR_INVALID_IP_RANGE; diff --git a/source/libs/parser/src/parUtil.c b/source/libs/parser/src/parUtil.c index bfe9513594..6cfbacce5b 100644 --- a/source/libs/parser/src/parUtil.c +++ b/source/libs/parser/src/parUtil.c @@ -612,10 +612,10 @@ static int32_t getIntegerFromAuthStr(const char* pStart, char** pNext) { return taosStr2Int32(buf, NULL, 10); } -static void getStringFromAuthStr(const char* pStart, char* pStr, char** pNext) { +static void getStringFromAuthStr(const char* pStart, char* pStr, uint32_t dstLen, char** pNext) { char* p = strchr(pStart, '*'); if (NULL == p) { - tstrncpy(pStr, pStart, strlen(pStart) + 1); + tstrncpy(pStr, pStart, dstLen); *pNext = NULL; } else { strncpy(pStr, pStart, p - pStart); @@ -628,10 +628,10 @@ static void getStringFromAuthStr(const char* 
pStart, char* pStr, char** pNext) { static void stringToUserAuth(const char* pStr, int32_t len, SUserAuthInfo* pUserAuth) { char* p = NULL; - getStringFromAuthStr(pStr, pUserAuth->user, &p); + getStringFromAuthStr(pStr, pUserAuth->user, TSDB_USER_LEN, &p); pUserAuth->tbName.acctId = getIntegerFromAuthStr(p, &p); - getStringFromAuthStr(p, pUserAuth->tbName.dbname, &p); - getStringFromAuthStr(p, pUserAuth->tbName.tname, &p); + getStringFromAuthStr(p, pUserAuth->tbName.dbname, TSDB_DB_NAME_LEN, &p); + getStringFromAuthStr(p, pUserAuth->tbName.tname, TSDB_TABLE_NAME_LEN, &p); if (pUserAuth->tbName.tname[0]) { pUserAuth->tbName.type = TSDB_TABLE_NAME_T; } else { diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index af232c667d..5d47e73e42 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -91,11 +91,12 @@ static int32_t getSlotKey(SNode* pNode, const char* pStmtName, char** ppKey, int *pLen = taosHashBinary(*ppKey, strlen(*ppKey)); return code; } - *ppKey = taosMemoryCalloc(1, strlen(pVal->literal) + 1 + TSDB_COL_NAME_LEN + 1 + extraBufLen); + int32_t literalLen = strlen(pVal->literal); + *ppKey = taosMemoryCalloc(1, literalLen + 1 + TSDB_COL_NAME_LEN + 1 + extraBufLen); if (!*ppKey) { return terrno; } - TAOS_STRNCAT(*ppKey, pVal->literal, strlen(pVal->literal)); + TAOS_STRNCAT(*ppKey, pVal->literal, literalLen); TAOS_STRNCAT(*ppKey, ".", 2); TAOS_STRNCAT(*ppKey, ((SExprNode*)pNode)->aliasName, TSDB_COL_NAME_LEN); *pLen = taosHashBinary(*ppKey, strlen(*ppKey)); diff --git a/source/libs/scalar/src/scalar.c b/source/libs/scalar/src/scalar.c index 209110b014..b3610d035f 100644 --- a/source/libs/scalar/src/scalar.c +++ b/source/libs/scalar/src/scalar.c @@ -1241,7 +1241,6 @@ EDealRes sclRewriteFunction(SNode **pNode, SScalarCtx *ctx) { ctx->code = TSDB_CODE_OUT_OF_MEMORY; return DEAL_RES_ERROR; } - res->node.resType.bytes = varDataTLen(output.columnData->pData); (void)memcpy(res->datum.p, output.columnData->pData, varDataTLen(output.columnData->pData)); } else { ctx->code = nodesSetValueNodeValue(res, output.columnData->pData); diff --git a/source/libs/scheduler/src/schTask.c b/source/libs/scheduler/src/schTask.c index 9be0e3fc40..37249b5418 100644 --- a/source/libs/scheduler/src/schTask.c +++ b/source/libs/scheduler/src/schTask.c @@ -1129,7 +1129,11 @@ int32_t schLaunchRemoteTask(SSchJob *pJob, SSchTask *pTask) { int32_t schLaunchLocalTask(SSchJob *pJob, SSchTask *pTask) { // SCH_ERR_JRET(schSetTaskCandidateAddrs(pJob, pTask)); if (NULL == schMgmt.queryMgmt) { - SCH_ERR_RET(qWorkerInit(NODE_TYPE_CLIENT, CLIENT_HANDLE, (void **)&schMgmt.queryMgmt, NULL)); + void* p = NULL; + SCH_ERR_RET(qWorkerInit(NODE_TYPE_CLIENT, CLIENT_HANDLE, &p, NULL)); + if (atomic_val_compare_exchange_ptr(&schMgmt.queryMgmt, NULL, p)) { + qWorkerDestroy(&p); + } } SArray *explainRes = NULL; diff --git a/source/libs/stream/inc/streamBackendRocksdb.h b/source/libs/stream/inc/streamBackendRocksdb.h index d313acc61d..d053a52832 100644 --- a/source/libs/stream/inc/streamBackendRocksdb.h +++ b/source/libs/stream/inc/streamBackendRocksdb.h @@ -166,10 +166,12 @@ int32_t streamStateDel_rocksdb(SStreamState* pState, const SWinKey* key); int32_t streamStateClear_rocksdb(SStreamState* pState); void streamStateCurNext_rocksdb(SStreamStateCur* pCur); int32_t streamStateGetFirst_rocksdb(SStreamState* pState, SWinKey* key); -int32_t streamStateGetGroupKVByCur_rocksdb(SStreamState* pState, SStreamStateCur* pCur, SWinKey* pKey, const 
void** pVal, int32_t* pVLen); +int32_t streamStateGetGroupKVByCur_rocksdb(SStreamState* pState, SStreamStateCur* pCur, SWinKey* pKey, + const void** pVal, int32_t* pVLen); int32_t streamStateAddIfNotExist_rocksdb(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen); void streamStateCurPrev_rocksdb(SStreamStateCur* pCur); -int32_t streamStateGetKVByCur_rocksdb(SStreamState* pState, SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen); +int32_t streamStateGetKVByCur_rocksdb(SStreamState* pState, SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, + int32_t* pVLen); SStreamStateCur* streamStateGetAndCheckCur_rocksdb(SStreamState* pState, SWinKey* key); SStreamStateCur* streamStateSeekKeyNext_rocksdb(SStreamState* pState, const SWinKey* key); SStreamStateCur* streamStateSeekKeyPrev_rocksdb(SStreamState* pState, const SWinKey* key); @@ -217,15 +219,14 @@ int32_t streamStateFillGetGroupKVByCur_rocksdb(SStreamStateCur* pCur, SWinKey* p // partag cf int32_t streamStatePutParTag_rocksdb(SStreamState* pState, int64_t groupId, const void* tag, int32_t tagLen); int32_t streamStateGetParTag_rocksdb(SStreamState* pState, int64_t groupId, void** tagVal, int32_t* tagLen); -void streamStateParTagSeekKeyNext_rocksdb(SStreamState* pState, const int64_t groupId, SStreamStateCur* pCur); -int32_t streamStateParTagGetKVByCur_rocksdb(SStreamStateCur* pCur, int64_t* pGroupId, const void** pVal, int32_t* pVLen); +void streamStateParTagSeekKeyNext_rocksdb(SStreamState* pState, const int64_t groupId, SStreamStateCur* pCur); +int32_t streamStateParTagGetKVByCur_rocksdb(SStreamStateCur* pCur, int64_t* pGroupId, const void** pVal, + int32_t* pVLen); // parname cf int32_t streamStatePutParName_rocksdb(SStreamState* pState, int64_t groupId, const char tbname[TSDB_TABLE_NAME_LEN]); int32_t streamStateGetParName_rocksdb(SStreamState* pState, int64_t groupId, void** pVal); -void streamStateDestroy_rocksdb(SStreamState* pState, bool remove); - // default cf int32_t streamDefaultPut_rocksdb(SStreamState* pState, const void* key, void* pVal, int32_t pVLen); int32_t streamDefaultGet_rocksdb(SStreamState* pState, const void* key, void** pVal, int32_t* pVLen); diff --git a/source/libs/stream/inc/streamInt.h b/source/libs/stream/inc/streamInt.h index 863bc76c79..8f9e4a311c 100644 --- a/source/libs/stream/inc/streamInt.h +++ b/source/libs/stream/inc/streamInt.h @@ -37,7 +37,7 @@ extern "C" { #define META_HB_CHECK_INTERVAL 200 #define META_HB_SEND_IDLE_COUNTER 25 // send hb every 5 sec #define STREAM_TASK_KEY_LEN ((sizeof(int64_t)) << 1) -#define STREAM_TASK_QUEUE_CAPACITY 20480 +#define STREAM_TASK_QUEUE_CAPACITY 5120 #define STREAM_TASK_QUEUE_CAPACITY_IN_SIZE (30) // clang-format off diff --git a/source/libs/stream/src/streamBackendRocksdb.c b/source/libs/stream/src/streamBackendRocksdb.c index 68c99aa1b3..f7cac3b562 100644 --- a/source/libs/stream/src/streamBackendRocksdb.c +++ b/source/libs/stream/src/streamBackendRocksdb.c @@ -153,7 +153,6 @@ void taskDbUnRefChkp(STaskDbWrapper* pTaskDb, int64_t chkp); int32_t chkpAddExtraInfo(char* pChkpIdDir, int64_t chkpId, int64_t processId); int32_t chkpLoadExtraInfo(char* pChkpIdDir, int64_t* chkpId, int64_t* processId); -#define GEN_COLUMN_FAMILY_NAME(name, idstr, SUFFIX) sprintf(name, "%s_%s", idstr, (SUFFIX)); int32_t copyFiles(const char* src, const char* dst); uint32_t nextPow2(uint32_t x); @@ -1156,13 +1155,17 @@ int32_t chkpMayDelObsolete(void* arg, int64_t chkpId, char* path) { taosArrayDestroy(pBackend->chkpSaved); pBackend->chkpSaved = 
chkpDup; + chkpDup = NULL; TAOS_UNUSED(taosThreadRwlockUnlock(&pBackend->chkpDirLock)); for (int i = 0; i < taosArrayGetSize(chkpDel); i++) { int64_t id = *(int64_t*)taosArrayGet(chkpDel, i); char tbuf[256] = {0}; - sprintf(tbuf, "%s%scheckpoint%" PRId64 "", path, TD_DIRSEP, id); + if (snprintf(tbuf, sizeof(tbuf), "%s%scheckpoint%" PRId64 "", path, TD_DIRSEP, id) >= sizeof(tbuf)) { + code = TSDB_CODE_OUT_OF_RANGE; + TAOS_CHECK_GOTO(code, NULL, _exception); + } stInfo("backend remove obsolete checkpoint: %s", tbuf); if (taosIsDir(tbuf)) { @@ -1187,12 +1190,17 @@ int chkpIdComp(const void* a, const void* b) { } int32_t taskDbLoadChkpInfo(STaskDbWrapper* pBackend) { int32_t code = 0; - char* pChkpDir = taosMemoryCalloc(1, 256); + int32_t nBytes = 0; + int32_t cap = 256; + char* pChkpDir = taosMemoryCalloc(1, cap); if (pChkpDir == NULL) { return terrno; } - sprintf(pChkpDir, "%s%s%s", pBackend->path, TD_DIRSEP, "checkpoints"); + nBytes = snprintf(pChkpDir, cap, "%s%s%s", pBackend->path, TD_DIRSEP, "checkpoints"); + if (nBytes >= cap) { + return TSDB_CODE_OUT_OF_RANGE; + } if (!taosIsDir(pChkpDir)) { taosMemoryFree(pChkpDir); return 0; @@ -1413,12 +1421,18 @@ int32_t taskDbDestroySnap(void* arg, SArray* pSnapInfo) { if (pSnapInfo == NULL) return 0; SStreamMeta* pMeta = arg; int32_t code = 0; + int32_t cap = 256; + int32_t nBytes = 0; streamMutexLock(&pMeta->backendMutex); - char buf[128] = {0}; + char buf[256] = {0}; for (int i = 0; i < taosArrayGetSize(pSnapInfo); i++) { SStreamTaskSnap* pSnap = taosArrayGet(pSnapInfo, i); - sprintf(buf, "0x%" PRIx64 "-0x%x", pSnap->streamId, (int32_t)pSnap->taskId); + nBytes = snprintf(buf, cap, "0x%" PRIx64 "-0x%x", pSnap->streamId, (int32_t)pSnap->taskId); + if (nBytes <= 0 || nBytes >= cap) { + code = TSDB_CODE_OUT_OF_RANGE; + break; + } STaskDbWrapper** pTaskDb = taosHashGet(pMeta->pTaskDbUnique, buf, strlen(buf)); if (pTaskDb == NULL || *pTaskDb == NULL) { stWarn("stream backend:%p failed to find task db, streamId:% " PRId64 "", pMeta, pSnap->streamId); @@ -1430,7 +1444,7 @@ int32_t taskDbDestroySnap(void* arg, SArray* pSnapInfo) { taskDbUnRefChkp(*pTaskDb, pSnap->chkpId); } streamMutexUnlock(&pMeta->backendMutex); - return 0; + return code; } #ifdef BUILD_NO_CALL int32_t streamBackendAddInUseChkp(void* arg, int64_t chkpId) { @@ -1685,9 +1699,6 @@ void streamBackendDelCompare(void* backend, void* arg) { taosMemoryFree(node); } } -#ifdef BUILD_NO_CALL -void streamStateDestroy_rocksdb(SStreamState* pState, bool remove) { streamStateCloseBackend(pState, remove); } -#endif void destroyRocksdbCfInst(RocksdbCfInst* inst) { int cfLen = sizeof(ginitDict) / sizeof(ginitDict[0]); if (inst->pHandle) { @@ -1712,7 +1723,7 @@ void destroyRocksdbCfInst(RocksdbCfInst* inst) { } // |key|-----value------| -// |key|ttl|len|userData| +// |key|ttl|len|userData int defaultKeyComp(void* state, const char* aBuf, size_t aLen, const char* bBuf, size_t bLen) { int len = aLen < bLen ? 
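/*
 * A minimal sketch of the sprintf -> snprintf conversion applied in
 * chkpMayDelObsolete / taskDbLoadChkpInfo / taskDbDestroySnap above: format into
 * a fixed buffer and treat a negative return or a return value >= the capacity
 * as an error instead of silently truncating. DEMO_ERR_OUT_OF_RANGE stands in
 * for TSDB_CODE_OUT_OF_RANGE; the path layout mirrors the checkpoint directory
 * naming used above.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_ERR_OUT_OF_RANGE (-1)

static int buildCheckpointPath(char *buf, size_t cap, const char *dir, int64_t chkpId) {
  int n = snprintf(buf, cap, "%s/checkpoint%" PRId64, dir, chkpId);
  if (n < 0 || (size_t)n >= cap) {
    return DEMO_ERR_OUT_OF_RANGE;   /* the result would not fit (or formatting failed) */
  }
  return 0;                         /* buf now holds the full, NUL-terminated path     */
}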
aLen : bLen; @@ -2447,43 +2458,50 @@ void taskDbDestroyChkpOpt(STaskDbWrapper* pTaskDb) { int32_t taskDbBuildFullPath(char* path, char* key, char** dbFullPath, char** stateFullPath) { int32_t code = 0; - char* statePath = taosMemoryCalloc(1, strlen(path) + 128); + int32_t cap = strlen(path) + 128, nBytes = 0; + char* statePath = NULL; + char* dbPath = NULL; + + statePath = taosMemoryCalloc(1, cap); if (statePath == NULL) { - return terrno; + TAOS_CHECK_GOTO(terrno, NULL, _err); + } + + nBytes = snprintf(statePath, cap, "%s%s%s", path, TD_DIRSEP, key); + if (nBytes < 0 || nBytes >= cap) { + code = TSDB_CODE_OUT_OF_RANGE; + TAOS_CHECK_GOTO(code, NULL, _err); } - sprintf(statePath, "%s%s%s", path, TD_DIRSEP, key); if (!taosDirExist(statePath)) { code = taosMulMkDir(statePath); - if (code != 0) { - code = TAOS_SYSTEM_ERROR(errno); - stError("failed to create dir: %s, reason:%s", statePath, tstrerror(code)); - taosMemoryFree(statePath); - return code; - } + TAOS_CHECK_GOTO(code, NULL, _err); } - char* dbPath = taosMemoryCalloc(1, strlen(statePath) + 128); + dbPath = taosMemoryCalloc(1, cap); if (dbPath == NULL) { - taosMemoryFree(statePath); - return terrno; + TAOS_CHECK_GOTO(terrno, NULL, _err); + } + nBytes = snprintf(dbPath, cap, "%s%s%s", statePath, TD_DIRSEP, "state"); + if (nBytes < 0 || nBytes >= cap) { + code = TSDB_CODE_OUT_OF_RANGE; + TAOS_CHECK_GOTO(code, NULL, _err); } - sprintf(dbPath, "%s%s%s", statePath, TD_DIRSEP, "state"); if (!taosDirExist(dbPath)) { code = taosMulMkDir(dbPath); - if (code != 0) { - code = TAOS_SYSTEM_ERROR(errno); - stError("failed to create dir: %s, reason:%s", dbPath, tstrerror(code)); - taosMemoryFree(statePath); - taosMemoryFree(dbPath); - return code; - } + TAOS_CHECK_GOTO(code, NULL, _err); } *dbFullPath = dbPath; *stateFullPath = statePath; return 0; +_err: + stError("failed to create dir: %s, reason:%s", dbPath, tstrerror(code)); + + taosMemoryFree(statePath); + taosMemoryFree(dbPath); + return code; } void taskDbUpdateChkpId(void* pTaskDb, int64_t chkpId) { @@ -2864,6 +2882,7 @@ int32_t streamStateOpenBackendCf(void* backend, char* name, char** cfs, int32_t int64_t streamId; int32_t taskId, dummy = 0; char suffix[64] = {0}; + int32_t code = 0; rocksdb_options_t** cfOpts = taosMemoryCalloc(nCf, sizeof(rocksdb_options_t*)); RocksdbCfParam* params = taosMemoryCalloc(nCf, sizeof(RocksdbCfParam)); @@ -2873,6 +2892,7 @@ int32_t streamStateOpenBackendCf(void* backend, char* name, char** cfs, int32_t for (int i = 0; i < nCf; i++) { char* cf = cfs[i]; char funcname[64] = {0}; + cfOpts[i] = rocksdb_options_create_copy(handle->dbOpt); if (i == 0) continue; if (3 == sscanf(cf, "0x%" PRIx64 "-%d_%s", &streamId, &taskId, funcname)) { @@ -2909,7 +2929,7 @@ int32_t streamStateOpenBackendCf(void* backend, char* name, char** cfs, int32_t taosMemoryFree(params); taosMemoryFree(cfOpts); // fix other leak - return -1; + return TSDB_CODE_THIRDPARTY_ERROR; } else { stDebug("succ to open rocksdb cf"); } @@ -2929,8 +2949,13 @@ int32_t streamStateOpenBackendCf(void* backend, char* name, char** cfs, int32_t char funcname[64] = {0}; if (3 == sscanf(cf, "0x%" PRIx64 "-%d_%s", &streamId, &taskId, funcname)) { - char idstr[128] = {0}; - sprintf(idstr, "0x%" PRIx64 "-%d", streamId, taskId); + char idstr[128] = {0}; + int32_t nBytes = snprintf(idstr, sizeof(idstr), "0x%" PRIx64 "-%d", streamId, taskId); + if (nBytes <= 0 || nBytes >= sizeof(idstr)) { + code = TSDB_CODE_OUT_OF_RANGE; + stError("failed to open cf since %s", tstrerror(code)); + return code; + } int idx = 
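/*
 * A standalone sketch of the single-exit error handling that taskDbBuildFullPath
 * is refactored to above: every failure jumps to one cleanup label that releases
 * whatever has been allocated so far. TAOS_CHECK_GOTO is replaced by a plain
 * goto, the directory-creation calls are omitted, and the "<path>/<key>/state"
 * layout mirrors the code above; error codes are placeholders.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int buildTwoPaths(const char *path, const char *key, char **stateOut, char **dbOut) {
  int    code = 0;
  size_t cap = strlen(path) + strlen(key) + 32;   /* room for separators and "state" */
  char  *statePath = NULL;
  char  *dbPath = NULL;

  statePath = calloc(1, cap);
  if (statePath == NULL) { code = -1; goto _err; }
  int n = snprintf(statePath, cap, "%s/%s", path, key);
  if (n < 0 || (size_t)n >= cap) { code = -2; goto _err; }

  dbPath = calloc(1, cap);
  if (dbPath == NULL) { code = -1; goto _err; }
  n = snprintf(dbPath, cap, "%s/state", statePath);
  if (n < 0 || (size_t)n >= cap) { code = -2; goto _err; }

  *stateOut = statePath;   /* ownership passes to the caller on success */
  *dbOut = dbPath;
  return 0;

_err:
  free(statePath);         /* free(NULL) is a no-op, so partial setup is safe */
  free(dbPath);
  return code;
}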
streamStateGetCfIdx(NULL, funcname); @@ -2997,117 +3022,7 @@ int32_t streamStateOpenBackendCf(void* backend, char* name, char** cfs, int32_t taosMemoryFree(cfOpts); return 0; } -#ifdef BUILD_NO_CALL -int streamStateOpenBackend(void* backend, SStreamState* pState) { - taosAcquireRef(streamBackendId, pState->streamBackendRid); - SBackendWrapper* handle = backend; - SBackendCfWrapper* pBackendCfWrapper = taosMemoryCalloc(1, sizeof(SBackendCfWrapper)); - streamMutexLock(&handle->cfMutex); - RocksdbCfInst** ppInst = taosHashGet(handle->cfInst, pState->pTdbState->idstr, strlen(pState->pTdbState->idstr) + 1); - if (ppInst != NULL && *ppInst != NULL) { - RocksdbCfInst* inst = *ppInst; - pBackendCfWrapper->rocksdb = inst->db; - pBackendCfWrapper->pHandle = (void**)inst->pHandle; - pBackendCfWrapper->writeOpts = inst->wOpt; - pBackendCfWrapper->readOpts = inst->rOpt; - pBackendCfWrapper->cfOpts = (void**)(inst->cfOpt); - pBackendCfWrapper->dbOpt = handle->dbOpt; - pBackendCfWrapper->param = inst->param; - pBackendCfWrapper->pBackend = handle; - pBackendCfWrapper->pComparNode = inst->pCompareNode; - streamMutexUnlock(&handle->cfMutex); - pBackendCfWrapper->backendId = pState->streamBackendRid; - memcpy(pBackendCfWrapper->idstr, pState->pTdbState->idstr, sizeof(pState->pTdbState->idstr)); - - int64_t id = taosAddRef(streamBackendCfWrapperId, pBackendCfWrapper); - pState->pTdbState->backendCfWrapperId = id; - pState->pTdbState->pBackendCfWrapper = pBackendCfWrapper; - stInfo("succ to open state %p on backendWrapper, %p, %s", pState, pBackendCfWrapper, pBackendCfWrapper->idstr); - - inst->pHandle = NULL; - inst->cfOpt = NULL; - inst->param = NULL; - - inst->wOpt = NULL; - inst->rOpt = NULL; - return 0; - } - streamMutexUnlock(&handle->cfMutex); - - char* err = NULL; - int cfLen = sizeof(ginitDict) / sizeof(ginitDict[0]); - - RocksdbCfParam* param = taosMemoryCalloc(cfLen, sizeof(RocksdbCfParam)); - const rocksdb_options_t** cfOpt = taosMemoryCalloc(cfLen, sizeof(rocksdb_options_t*)); - for (int i = 0; i < cfLen; i++) { - cfOpt[i] = rocksdb_options_create_copy(handle->dbOpt); - // refactor later - rocksdb_block_based_table_options_t* tableOpt = rocksdb_block_based_options_create(); - rocksdb_block_based_options_set_block_cache(tableOpt, handle->cache); - rocksdb_block_based_options_set_partition_filters(tableOpt, 1); - - rocksdb_filterpolicy_t* filter = rocksdb_filterpolicy_create_bloom(15); - rocksdb_block_based_options_set_filter_policy(tableOpt, filter); - - rocksdb_options_set_block_based_table_factory((rocksdb_options_t*)cfOpt[i], tableOpt); - - param[i].tableOpt = tableOpt; - }; - - rocksdb_comparator_t** pCompare = taosMemoryCalloc(cfLen, sizeof(rocksdb_comparator_t*)); - for (int i = 0; i < cfLen; i++) { - SCfInit* cf = &ginitDict[i]; - - rocksdb_comparator_t* compare = rocksdb_comparator_create(NULL, cf->destroyCmp, cf->cmpKey, cf->cmpName); - rocksdb_options_set_comparator((rocksdb_options_t*)cfOpt[i], compare); - pCompare[i] = compare; - } - rocksdb_column_family_handle_t** cfHandle = taosMemoryCalloc(cfLen, sizeof(rocksdb_column_family_handle_t*)); - pBackendCfWrapper->rocksdb = handle->db; - pBackendCfWrapper->pHandle = (void**)cfHandle; - pBackendCfWrapper->writeOpts = rocksdb_writeoptions_create(); - pBackendCfWrapper->readOpts = rocksdb_readoptions_create(); - pBackendCfWrapper->cfOpts = (void**)cfOpt; - pBackendCfWrapper->dbOpt = handle->dbOpt; - pBackendCfWrapper->param = param; - pBackendCfWrapper->pBackend = handle; - pBackendCfWrapper->backendId = pState->streamBackendRid; - 
taosThreadRwlockInit(&pBackendCfWrapper->rwLock, NULL); - SCfComparator compare = {.comp = pCompare, .numOfComp = cfLen}; - pBackendCfWrapper->pComparNode = streamBackendAddCompare(handle, &compare); - rocksdb_writeoptions_disable_WAL(pBackendCfWrapper->writeOpts, 1); - memcpy(pBackendCfWrapper->idstr, pState->pTdbState->idstr, sizeof(pState->pTdbState->idstr)); - - int64_t id = taosAddRef(streamBackendCfWrapperId, pBackendCfWrapper); - pState->pTdbState->backendCfWrapperId = id; - pState->pTdbState->pBackendCfWrapper = pBackendCfWrapper; - stInfo("succ to open state %p on backendWrapper %p %s", pState, pBackendCfWrapper, pBackendCfWrapper->idstr); - return 0; -} - -void streamStateCloseBackend(SStreamState* pState, bool remove) { - SBackendCfWrapper* wrapper = pState->pTdbState->pBackendCfWrapper; - SBackendWrapper* pHandle = wrapper->pBackend; - - stInfo("start to close state on backend: %p", pHandle); - - streamMutexLock(&pHandle->cfMutex); - RocksdbCfInst** ppInst = taosHashGet(pHandle->cfInst, wrapper->idstr, strlen(pState->pTdbState->idstr) + 1); - if (ppInst != NULL && *ppInst != NULL) { - RocksdbCfInst* inst = *ppInst; - taosMemoryFree(inst); - taosHashRemove(pHandle->cfInst, pState->pTdbState->idstr, strlen(pState->pTdbState->idstr) + 1); - } - streamMutexUnlock(&pHandle->cfMutex); - - char* status[] = {"close", "drop"}; - stInfo("start to %s state %p on backendWrapper %p %s", status[remove == false ? 0 : 1], pState, wrapper, - wrapper->idstr); - wrapper->remove |= remove; // update by other pState - taosReleaseRef(streamBackendCfWrapperId, pState->pTdbState->backendCfWrapperId); -} -#endif void streamStateDestroyCompar(void* arg) { SCfComparator* comp = (SCfComparator*)arg; for (int i = 0; i < comp->numOfComp; i++) { @@ -3386,7 +3301,8 @@ int32_t streamStateGetFirst_rocksdb(SStreamState* pState, SWinKey* key) { return streamStateDel_rocksdb(pState, &tmp); } -int32_t streamStateFillGetGroupKVByCur_rocksdb(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen) { +int32_t streamStateFillGetGroupKVByCur_rocksdb(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, + int32_t* pVLen) { if (!pCur) { return -1; } @@ -4343,7 +4259,7 @@ int32_t streamStatePutParTag_rocksdb(SStreamState* pState, int64_t groupId, cons void streamStateParTagSeekKeyNext_rocksdb(SStreamState* pState, const int64_t groupId, SStreamStateCur* pCur) { if (pCur == NULL) { - return ; + return; } STaskDbWrapper* wrapper = pState->pTdbState->pOwner->pBackend; pCur->number = pState->number; @@ -4353,13 +4269,13 @@ void streamStateParTagSeekKeyNext_rocksdb(SStreamState* pState, const int64_t gr int i = streamStateGetCfIdx(pState, "partag"); if (i < 0) { stError("streamState failed to put to cf name:%s", "partag"); - return ; + return; } char buf[128] = {0}; int32_t klen = ginitDict[i].enFunc((void*)&groupId, buf); if (!streamStateIterSeekAndValid(pCur->iter, buf, klen)) { - return ; + return; } // skip ttl expired data while (rocksdb_iter_valid(pCur->iter) && iterValueIsStale(pCur->iter)) { @@ -4371,13 +4287,14 @@ void streamStateParTagSeekKeyNext_rocksdb(SStreamState* pState, const int64_t gr size_t kLen = 0; char* keyStr = (char*)rocksdb_iter_key(pCur->iter, &kLen); TAOS_UNUSED(parKeyDecode((void*)&curGroupId, keyStr)); - if (curGroupId > groupId) return ; + if (curGroupId > groupId) return; rocksdb_iter_next(pCur->iter); } } -int32_t streamStateParTagGetKVByCur_rocksdb(SStreamStateCur* pCur, int64_t* pGroupId, const void** pVal, int32_t* pVLen) { +int32_t 
streamStateParTagGetKVByCur_rocksdb(SStreamStateCur* pCur, int64_t* pGroupId, const void** pVal, + int32_t* pVLen) { stDebug("streamStateFillGetKVByCur_rocksdb"); if (!pCur) { return -1; @@ -4710,11 +4627,12 @@ int32_t compareHashTableImpl(SHashObj* p1, SHashObj* p2, SArray* diff) { while (pIter) { char* name = taosHashGetKey(pIter, &len); if (!isBkdDataMeta(name, len) && !taosHashGet(p1, name, len)) { - char* fname = taosMemoryCalloc(1, len + 1); + int32_t cap = len + 1; + char* fname = taosMemoryCalloc(1, cap); if (fname == NULL) { return terrno; } - TAOS_UNUSED(strncpy(fname, name, len)); + tstrncpy(fname, name, cap); if (taosArrayPush(diff, &fname) == NULL) { taosMemoryFree(fname); return terrno; @@ -4738,17 +4656,32 @@ int32_t compareHashTable(SHashObj* p1, SHashObj* p2, SArray* add, SArray* del) { void hashTableToDebug(SHashObj* pTbl, char** buf) { size_t sz = taosHashGetSize(pTbl); int32_t total = 0; - char* p = taosMemoryCalloc(1, sz * 16 + 4); - void* pIter = taosHashIterate(pTbl, NULL); + int32_t cap = sz * 16 + 4; + + char* p = taosMemoryCalloc(1, cap); + if (p == NULL) { + stError("failed to alloc memory for stream snapshot debug info"); + return; + } + + void* pIter = taosHashIterate(pTbl, NULL); while (pIter) { size_t len = 0; char* name = taosHashGetKey(pIter, &len); - char* tname = taosMemoryCalloc(1, len + 1); - memcpy(tname, name, len); - total += sprintf(p + total, "%s,", tname); + if (name == NULL || len <= 0) { + pIter = taosHashIterate(pTbl, pIter); + continue; + } + int32_t left = cap - strlen(p); + int32_t nBytes = snprintf(p + total, left, "%s,", name); + if (nBytes <= 0 || nBytes >= left) { + stError("failed to debug snapshot info since %s", tstrerror(TSDB_CODE_OUT_OF_RANGE)); + taosMemoryFree(p); + return; + } pIter = taosHashIterate(pTbl, pIter); - taosMemoryFree(tname); + total += nBytes; } if (total > 0) { p[total - 1] = 0; @@ -4759,13 +4692,30 @@ void strArrayDebugInfo(SArray* pArr, char** buf) { int32_t sz = taosArrayGetSize(pArr); if (sz <= 0) return; - char* p = (char*)taosMemoryCalloc(1, 64 + sz * 64); - int32_t total = 0; + int32_t code = 0; + int32_t total = 0, nBytes = 0; + int32_t cap = 64 + sz * 64; + + char* p = (char*)taosMemoryCalloc(1, cap); + if (p == NULL) { + stError("failed to alloc memory for stream snapshot debug info"); + return; + } for (int i = 0; i < sz; i++) { - char* name = taosArrayGetP(pArr, i); - total += sprintf(p + total, "%s,", name); + char* name = taosArrayGetP(pArr, i); + int32_t left = cap - strlen(p); + nBytes = snprintf(p + total, left, "%s,", name); + if (nBytes <= 0 || nBytes >= left) { + code = TSDB_CODE_OUT_OF_RANGE; + stError("failed to debug snapshot info since %s", tstrerror(code)); + taosMemoryFree(p); + return; + } + + total += nBytes; } + p[total - 1] = 0; *buf = p; @@ -4775,16 +4725,16 @@ void dbChkpDebugInfo(SDbChkp* pDb) { char* p[4] = {NULL}; hashTableToDebug(pDb->pSstTbl[pDb->idx], &p[0]); - stTrace("chkp previous file: [%s]", p[0]); + if (p[0]) stTrace("chkp previous file: [%s]", p[0]); hashTableToDebug(pDb->pSstTbl[1 - pDb->idx], &p[1]); - stTrace("chkp curr file: [%s]", p[1]); + if (p[1]) stTrace("chkp curr file: [%s]", p[1]); strArrayDebugInfo(pDb->pAdd, &p[2]); - stTrace("chkp newly addded file: [%s]", p[2]); + if (p[2]) stTrace("chkp newly addded file: [%s]", p[2]); strArrayDebugInfo(pDb->pDel, &p[3]); - stTrace("chkp newly deleted file: [%s]", p[3]); + if (p[3]) stTrace("chkp newly deleted file: [%s]", p[3]); for (int i = 0; i < 4; i++) { taosMemoryFree(p[i]); @@ -4870,13 +4820,14 @@ int32_t 
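/*
 * A minimal sketch of the bounded comma-join now used by hashTableToDebug and
 * strArrayDebugInfo above: append with snprintf, track the running offset and
 * the remaining capacity, stop when an item would not fit, and finally overwrite
 * the trailing comma. The container is reduced to a plain string array here;
 * sizing and names are illustrative.
 */
#include <stdio.h>
#include <stdlib.h>

static char *joinNames(const char **names, int count, size_t cap) {
  char *p = calloc(1, cap);
  if (p == NULL) return NULL;

  size_t total = 0;
  for (int i = 0; i < count; i++) {
    size_t left = cap - total;              /* capacity still available     */
    int    n = snprintf(p + total, left, "%s,", names[i]);
    if (n <= 0 || (size_t)n >= left) {      /* this item would be truncated */
      free(p);
      return NULL;
    }
    total += (size_t)n;
  }
  if (total > 0) p[total - 1] = '\0';       /* drop the trailing comma      */
  return p;
}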
dbChkpGetDelta(SDbChkp* p, int64_t chkpId, SArray* list) { size_t len = 0; char* name = taosHashGetKey(pIter, &len); if (name != NULL && !isBkdDataMeta(name, len)) { - char* fname = taosMemoryCalloc(1, len + 1); + int32_t cap = len + 1; + char* fname = taosMemoryCalloc(1, cap); if (fname == NULL) { TAOS_UNUSED(taosThreadRwlockUnlock(&p->rwLock)); return terrno; } - TAOS_UNUSED(strncpy(fname, name, len)); + tstrncpy(fname, name, cap); if (taosArrayPush(p->pAdd, &fname) == NULL) { taosMemoryFree(fname); TAOS_UNUSED(taosThreadRwlockUnlock(&p->rwLock)); @@ -5351,7 +5302,8 @@ SStreamStateCur* streamStateSeekKeyPrev_rocksdb(SStreamState* pState, const SWin return NULL; } -int32_t streamStateGetGroupKVByCur_rocksdb(SStreamState* pState, SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen) { +int32_t streamStateGetGroupKVByCur_rocksdb(SStreamState* pState, SStreamStateCur* pCur, SWinKey* pKey, + const void** pVal, int32_t* pVLen) { if (!pCur) { return -1; } diff --git a/source/libs/stream/src/streamCheckpoint.c b/source/libs/stream/src/streamCheckpoint.c index 3ca283ce98..641f41daa9 100644 --- a/source/libs/stream/src/streamCheckpoint.c +++ b/source/libs/stream/src/streamCheckpoint.c @@ -591,7 +591,6 @@ int32_t streamTaskUpdateTaskCheckpointInfo(SStreamTask* pTask, bool restored, SV { // destroy the related fill-history tasks // drop task should not in the meta-lock, and drop the related fill-history task now - streamMetaWUnLock(pMeta); if (pReq->dropRelHTask) { code = streamMetaUnregisterTask(pMeta, pReq->hStreamId, pReq->hTaskId); int32_t numOfTasks = streamMetaGetNumOfTasks(pMeta); @@ -599,7 +598,6 @@ int32_t streamTaskUpdateTaskCheckpointInfo(SStreamTask* pTask, bool restored, SV id, vgId, pReq->taskId, numOfTasks); } - streamMetaWLock(pMeta); if (pReq->dropRelHTask) { code = streamMetaCommit(pMeta); } @@ -675,8 +673,6 @@ int32_t streamTaskUpdateTaskCheckpointInfo(SStreamTask* pTask, bool restored, SV return TSDB_CODE_SUCCESS; } - streamMetaWUnLock(pMeta); - // drop task should not in the meta-lock, and drop the related fill-history task now if (pReq->dropRelHTask) { code = streamMetaUnregisterTask(pMeta, pReq->hStreamId, pReq->hTaskId); @@ -685,9 +681,7 @@ int32_t streamTaskUpdateTaskCheckpointInfo(SStreamTask* pTask, bool restored, SV (int32_t)pReq->hTaskId, numOfTasks); } - streamMetaWLock(pMeta); code = streamMetaCommit(pMeta); - return TSDB_CODE_SUCCESS; } diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index 318720b5b0..85f287f301 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -522,7 +522,10 @@ static int32_t doSetStreamInputBlock(SStreamTask* pTask, const void* pInput, int if (pItem->type == STREAM_INPUT__GET_RES) { const SStreamTrigger* pTrigger = (const SStreamTrigger*)pInput; code = qSetMultiStreamInput(pExecutor, pTrigger->pBlock, 1, STREAM_INPUT__DATA_BLOCK); - + if (pTask->info.trigger == STREAM_TRIGGER_FORCE_WINDOW_CLOSE) { + stDebug("s-task:%s set force_window_close as source block, skey:%"PRId64, id, pTrigger->pBlock->info.window.skey); + (*pVer) = pTrigger->pBlock->info.window.skey; + } } else if (pItem->type == STREAM_INPUT__DATA_SUBMIT) { const SStreamDataSubmit* pSubmit = (const SStreamDataSubmit*)pInput; code = qSetMultiStreamInput(pExecutor, &pSubmit->submit, 1, STREAM_INPUT__DATA_SUBMIT); @@ -671,7 +674,7 @@ static int32_t doStreamTaskExecImpl(SStreamTask* pTask, SStreamQueueItem* pBlock doRecordThroughput(&pTask->execInfo, totalBlocks, totalSize, blockSize, st, 
pTask->id.idStr); - // update the currentVer if processing the submit blocks. + // update the currentVer if processing the submitted blocks. if (!(pInfo->checkpointVer <= pInfo->nextProcessVer && ver >= pInfo->checkpointVer)) { stError("s-task:%s invalid info, checkpointVer:%" PRId64 ", nextProcessVer:%" PRId64 " currentVer:%" PRId64, id, pInfo->checkpointVer, pInfo->nextProcessVer, ver); @@ -688,6 +691,34 @@ static int32_t doStreamTaskExecImpl(SStreamTask* pTask, SStreamQueueItem* pBlock return code; } +// do nothing after sync executor state to storage backend, untill checkpoint is completed. +static int32_t doHandleChkptBlock(SStreamTask* pTask) { + int32_t code = 0; + const char* id = pTask->id.idStr; + + streamMutexLock(&pTask->lock); + SStreamTaskState pState = streamTaskGetStatus(pTask); + if (pState.state == TASK_STATUS__CK) { // todo other thread may change the status + stDebug("s-task:%s checkpoint block received, set status:%s", id, pState.name); + code = streamTaskBuildCheckpoint(pTask); // ignore this error msg, and continue + } else { // todo refactor + if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) { + code = streamTaskSendCheckpointSourceRsp(pTask); + } else { + code = streamTaskSendCheckpointReadyMsg(pTask); + } + + if (code != TSDB_CODE_SUCCESS) { + // todo: let's retry send rsp to upstream/mnode + stError("s-task:%s failed to send checkpoint rsp to upstream, checkpointId:%d, code:%s", id, 0, + tstrerror(code)); + } + } + + streamMutexUnlock(&pTask->lock); + return code; +} + int32_t flushStateDataInExecutor(SStreamTask* pTask, SStreamQueueItem* pCheckpointBlock) { const char* id = pTask->id.idStr; @@ -832,36 +863,16 @@ static int32_t doStreamExecTask(SStreamTask* pTask) { } } - if (type != STREAM_INPUT__CHECKPOINT) { + if (type == STREAM_INPUT__CHECKPOINT) { + code = doHandleChkptBlock(pTask); + streamFreeQitem(pInput); + return code; + } else { code = doStreamTaskExecImpl(pTask, pInput, numOfBlocks); streamFreeQitem(pInput); if (code) { return code; } - } else { // todo other thread may change the status - // do nothing after sync executor state to storage backend, untill the vnode-level checkpoint is completed. 
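/*
 * A standalone sketch of the shape of the new doHandleChkptBlock helper above:
 * take the task lock, read the status once, pick the action for that status,
 * and let a single unlock cover every branch before returning the result.
 * pthread primitives and the two status values are illustrative stand-ins for
 * TDengine's streamMutex and task-status helpers.
 */
#include <pthread.h>

typedef enum { ST_RUNNING, ST_CHECKPOINT } DemoStatus;

typedef struct {
  pthread_mutex_t lock;
  DemoStatus      status;
} DemoTask;

static int buildCheckpoint(DemoTask *t) { (void)t; return 0; }   /* persist task state     */
static int ackUpstream(DemoTask *t)     { (void)t; return 0; }   /* send ready/source rsp  */

static int handleChkptBlock(DemoTask *t) {
  int code;
  pthread_mutex_lock(&t->lock);
  if (t->status == ST_CHECKPOINT) {
    code = buildCheckpoint(t);      /* already in checkpoint: build it now          */
  } else {
    code = ackUpstream(t);          /* otherwise just acknowledge the control block */
  }
  pthread_mutex_unlock(&t->lock);   /* one unlock covers both branches              */
  return code;
}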
- streamMutexLock(&pTask->lock); - SStreamTaskState pState = streamTaskGetStatus(pTask); - if (pState.state == TASK_STATUS__CK) { - stDebug("s-task:%s checkpoint block received, set status:%s", id, pState.name); - code = streamTaskBuildCheckpoint(pTask); // ignore this error msg, and continue - } else { // todo refactor - if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) { - code = streamTaskSendCheckpointSourceRsp(pTask); - } else { - code = streamTaskSendCheckpointReadyMsg(pTask); - } - - if (code != TSDB_CODE_SUCCESS) { - // todo: let's retry send rsp to upstream/mnode - stError("s-task:%s failed to send checkpoint rsp to upstream, checkpointId:%d, code:%s", id, 0, - tstrerror(code)); - } - } - - streamMutexUnlock(&pTask->lock); - streamFreeQitem(pInput); - return code; } } } diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index a6f87711bf..8e8e300ee6 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -99,7 +99,7 @@ int32_t metaRefMgtInit() { void metaRefMgtCleanup() { void* pIter = taosHashIterate(gMetaRefMgt.pTable, NULL); while (pIter) { - int64_t* p = *(int64_t**) pIter; + int64_t* p = *(int64_t**)pIter; taosMemoryFree(p); pIter = taosHashIterate(gMetaRefMgt.pTable, pIter); } @@ -118,14 +118,14 @@ int32_t metaRefMgtAdd(int64_t vgId, int64_t* rid) { if (p == NULL) { code = taosHashPut(gMetaRefMgt.pTable, &rid, sizeof(rid), &rid, sizeof(void*)); if (code) { - stError("vgId:%d failed to put into refId mgt, refId:%" PRId64" %p, code:%s", (int32_t)vgId, *rid, rid, + stError("vgId:%d failed to put into refId mgt, refId:%" PRId64 " %p, code:%s", (int32_t)vgId, *rid, rid, tstrerror(code)); return code; - } else { // not -// stInfo("add refId:%"PRId64" vgId:%d, %p", *rid, (int32_t)vgId, rid); + } else { // not + // stInfo("add refId:%"PRId64" vgId:%d, %p", *rid, (int32_t)vgId, rid); } } else { - stFatal("try to add refId:%"PRId64" vgId:%d, %p that already added into mgt", *rid, (int32_t) vgId, rid); + stFatal("try to add refId:%" PRId64 " vgId:%d, %p that already added into mgt", *rid, (int32_t)vgId, rid); } streamMutexUnlock(&gMetaRefMgt.mutex); @@ -292,6 +292,7 @@ int32_t streamTaskSetDb(SStreamMeta* pMeta, SStreamTask* pTask, const char* key) void* p = taskDbAddRef(*ppBackend); if (p == NULL) { stError("s-task:0x%x failed to ref backend", pTask->id.taskId); + streamMutexUnlock(&pMeta->backendMutex); return TSDB_CODE_FAILED; } @@ -501,8 +502,6 @@ _err: void streamMetaInitBackend(SStreamMeta* pMeta) { pMeta->streamBackend = streamBackendInit(pMeta->path, pMeta->chkpId, pMeta->vgId); if (pMeta->streamBackend == NULL) { - streamMetaWUnLock(pMeta); - while (1) { streamMetaWLock(pMeta); pMeta->streamBackend = streamBackendInit(pMeta->path, pMeta->chkpId, pMeta->vgId); @@ -671,7 +670,7 @@ int32_t streamMetaSaveTask(SStreamMeta* pMeta, SStreamTask* pTask) { int64_t refId = pTask->id.refId; int32_t ret = taosRemoveRef(streamTaskRefPool, pTask->id.refId); if (ret != 0) { - stError("s-task:0x%x failed to remove ref, refId:%"PRId64, (int32_t) id[1], refId); + stError("s-task:0x%x failed to remove ref, refId:%" PRId64, (int32_t)id[1], refId); } } else { stDebug("s-task:%s vgId:%d refId:%" PRId64 " task meta save to disk", pTask->id.idStr, vgId, pTask->id.refId); @@ -729,14 +728,14 @@ int32_t streamMetaRegisterTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTa int32_t ret = taosRemoveRef(streamTaskRefPool, refId); if (ret != 0) { - stError("s-task:0x%x failed to remove ref, refId:%"PRId64, (int32_t) id.taskId, refId); 
+ stError("s-task:0x%x failed to remove ref, refId:%" PRId64, (int32_t)id.taskId, refId); } return code; } if ((code = streamMetaSaveTask(pMeta, pTask)) != 0) { int32_t unused = taosHashRemove(pMeta->pTasksMap, &id, sizeof(id)); - void* pUnused = taosArrayPop(pMeta->pTaskList); + void* pUnused = taosArrayPop(pMeta->pTaskList); int32_t ret = taosRemoveRef(streamTaskRefPool, refId); if (ret) { @@ -747,7 +746,7 @@ int32_t streamMetaRegisterTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTa if ((code = streamMetaCommit(pMeta)) != 0) { int32_t unused = taosHashRemove(pMeta->pTasksMap, &id, sizeof(id)); - void* pUnused = taosArrayPop(pMeta->pTaskList); + void* pUnused = taosArrayPop(pMeta->pTaskList); int32_t ret = taosRemoveRef(streamTaskRefPool, refId); if (ret) { @@ -785,7 +784,7 @@ int32_t streamMetaAcquireTaskNoLock(SStreamMeta* pMeta, int64_t streamId, int32_ SStreamTask* p = taosAcquireRef(streamTaskRefPool, *pTaskRefId); if (p == NULL) { - stDebug("s-task:%x failed to acquire task refId:%"PRId64", may have been destoried", taskId, *pTaskRefId); + stDebug("s-task:%x failed to acquire task refId:%" PRId64 ", may have been destoried", taskId, *pTaskRefId); return TSDB_CODE_STREAM_TASK_NOT_EXIST; } @@ -908,8 +907,6 @@ int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t t int32_t code = 0; STaskId id = {.streamId = streamId, .taskId = taskId}; - streamMetaWLock(pMeta); - code = streamMetaAcquireTaskUnsafe(pMeta, &id, &pTask); if (code == 0) { // desc the paused task counter @@ -950,18 +947,15 @@ int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t t pTask->info.delaySchedParam = 0; } - int64_t refId = pTask->id.refId; int32_t ret = taosRemoveRef(streamTaskRefPool, refId); if (ret != 0) { - stError("s-task:0x%x failed to remove ref, refId:%"PRId64, (int32_t) id.taskId, refId); + stError("s-task:0x%x failed to remove ref, refId:%" PRId64, (int32_t)id.taskId, refId); } streamMetaReleaseTask(pMeta, pTask); - streamMetaWUnLock(pMeta); } else { stDebug("vgId:%d failed to find the task:0x%x, it may have been dropped already", vgId, taskId); - streamMetaWUnLock(pMeta); } return 0; @@ -1121,7 +1115,7 @@ void streamMetaLoadAllTasks(SStreamMeta* pMeta) { STaskId id = streamTaskGetTaskId(pTask); tFreeStreamTask(pTask); - void* px = taosArrayPush(pRecycleList, &id); + void* px = taosArrayPush(pRecycleList, &id); if (px == NULL) { stError("s-task:0x%x failed record the task into recycle list due to out of memory", taskId); } @@ -1171,7 +1165,7 @@ void streamMetaLoadAllTasks(SStreamMeta* pMeta) { continue; } - stInfo("s-task:0x%x vgId:%d set refId:%"PRId64, (int32_t) id.taskId, vgId, pTask->id.refId); + stInfo("s-task:0x%x vgId:%d set refId:%" PRId64, (int32_t)id.taskId, vgId, pTask->id.refId); if (pTask->info.fillHistory == 0) { int32_t val = atomic_add_fetch_32(&pMeta->numOfStreamTasks, 1); } diff --git a/source/libs/stream/src/streamSched.c b/source/libs/stream/src/streamSched.c index 468c5f2139..1d0c882003 100644 --- a/source/libs/stream/src/streamSched.c +++ b/source/libs/stream/src/streamSched.c @@ -192,6 +192,7 @@ static int32_t doCreateForceWindowTrigger(SStreamTask* pTask, int32_t* pNextTrig const char* id = pTask->id.idStr; int8_t precision = pTask->info.interval.precision; SStreamTrigger* pTrigger = NULL; + bool isFull = false; while (1) { code = streamCreateForcewindowTrigger(&pTrigger, pTask->info.delaySchedParam, &pTask->info.interval, @@ -214,7 +215,6 @@ static int32_t doCreateForceWindowTrigger(SStreamTask* pTask, int32_t* pNextTrig // 
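/*
 * A compact sketch of the rollback pattern around streamMetaRegisterTask above:
 * the task is added to the lookup map, appended to the task list, and given a
 * reference; if persisting it fails, the three steps are undone before the
 * error is returned. The counters below are trivial stand-ins for TDengine's
 * hash map, task array, and reference pool.
 */
#include <stdbool.h>

typedef struct {
  int mapEntries;    /* stands in for pTasksMap         */
  int listEntries;   /* stands in for pTaskList         */
  int refs;          /* stands in for streamTaskRefPool */
} DemoMeta;

static bool demoSaveTask(DemoMeta *m) { (void)m; return false; }  /* pretend saving fails */

static int demoRegisterTask(DemoMeta *m) {
  m->mapEntries++;                 /* step 1: insert into the map          */
  m->listEntries++;                /* step 2: append to the task list      */
  m->refs++;                       /* step 3: take a reference             */

  if (!demoSaveTask(m)) {          /* persisting failed: roll back in full */
    m->mapEntries--;
    m->listEntries--;
    m->refs--;
    return -1;
  }
  return 0;
}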
check whether the time window gaps exist or not int64_t now = taosGetTimestamp(precision); - int64_t ekey = pTrigger->pBlock->info.window.skey + pTask->info.interval.interval; // there are gaps, needs to be filled STimeWindow w = pTrigger->pBlock->info.window; @@ -226,13 +226,18 @@ static int32_t doCreateForceWindowTrigger(SStreamTask* pTask, int32_t* pNextTrig } pTask->status.latestForceWindow = w; - if (ekey + pTask->info.watermark + pTask->info.interval.interval > now) { - int64_t prev = convertTimePrecision(*pNextTrigger, precision, TSDB_TIME_PRECISION_MILLI); + isFull = streamQueueIsFull(pTask->inputq.queue); + + if ((w.ekey + pTask->info.watermark + pTask->info.interval.interval > now) || isFull) { + int64_t prev = convertTimePrecision(*pNextTrigger, precision, TSDB_TIME_PRECISION_MILLI); + if (!isFull) { + *pNextTrigger = w.ekey + pTask->info.watermark + pTask->info.interval.interval - now; + } - *pNextTrigger = ekey + pTask->info.watermark + pTask->info.interval.interval - now; *pNextTrigger = convertTimePrecision(*pNextTrigger, precision, TSDB_TIME_PRECISION_MILLI); - stDebug("s-task:%s generate %d time window(s), trigger delay adjust from %" PRId64 " to %d", id, num, prev, - *pNextTrigger); + pTask->chkInfo.nextProcessVer = w.ekey + pTask->info.interval.interval; + stDebug("s-task:%s generate %d time window(s), trigger delay adjust from %" PRId64 " to %d, set ver:%" PRId64, id, + num, prev, *pNextTrigger, pTask->chkInfo.nextProcessVer); return code; } else { stDebug("s-task:%s gap exist for force_window_close, current force_window_skey:%" PRId64, id, w.skey); @@ -289,7 +294,7 @@ void streamTaskSchedHelper(void* param, void* tmrId) { } if (streamTaskGetStatus(pTask).state == TASK_STATUS__CK) { - nextTrigger = TRIGGER_RECHECK_INTERVAL; // retry in 10 seec + nextTrigger = TRIGGER_RECHECK_INTERVAL; // retry in 10 sec stDebug("s-task:%s in checkpoint procedure, not retrieve result, next:%dms", id, TRIGGER_RECHECK_INTERVAL); } else { if (pTask->info.trigger == STREAM_TRIGGER_FORCE_WINDOW_CLOSE && pTask->info.taskLevel == TASK_LEVEL__SOURCE) { diff --git a/source/libs/stream/src/streamState.c b/source/libs/stream/src/streamState.c index 794fc346bf..a69ee5448d 100644 --- a/source/libs/stream/src/streamState.c +++ b/source/libs/stream/src/streamState.c @@ -120,7 +120,8 @@ SStreamState* streamStateOpen(const char* path, void* pTask, int64_t streamId, i SStreamTask* pStreamTask = pTask; pState->streamId = streamId; pState->taskId = taskId; - TAOS_UNUSED(tsnprintf(pState->pTdbState->idstr, sizeof(pState->pTdbState->idstr), "0x%" PRIx64 "-0x%x", pState->streamId, pState->taskId)); + TAOS_UNUSED(tsnprintf(pState->pTdbState->idstr, sizeof(pState->pTdbState->idstr), "0x%" PRIx64 "-0x%x", + pState->streamId, pState->taskId)); code = streamTaskSetDb(pStreamTask->pMeta, pTask, pState->pTdbState->idstr); QUERY_CHECK_CODE(code, lino, _end); @@ -527,7 +528,6 @@ _end: void streamStateDestroy(SStreamState* pState, bool remove) { streamFileStateDestroy(pState->pFileState); - // streamStateDestroy_rocksdb(pState, remove); tSimpleHashCleanup(pState->parNameMap); // do nothong taosMemoryFreeClear(pState->pTdbState); @@ -572,7 +572,8 @@ int32_t streamStateCountWinAddIfNotExist(SStreamState* pState, SSessionKey* pKey return getCountWinResultBuff(pState->pFileState, pKey, winCount, ppVal, pVLen, pWinCode); } -int32_t streamStateCountWinAdd(SStreamState* pState, SSessionKey* pKey, COUNT_TYPE winCount, void** pVal, int32_t* pVLen) { +int32_t streamStateCountWinAdd(SStreamState* pState, SSessionKey* pKey, 
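/*
 * A small sketch of the trigger-delay arithmetic in doCreateForceWindowTrigger
 * above: once no more windows can be force-closed right now, the next wakeup is
 * scheduled for window-end + watermark + interval, except that a full input
 * queue keeps the existing delay instead of recomputing it. The gap-filling
 * loop, precision conversion, and nextProcessVer bookkeeping are omitted; all
 * arguments are assumed to share one time precision.
 */
#include <stdbool.h>
#include <stdint.h>

static int64_t nextForceCloseDelay(int64_t winEnd, int64_t watermark, int64_t interval,
                                   int64_t now, bool queueFull, int64_t currentDelay) {
  if (queueFull) {
    return currentDelay;                        /* input queue full: keep the current delay   */
  }
  return winEnd + watermark + interval - now;   /* sleep until the window may be force-closed */
}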
COUNT_TYPE winCount, void** pVal, + int32_t* pVLen) { return createCountWinResultBuff(pState->pFileState, pKey, winCount, pVal, pVLen); } @@ -593,9 +594,7 @@ SStreamStateCur* streamStateGroupGetCur(SStreamState* pState) { return pCur; } -void streamStateGroupCurNext(SStreamStateCur* pCur) { - streamFileStateGroupCurNext(pCur); -} +void streamStateGroupCurNext(SStreamStateCur* pCur) { streamFileStateGroupCurNext(pCur); } int32_t streamStateGroupGetKVByCur(SStreamStateCur* pCur, int64_t* pKey, void** pVal, int32_t* pVLen) { if (pVal != NULL) { @@ -604,13 +603,9 @@ int32_t streamStateGroupGetKVByCur(SStreamStateCur* pCur, int64_t* pKey, void** return streamFileStateGroupGetKVByCur(pCur, pKey, pVal, pVLen); } -void streamStateClearExpiredState(SStreamState* pState) { - clearExpiredState(pState->pFileState); -} +void streamStateClearExpiredState(SStreamState* pState) { clearExpiredState(pState->pFileState); } -void streamStateSetFillInfo(SStreamState* pState) { - setFillInfo(pState->pFileState); -} +void streamStateSetFillInfo(SStreamState* pState) { setFillInfo(pState->pFileState); } int32_t streamStateGetPrev(SStreamState* pState, const SWinKey* pKey, SWinKey* pResKey, void** pVal, int32_t* pVLen, int32_t* pWinCode) { diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index 3d37cdb560..9a03128f01 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -2901,7 +2901,7 @@ void syncNodeLogConfigInfo(SSyncNode* ths, SSyncCfg* cfg, char* str) { n += tsnprintf(buf + n, len - n, "%s", "{"); for (int i = 0; i < ths->peersEpset->numOfEps; i++) { n += tsnprintf(buf + n, len - n, "%s:%d%s", ths->peersEpset->eps[i].fqdn, ths->peersEpset->eps[i].port, - (i + 1 < ths->peersEpset->numOfEps ? ", " : "")); + (i + 1 < ths->peersEpset->numOfEps ? 
", " : "")); } n += tsnprintf(buf + n, len - n, "%s", "}"); diff --git a/source/libs/tdb/src/db/tdbDb.c b/source/libs/tdb/src/db/tdbDb.c index 02ab997f69..aeec8f93f0 100644 --- a/source/libs/tdb/src/db/tdbDb.c +++ b/source/libs/tdb/src/db/tdbDb.c @@ -52,7 +52,7 @@ int32_t tdbOpen(const char *dbname, int32_t szPage, int32_t pages, TDB **ppDb, i pDb->encryptAlgorithm = encryptAlgorithm; if (encryptKey != NULL) { - strncpy(pDb->encryptKey, encryptKey, ENCRYPT_KEY_LEN); + tstrncpy(pDb->encryptKey, encryptKey, ENCRYPT_KEY_LEN + 1); } ret = tdbPCacheOpen(szPage, pages, &(pDb->pCache)); diff --git a/source/libs/tdb/src/db/tdbPager.c b/source/libs/tdb/src/db/tdbPager.c index 2753fe30d6..c2f97982f5 100644 --- a/source/libs/tdb/src/db/tdbPager.c +++ b/source/libs/tdb/src/db/tdbPager.c @@ -459,7 +459,7 @@ static char *tdbEncryptPage(SPager *pPager, char *pPageData, int32_t pageSize, c opts.source = pPageData + count; opts.result = packetData; opts.unitLen = 128; - strncpy(opts.key, encryptKey, ENCRYPT_KEY_LEN); + tstrncpy(opts.key, encryptKey, ENCRYPT_KEY_LEN + 1); int32_t newLen = CBC_Encrypt(&opts); @@ -927,7 +927,7 @@ static int tdbPagerInitPage(SPager *pPager, SPage *pPage, int (*initPage)(SPage opts.source = pPage->pData + count; opts.result = packetData; opts.unitLen = 128; - strncpy(opts.key, encryptKey, ENCRYPT_KEY_LEN); + tstrncpy(opts.key, encryptKey, ENCRYPT_KEY_LEN + 1); int newLen = CBC_Decrypt(&opts); diff --git a/source/libs/tfs/src/tfs.c b/source/libs/tfs/src/tfs.c index 51fda85f0a..2c97a9ac85 100644 --- a/source/libs/tfs/src/tfs.c +++ b/source/libs/tfs/src/tfs.c @@ -200,8 +200,8 @@ bool tfsIsSameFile(const STfsFile *pFile1, const STfsFile *pFile2) { if (pFile1->did.level != pFile2->did.level) return false; if (pFile1->did.id != pFile2->did.id) return false; char nameBuf1[TMPNAME_LEN], nameBuf2[TMPNAME_LEN]; - (void)strncpy(nameBuf1, pFile1->rname, TMPNAME_LEN); - (void)strncpy(nameBuf2, pFile2->rname, TMPNAME_LEN); + tstrncpy(nameBuf1, pFile1->rname, TMPNAME_LEN); + tstrncpy(nameBuf2, pFile2->rname, TMPNAME_LEN); nameBuf1[TMPNAME_LEN - 1] = 0; nameBuf2[TMPNAME_LEN - 1] = 0; TAOS_UNUSED(taosRealPath(nameBuf1, NULL, TMPNAME_LEN)); @@ -573,7 +573,7 @@ static int32_t tfsCheckAndFormatCfg(STfs *pTfs, SDiskCfg *pCfg) { TAOS_RETURN(TSDB_CODE_FS_INVLD_CFG); } - strncpy(pCfg->dir, dirName, TSDB_FILENAME_LEN); + tstrncpy(pCfg->dir, dirName, TSDB_FILENAME_LEN); TAOS_RETURN(0); } diff --git a/source/libs/wal/src/walWrite.c b/source/libs/wal/src/walWrite.c index 66ead2fd26..74fdb47d49 100644 --- a/source/libs/wal/src/walWrite.c +++ b/source/libs/wal/src/walWrite.c @@ -349,7 +349,7 @@ _exit: wError("vgId:%d, %s failed at line %d since %s", pWal->cfg.vgId, __func__, lino, tstrerror(code)); } - TAOS_RETURN(TSDB_CODE_SUCCESS); + TAOS_RETURN(code); } static FORCE_INLINE int32_t walCheckAndRoll(SWal *pWal) { @@ -638,7 +638,7 @@ static FORCE_INLINE int32_t walWriteImpl(SWal *pWal, int64_t index, tmsg_t msgTy opts.source = newBody; opts.result = newBodyEncrypted; opts.unitLen = 16; - TAOS_UNUSED(strncpy((char *)opts.key, pWal->cfg.encryptKey, ENCRYPT_KEY_LEN)); + tstrncpy((char *)opts.key, pWal->cfg.encryptKey, ENCRYPT_KEY_LEN + 1); int32_t count = CBC_Encrypt(&opts); @@ -706,8 +706,13 @@ _exit: static int32_t walInitWriteFile(SWal *pWal) { TdFilePtr pIdxTFile, pLogTFile; + int64_t fileFirstVer = -1; + int32_t code = 0; SWalFileInfo *pRet = taosArrayGetLast(pWal->fileInfoSet); - int64_t fileFirstVer = pRet->firstVer; + if (pRet == NULL) { + fileFirstVer = pWal->vers.lastVer + 1; + } + fileFirstVer = 
pRet->firstVer; char fnameStr[WAL_FILE_LEN]; walBuildIdxName(pWal, fileFirstVer, fnameStr); @@ -723,6 +728,13 @@ static int32_t walInitWriteFile(SWal *pWal) { // switch file pWal->pIdxFile = pIdxTFile; pWal->pLogFile = pLogTFile; + if (taosArrayGetSize(pWal->fileInfoSet) == 0) { + code = walRollFileInfo(pWal); + if (code < 0) { + wError("vgId:%d, failed to roll file info while init write file since %s", pWal->cfg.vgId, terrstr()); + TAOS_RETURN(code); + } + } pWal->writeCur = taosArrayGetSize(pWal->fileInfoSet) - 1; TAOS_RETURN(TSDB_CODE_SUCCESS); diff --git a/source/os/src/osSocket.c b/source/os/src/osSocket.c index 32b1023ed7..9433a76469 100644 --- a/source/os/src/osSocket.c +++ b/source/os/src/osSocket.c @@ -233,15 +233,19 @@ int32_t taosBlockSIGPIPE() { } int32_t taosGetIpv4FromFqdn(const char *fqdn, uint32_t *ip) { + int32_t code = 0; OS_PARAM_CHECK(fqdn); OS_PARAM_CHECK(ip); + int64_t limitMs = 1000; + int64_t st = taosGetTimestampMs(), cost = 0; #ifdef WINDOWS // Initialize Winsock WSADATA wsaData; int iResult; iResult = WSAStartup(MAKEWORD(2, 2), &wsaData); if (iResult != 0) { - return TAOS_SYSTEM_WINSOCKET_ERROR(WSAGetLastError()); + code = TAOS_SYSTEM_WINSOCKET_ERROR(WSAGetLastError()); + goto _err; } #endif @@ -260,12 +264,12 @@ int32_t taosGetIpv4FromFqdn(const char *fqdn, uint32_t *ip) { inRetry = true; continue; } else if (EAI_SYSTEM == ret) { - terrno = TAOS_SYSTEM_ERROR(errno); - return terrno; + code = TAOS_SYSTEM_ERROR(errno); + goto _err; } - terrno = TAOS_SYSTEM_ERROR(errno); - return terrno; + code = TAOS_SYSTEM_ERROR(errno); + goto _err; } struct sockaddr *sa = result->ai_addr; @@ -275,8 +279,7 @@ int32_t taosGetIpv4FromFqdn(const char *fqdn, uint32_t *ip) { *ip = ia.s_addr; freeaddrinfo(result); - - return 0; + goto _err; } #else struct addrinfo hints = {0}; @@ -292,7 +295,7 @@ int32_t taosGetIpv4FromFqdn(const char *fqdn, uint32_t *ip) { struct in_addr ia = si->sin_addr; *ip = ia.s_addr; freeaddrinfo(result); - return 0; + goto _err; } else { #ifdef EAI_SYSTEM if (ret == EAI_SYSTEM) { @@ -305,9 +308,16 @@ int32_t taosGetIpv4FromFqdn(const char *fqdn, uint32_t *ip) { #endif *ip = 0xFFFFFFFF; - return TSDB_CODE_RPC_FQDN_ERROR; + code = TSDB_CODE_RPC_FQDN_ERROR; + goto _err; } #endif +_err: + cost = taosGetTimestampMs() - st; + if (cost >= limitMs) { + uWarn("get ip from fqdn:%s, cost:%" PRId64 "ms", fqdn, cost); + } + return code; } int32_t taosGetFqdn(char *fqdn) { diff --git a/source/os/src/osString.c b/source/os/src/osString.c index 0ee4f1c496..40bc464519 100644 --- a/source/os/src/osString.c +++ b/source/os/src/osString.c @@ -120,33 +120,20 @@ int32_t taosStr2int64(const char *str, int64_t *val) { if (str == NULL || val == NULL) { return TSDB_CODE_INVALID_PARA; } + errno = 0; char *endptr = NULL; int64_t ret = strtoll(str, &endptr, 10); - if (errno == ERANGE && (ret == LLONG_MAX || ret == LLONG_MIN)) { + if (errno != 0) { return TAOS_SYSTEM_ERROR(errno); - } else if (errno == EINVAL && ret == 0) { - return TSDB_CODE_INVALID_PARA; } else { + if (endptr == str) { + return TSDB_CODE_INVALID_PARA; + } *val = ret; return 0; } } -int32_t taosStr2int16(const char *str, int16_t *val) { - OS_PARAM_CHECK(str); - OS_PARAM_CHECK(val); - int64_t tmp = 0; - int32_t code = taosStr2int64(str, &tmp); - if (code) { - return code; - } else if (tmp > INT16_MAX || tmp < INT16_MIN) { - return TAOS_SYSTEM_ERROR(ERANGE); - } else { - *val = (int16_t)tmp; - return 0; - } -} - int32_t taosStr2int32(const char *str, int32_t *val) { OS_PARAM_CHECK(str); OS_PARAM_CHECK(val); @@ -161,6 +148,20 
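/*
 * A minimal sketch of why the remaining strncpy calls above were switched to
 * tstrncpy: strncpy does not NUL-terminate when the source fills the buffer, so
 * a key copied with strncpy(dst, src, LEN) can be left unterminated. The helper
 * below shows the strlcpy-style contract the "+ 1" sizes above appear to rely
 * on (copy at most cap-1 bytes, always terminate); it is an illustration, not
 * TDengine's tstrncpy itself.
 */
#include <string.h>

static void copyz(char *dst, const char *src, size_t cap) {
  if (cap == 0) return;
  size_t n = strlen(src);
  if (n >= cap) n = cap - 1;   /* leave room for the terminator */
  memcpy(dst, src, n);
  dst[n] = '\0';               /* always NUL-terminated         */
}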
@@ int32_t taosStr2int32(const char *str, int32_t *val) { return 0; } } +int32_t taosStr2int16(const char *str, int16_t *val) { + OS_PARAM_CHECK(str); + OS_PARAM_CHECK(val); + int64_t tmp = 0; + int32_t code = taosStr2int64(str, &tmp); + if (code) { + return code; + } else if (tmp > INT16_MAX || tmp < INT16_MIN) { + return TAOS_SYSTEM_ERROR(ERANGE); + } else { + *val = (int16_t)tmp; + return 0; + } +} int32_t taosStr2int8(const char *str, int8_t *val) { OS_PARAM_CHECK(str); @@ -177,6 +178,70 @@ int32_t taosStr2int8(const char *str, int8_t *val) { } } +int32_t taosStr2Uint64(const char *str, uint64_t *val) { + if (str == NULL || val == NULL) { + return TSDB_CODE_INVALID_PARA; + } + char *endptr = NULL; + errno = 0; + uint64_t ret = strtoull(str, &endptr, 10); + + if (errno != 0) { + return TAOS_SYSTEM_ERROR(errno); + } else { + if (endptr == str) { + return TSDB_CODE_INVALID_PARA; + } + *val = ret; + return 0; + } +} + +int32_t taosStr2Uint32(const char *str, uint32_t *val) { + OS_PARAM_CHECK(str); + OS_PARAM_CHECK(val); + uint64_t tmp = 0; + int32_t code = taosStr2Uint64(str, &tmp); + if (code) { + return code; + } else if (tmp > UINT32_MAX) { + return TAOS_SYSTEM_ERROR(ERANGE); + } else { + *val = (int32_t)tmp; + return 0; + } +} + +int32_t taosStr2Uint16(const char *str, uint16_t *val) { + OS_PARAM_CHECK(str); + OS_PARAM_CHECK(val); + uint64_t tmp = 0; + int32_t code = taosStr2Uint64(str, &tmp); + if (code) { + return code; + } else if (tmp > UINT16_MAX) { + return TAOS_SYSTEM_ERROR(ERANGE); + } else { + *val = (int16_t)tmp; + return 0; + } +} + +int32_t taosStr2Uint8(const char *str, uint8_t *val) { + OS_PARAM_CHECK(str); + OS_PARAM_CHECK(val); + uint64_t tmp = 0; + int32_t code = taosStr2Uint64(str, &tmp); + if (code) { + return code; + } else if (tmp > UINT8_MAX) { + return TAOS_SYSTEM_ERROR(ERANGE); + } else { + *val = (int8_t)tmp; + return 0; + } +} + int32_t tasoUcs4Compare(TdUcs4 *f1_ucs4, TdUcs4 *f2_ucs4, int32_t bytes) { if ((f1_ucs4 == NULL || f2_ucs4 == NULL)) { return TSDB_CODE_INVALID_PARA; @@ -223,7 +288,7 @@ int32_t tasoUcs4Copy(TdUcs4 *target_ucs4, TdUcs4 *source_ucs4, int32_t len_ucs4) terrno = TSDB_CODE_INVALID_PARA; return terrno; } - + (void)memcpy(target_ucs4, source_ucs4, len_ucs4 * sizeof(TdUcs4)); return TSDB_CODE_SUCCESS; @@ -289,7 +354,7 @@ void taosConvDestroy() { } iconv_t taosAcquireConv(int32_t *idx, ConvType type) { - if(idx == NULL) { + if (idx == NULL) { terrno = TSDB_CODE_INVALID_PARA; return (iconv_t)-1; } @@ -356,7 +421,7 @@ bool taosMbsToUcs4(const char *mbs, size_t mbsLength, TdUcs4 *ucs4, int32_t ucs4 if (ucs4_max_len == 0) { return true; } - if(ucs4_max_len < 0 || mbs == NULL || ucs4 == NULL) { + if (ucs4_max_len < 0 || mbs == NULL || ucs4 == NULL) { terrno = TSDB_CODE_INVALID_PARA; return false; } @@ -372,9 +437,9 @@ bool taosMbsToUcs4(const char *mbs, size_t mbsLength, TdUcs4 *ucs4, int32_t ucs4 if ((iconv_t)-1 == conv) { return false; } - - size_t ucs4_input_len = mbsLength; - size_t outLeft = ucs4_max_len; + + size_t ucs4_input_len = mbsLength; + size_t outLeft = ucs4_max_len; if (iconv(conv, (char **)&mbs, &ucs4_input_len, (char **)&ucs4, &outLeft) == -1) { terrno = TAOS_SYSTEM_ERROR(errno); taosReleaseConv(idx, conv, M2C); @@ -401,7 +466,7 @@ int32_t taosUcs4ToMbs(TdUcs4 *ucs4, int32_t ucs4_max_len, char *mbs) { if (ucs4_max_len == 0) { return 0; } - if(ucs4_max_len < 0 || ucs4 == NULL || mbs == NULL) { + if (ucs4_max_len < 0 || ucs4 == NULL || mbs == NULL) { terrno = TSDB_CODE_INVALID_PARA; return terrno; } @@ -417,18 +482,18 @@ int32_t 
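/*
 * A sketch of the parsing discipline behind the new taosStr2Uint64/32/16/8
 * helpers above (and the reworked taosStr2int64): clear errno before the call,
 * treat endptr == str as "no digits", parse once into the widest unsigned type,
 * and range-check before narrowing. Error codes are placeholders for the
 * TSDB_* codes used in the real functions.
 */
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

#define DEMO_ERR_RANGE   (-1)
#define DEMO_ERR_INVALID (-2)

static int parseUint64(const char *str, uint64_t *val) {
  if (str == NULL || val == NULL) return DEMO_ERR_INVALID;
  errno = 0;
  char    *end = NULL;
  uint64_t v = strtoull(str, &end, 10);
  if (errno != 0) return DEMO_ERR_RANGE;    /* ERANGE on overflow                 */
  if (end == str) return DEMO_ERR_INVALID;  /* no digits were consumed            */
  *val = v;                                 /* trailing non-digit text is ignored */
  return 0;
}

static int parseUint32(const char *str, uint32_t *val) {
  uint64_t tmp = 0;
  int      code = parseUint64(str, &tmp);
  if (code != 0) return code;
  if (tmp > UINT32_MAX) return DEMO_ERR_RANGE;  /* out of range for the target type */
  *val = (uint32_t)tmp;                         /* safe to narrow now               */
  return 0;
}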
taosUcs4ToMbs(TdUcs4 *ucs4, int32_t ucs4_max_len, char *mbs) { if ((iconv_t)-1 == conv) { return terrno; } - - size_t ucs4_input_len = ucs4_max_len; - size_t outLen = ucs4_max_len; + + size_t ucs4_input_len = ucs4_max_len; + size_t outLen = ucs4_max_len; if (iconv(conv, (char **)&ucs4, &ucs4_input_len, &mbs, &outLen) == -1) { code = TAOS_SYSTEM_ERROR(errno); taosReleaseConv(idx, conv, C2M); - terrno = code; + terrno = code; return code; } - + taosReleaseConv(idx, conv, C2M); - + return (int32_t)(ucs4_max_len - outLen); #endif } @@ -439,7 +504,7 @@ int32_t taosUcs4ToMbsEx(TdUcs4 *ucs4, int32_t ucs4_max_len, char *mbs, iconv_t c if (ucs4_max_len == 0) { return 0; } - if(ucs4_max_len < 0 || ucs4 == NULL || mbs == NULL) { + if (ucs4_max_len < 0 || ucs4 == NULL || mbs == NULL) { terrno = TSDB_CODE_INVALID_PARA; return terrno; } @@ -455,13 +520,13 @@ int32_t taosUcs4ToMbsEx(TdUcs4 *ucs4, int32_t ucs4_max_len, char *mbs, iconv_t c terrno = TAOS_SYSTEM_ERROR(errno); return terrno; } - + return (int32_t)(ucs4_max_len - outLen); #endif } bool taosValidateEncodec(const char *encodec) { - if(encodec == NULL) { + if (encodec == NULL) { terrno = TSDB_CODE_INVALID_PARA; return false; } @@ -513,7 +578,7 @@ int32_t taosHexEncode(const unsigned char *src, char *dst, int32_t len, int32_t } int32_t taosHexDecode(const char *src, char *dst, int32_t len) { - if(!src || !dst || len <= 0) { + if (!src || !dst || len <= 0) { terrno = TSDB_CODE_INVALID_PARA; return terrno; } @@ -556,9 +621,10 @@ int32_t taosMbsToWchars(TdWchar *pWchars, const char *pStrs, int32_t size) { return mbstowcs(pWchars, pStrs, size); } -int32_t taosWcharToMb(char *pStr, TdWchar wchar) { +int32_t taosWcharToMb(char *pStr, TdWchar wchar) { OS_PARAM_CHECK(pStr); - return wctomb(pStr, wchar); } + return wctomb(pStr, wchar); +} char *taosStrCaseStr(const char *str, const char *pattern) { if (str == NULL) { @@ -580,7 +646,7 @@ char *taosStrCaseStr(const char *str, const char *pattern) { } int64_t taosStr2Int64(const char *str, char **pEnd, int32_t radix) { - if(str == NULL) { + if (str == NULL) { terrno = TSDB_CODE_INVALID_PARA; return 0; } @@ -592,7 +658,7 @@ int64_t taosStr2Int64(const char *str, char **pEnd, int32_t radix) { } uint64_t taosStr2UInt64(const char *str, char **pEnd, int32_t radix) { - if(str == NULL) { + if (str == NULL) { terrno = TSDB_CODE_INVALID_PARA; return 0; } @@ -604,7 +670,7 @@ uint64_t taosStr2UInt64(const char *str, char **pEnd, int32_t radix) { } int32_t taosStr2Int32(const char *str, char **pEnd, int32_t radix) { - if(str == NULL) { + if (str == NULL) { terrno = TSDB_CODE_INVALID_PARA; return 0; } @@ -616,7 +682,7 @@ int32_t taosStr2Int32(const char *str, char **pEnd, int32_t radix) { } uint32_t taosStr2UInt32(const char *str, char **pEnd, int32_t radix) { - if(str == NULL) { + if (str == NULL) { terrno = TSDB_CODE_INVALID_PARA; return 0; } @@ -628,7 +694,7 @@ uint32_t taosStr2UInt32(const char *str, char **pEnd, int32_t radix) { } int16_t taosStr2Int16(const char *str, char **pEnd, int32_t radix) { - if(str == NULL) { + if (str == NULL) { terrno = TSDB_CODE_INVALID_PARA; return 0; } @@ -640,7 +706,7 @@ int16_t taosStr2Int16(const char *str, char **pEnd, int32_t radix) { } uint16_t taosStr2UInt16(const char *str, char **pEnd, int32_t radix) { - if(str == NULL) { + if (str == NULL) { terrno = TSDB_CODE_INVALID_PARA; return 0; } @@ -652,7 +718,7 @@ uint16_t taosStr2UInt16(const char *str, char **pEnd, int32_t radix) { } int8_t taosStr2Int8(const char *str, char **pEnd, int32_t radix) { - if(str == NULL) { + if (str == 
NULL) { terrno = TSDB_CODE_INVALID_PARA; return 0; } @@ -661,7 +727,7 @@ int8_t taosStr2Int8(const char *str, char **pEnd, int32_t radix) { } uint8_t taosStr2UInt8(const char *str, char **pEnd, int32_t radix) { - if(str == NULL) { + if (str == NULL) { terrno = TSDB_CODE_INVALID_PARA; return 0; } @@ -673,7 +739,7 @@ uint8_t taosStr2UInt8(const char *str, char **pEnd, int32_t radix) { } double taosStr2Double(const char *str, char **pEnd) { - if(str == NULL) { + if (str == NULL) { terrno = TSDB_CODE_INVALID_PARA; return 0; } @@ -682,7 +748,7 @@ double taosStr2Double(const char *str, char **pEnd) { } float taosStr2Float(const char *str, char **pEnd) { - if(str == NULL) { + if (str == NULL) { terrno = TSDB_CODE_INVALID_PARA; return 0; } @@ -698,7 +764,7 @@ bool isHex(const char *z, uint32_t n) { } bool isValidateHex(const char *z, uint32_t n) { - if(!z) { + if (!z) { terrno = TSDB_CODE_INVALID_PARA; return false; } @@ -724,12 +790,12 @@ int32_t taosHex2Ascii(const char *z, uint32_t n, void **data, uint32_t *size) { } return 0; } - + uint8_t *tmp = (uint8_t *)taosMemoryCalloc(*size, 1); if (tmp == NULL) { return terrno; } - + int8_t num = 0; uint8_t *byte = tmp + *size - 1; @@ -749,7 +815,7 @@ int32_t taosHex2Ascii(const char *z, uint32_t n, void **data, uint32_t *size) { } } *data = tmp; - + return 0; } @@ -825,7 +891,7 @@ int32_t taosAscii2Hex(const char *z, uint32_t n, void **data, uint32_t *size) { if (tmp == NULL) { return terrno; } - + *data = tmp; *(tmp++) = '\\'; *(tmp++) = 'x'; @@ -834,7 +900,7 @@ int32_t taosAscii2Hex(const char *z, uint32_t n, void **data, uint32_t *size) { tmp[i * 2] = valueOf(val >> 4); tmp[i * 2 + 1] = valueOf(val & 0x0F); } - + return 0; } diff --git a/source/os/test/osStringTests.cpp b/source/os/test/osStringTests.cpp index de07d21959..36f5207346 100644 --- a/source/os/test/osStringTests.cpp +++ b/source/os/test/osStringTests.cpp @@ -154,3 +154,483 @@ TEST(osStringTests, ostsnprintfTests) { EXPECT_EQ(ret, 11); EXPECT_STREQ(buffer, "Float: 3.14"); } +TEST(osStringTests, osStr2Int64) { + int64_t val; + int32_t result; + + // 测试空指针输入 + result = taosStr2int64(NULL, &val); + assert(result == TSDB_CODE_INVALID_PARA); + + result = taosStr2int64("123", NULL); + ASSERT_NE(result, 0); + + // 测试无效输入 + result = taosStr2int64("abc", &val); + ASSERT_NE(result, 0); + + result = taosStr2int64("", &val); + ASSERT_NE(result, 0); + + char large_num[50]; + snprintf(large_num, sizeof(large_num), "%lld", LLONG_MAX); + result = taosStr2int64(large_num, &val); + assert(result == 0); + assert(val == LLONG_MAX); + + snprintf(large_num, sizeof(large_num), "%lld", LLONG_MIN); + result = taosStr2int64(large_num, &val); + assert(result == 0); + assert(val == LLONG_MIN); + + result = taosStr2int64("123abc", &val); + ASSERT_EQ(result, 0); + ASSERT_EQ(val, 123); + + result = taosStr2int64("abc123", &val); + ASSERT_NE(result, 0); + // 测试有效的整数字符串 + result = taosStr2int64("12345", &val); + assert(result == 0); + assert(val == 12345); + + result = taosStr2int64("-12345", &val); + assert(result == 0); + assert(val == -12345); + + result = taosStr2int64("0", &val); + assert(result == 0); + assert(val == 0); + + // 测试带空格的字符串 + result = taosStr2int64(" 12345", &val); + assert(result == 0); + assert(val == 12345); + + result = taosStr2int64("12345 ", &val); + assert(result == 0); + assert(val == 12345); +} +TEST(osStringTests, osStr2int32) { + int32_t val; + int32_t result; + + // 测试空指针输入 + result = taosStr2int32(NULL, &val); + ASSERT_EQ(result, TSDB_CODE_INVALID_PARA); + + result = taosStr2int32("123", 
NULL); + ASSERT_EQ(result, TSDB_CODE_INVALID_PARA); + + // 测试无效输入 + result = taosStr2int32("abc", &val); + ASSERT_NE(result, 0); + + result = taosStr2int32("", &val); + ASSERT_NE(result, 0); + + // 测试超出范围的值 + char large_num[50]; + snprintf(large_num, sizeof(large_num), "%d", INT_MAX); + result = taosStr2int32(large_num, &val); + ASSERT_EQ(result, 0); + ASSERT_EQ(val, INT_MAX); + + snprintf(large_num, sizeof(large_num), "%d", INT_MIN); + result = taosStr2int32(large_num, &val); + ASSERT_EQ(result, 0); + ASSERT_EQ(val, INT_MIN); + + // 测试大于 INT32 范围的值 + snprintf(large_num, sizeof(large_num), "%lld", (long long)INT_MAX + 1); + result = taosStr2int32(large_num, &val); + ASSERT_EQ(result, TAOS_SYSTEM_ERROR(ERANGE)); + + snprintf(large_num, sizeof(large_num), "%lld", (long long)INT_MIN - 1); + result = taosStr2int32(large_num, &val); + ASSERT_EQ(result, TAOS_SYSTEM_ERROR(ERANGE)); + + + result = taosStr2int32("123abc", &val); + ASSERT_EQ(result, 0); + ASSERT_EQ(val, 123); + + result = taosStr2int32("abc123", &val); + ASSERT_NE(result, 0); + + // 测试有效的整数字符串 + result = taosStr2int32("12345", &val); + ASSERT_EQ(result, 0); + ASSERT_EQ(val, 12345); + + result = taosStr2int32("-12345", &val); + ASSERT_EQ(result, 0); + ASSERT_EQ(val, -12345); + + result = taosStr2int32("0", &val); + ASSERT_EQ(result, 0); + ASSERT_EQ(val, 0); + + // 测试带空格的字符串 + result = taosStr2int32(" 12345", &val); + ASSERT_EQ(result, 0); + ASSERT_EQ(val, 12345); + + result = taosStr2int32("12345 ", &val); + ASSERT_EQ(result, 0); + ASSERT_EQ(val, 12345); +} + +TEST(osStringTests, taosStr2int16) { + int16_t val; + int32_t result; + + // 测试空指针输入 + result = taosStr2int16(NULL, &val); + ASSERT_EQ(result, TSDB_CODE_INVALID_PARA); + + result = taosStr2int16("123", NULL); + ASSERT_EQ(result, TSDB_CODE_INVALID_PARA); + + // 测试无效输入 + result = taosStr2int16("abc", &val); + ASSERT_NE(result, 0); + + result = taosStr2int16("", &val); + ASSERT_NE(result, 0); + + // 测试超出范围的值 + char large_num[50]; + snprintf(large_num, sizeof(large_num), "%d", INT16_MAX); + result = taosStr2int16(large_num, &val); + ASSERT_EQ(result, 0); + ASSERT_EQ(val, INT16_MAX); + + snprintf(large_num, sizeof(large_num), "%d", INT16_MIN); + result = taosStr2int16(large_num, &val); + ASSERT_EQ(result, 0); + ASSERT_EQ(val, INT16_MIN); + + // 测试大于 INT16 范围的值 + snprintf(large_num, sizeof(large_num), "%lld", (long long)INT16_MAX + 1); + result = taosStr2int16(large_num, &val); + ASSERT_EQ(result, TAOS_SYSTEM_ERROR(ERANGE)); + + snprintf(large_num, sizeof(large_num), "%lld", (long long)INT16_MIN - 1); + result = taosStr2int16(large_num, &val); + ASSERT_EQ(result, TAOS_SYSTEM_ERROR(ERANGE)); + + result = taosStr2int16("123abc", &val); + ASSERT_EQ(result, 0); + ASSERT_EQ(val, 123); + + result = taosStr2int16("abc123", &val); + ASSERT_NE(result, 0); + // 测试有效的整数字符串 + result = taosStr2int16("12345", &val); + ASSERT_EQ(result, 0); + ASSERT_EQ(val, 12345); + + result = taosStr2int16("-12345", &val); + ASSERT_EQ(result, 0); + ASSERT_EQ(val, -12345); + + result = taosStr2int16("0", &val); + ASSERT_EQ(result, 0); + ASSERT_EQ(val, 0); + + // 测试带空格的字符串 + result = taosStr2int16(" 12345", &val); + ASSERT_EQ(result, 0); + ASSERT_EQ(val, 12345); + + result = taosStr2int16("12345 ", &val); + ASSERT_EQ(result, 0); + ASSERT_EQ(val, 12345); +} + + +TEST(osStringTests, taosStr2int8) { + int8_t val; + int32_t result; + + // 测试空指针输入 + result = taosStr2int8(NULL, &val); + ASSERT_EQ(result, TSDB_CODE_INVALID_PARA); + + result = taosStr2int8("123", NULL); + ASSERT_EQ(result, TSDB_CODE_INVALID_PARA); + + // 
测试无效输入 + result = taosStr2int8("abc", &val); + ASSERT_NE(result, 0); + + result = taosStr2int8("", &val); + ASSERT_NE(result, 0); + + // 测试超出范围的值 + char large_num[50]; + snprintf(large_num, sizeof(large_num), "%d", INT8_MAX); + result = taosStr2int8(large_num, &val); + ASSERT_EQ(result, 0); + ASSERT_EQ(val, INT8_MAX); + + snprintf(large_num, sizeof(large_num), "%d", INT8_MIN); + result = taosStr2int8(large_num, &val); + ASSERT_EQ(result, 0); + ASSERT_EQ(val, INT8_MIN); + + // 测试大于 INT8 范围的值 + snprintf(large_num, sizeof(large_num), "%lld", (long long)INT8_MAX + 1); + result = taosStr2int8(large_num, &val); + ASSERT_EQ(result, TAOS_SYSTEM_ERROR(ERANGE)); + + snprintf(large_num, sizeof(large_num), "%lld", (long long)INT8_MIN - 1); + result = taosStr2int8(large_num, &val); + ASSERT_EQ(result, TAOS_SYSTEM_ERROR(ERANGE)); + + result = taosStr2int8("123abc", &val); + ASSERT_EQ(result, 0); + ASSERT_EQ(val, 123); + + result = taosStr2int8("abc123", &val); + ASSERT_NE(result, 0); + + // 测试有效的整数字符串 + result = taosStr2int8("123", &val); + ASSERT_EQ(result, 0); + ASSERT_EQ(val, 123); + + result = taosStr2int8("-123", &val); + ASSERT_EQ(result, 0); + ASSERT_EQ(val, -123); + + result = taosStr2int8("0", &val); + ASSERT_EQ(result, 0); + ASSERT_EQ(val, 0); + + // 测试带空格的字符串 + result = taosStr2int8(" 123", &val); + ASSERT_EQ(result, 0); + ASSERT_EQ(val, 123); + + result = taosStr2int8("123 ", &val); + ASSERT_EQ(result, 0); + ASSERT_EQ(val, 123); +} + +TEST(osStringTests, osStr2Uint64) { + uint64_t val; + int32_t result; + + // 测试空指针输入 + result = taosStr2Uint64(NULL, &val); + ASSERT_EQ(result, TSDB_CODE_INVALID_PARA); + + result = taosStr2Uint64("123", NULL); + ASSERT_EQ(result, TSDB_CODE_INVALID_PARA); + + // 测试无效输入 + result = taosStr2Uint64("abc", &val); + ASSERT_NE(result, 0); + + result = taosStr2Uint64("", &val); + ASSERT_NE(result, 0); + + char large_num[50]; + snprintf(large_num, sizeof(large_num), "%llu", ULLONG_MAX); + result = taosStr2Uint64(large_num, &val); + ASSERT_EQ(result, 0); + ASSERT_EQ(val, ULLONG_MAX); + + result = taosStr2Uint64("123abc", &val); + ASSERT_EQ(result, 0); + ASSERT_EQ(val, 123); + + result = taosStr2Uint64("abc123", &val); + ASSERT_NE(result, 0); + // 测试有效的整数字符串 + result = taosStr2Uint64("12345", &val); + ASSERT_EQ(result, 0); + ASSERT_EQ(val, 12345); + + result = taosStr2Uint64("0", &val); + ASSERT_EQ(result, 0); + ASSERT_EQ(val, 0); + + // 测试带空格的字符串 + result = taosStr2Uint64(" 12345", &val); + ASSERT_EQ(result, 0); + ASSERT_EQ(val, 12345); + + result = taosStr2Uint64("12345 ", &val); + ASSERT_EQ(result, 0); + ASSERT_EQ(val, 12345); + +} + +TEST(osStringTests, taosStr2Uint32) { + uint32_t val; + int32_t result; + + // 测试空指针输入 + result = taosStr2Uint32(NULL, &val); + ASSERT_EQ(result, TSDB_CODE_INVALID_PARA); + + result = taosStr2Uint32("123", NULL); + ASSERT_EQ(result, TSDB_CODE_INVALID_PARA); + + // 测试无效输入 + result = taosStr2Uint32("abc", &val); + ASSERT_NE(result, 0); + + result = taosStr2Uint32("", &val); + ASSERT_NE(result, 0); + + // 测试超出范围的值 + char large_num[50]; + snprintf(large_num, sizeof(large_num), "%u", UINT32_MAX); + result = taosStr2Uint32(large_num, &val); + ASSERT_EQ(result, 0); + ASSERT_EQ(val, UINT32_MAX); + + // 测试大于 UINT32 范围的值 + snprintf(large_num, sizeof(large_num), "%llu", (unsigned long long)UINT32_MAX + 1); + result = taosStr2Uint32(large_num, &val); + ASSERT_EQ(result, TAOS_SYSTEM_ERROR(ERANGE)); + + result = taosStr2Uint32("123abc", &val); + ASSERT_EQ(result, 0); + ASSERT_EQ(val, 123); + + result = taosStr2Uint32("abc123", &val); + 
ASSERT_NE(result, 0); + // Test valid integer strings + result = taosStr2Uint32("12345", &val); + ASSERT_EQ(result, 0); + ASSERT_EQ(val, 12345); + + result = taosStr2Uint32("0", &val); + ASSERT_EQ(result, 0); + ASSERT_EQ(val, 0); + + // Test strings with spaces + result = taosStr2Uint32(" 12345", &val); + ASSERT_EQ(result, 0); + ASSERT_EQ(val, 12345); + + result = taosStr2Uint32("12345 ", &val); + ASSERT_EQ(result, 0); + ASSERT_EQ(val, 12345); +} + +TEST(osStringTests, taosStr2Uint16) { + uint16_t val; + int32_t result; + + // Test NULL pointer input + result = taosStr2Uint16(NULL, &val); + ASSERT_EQ(result, TSDB_CODE_INVALID_PARA); + + result = taosStr2Uint16("123", NULL); + ASSERT_EQ(result, TSDB_CODE_INVALID_PARA); + + // Test invalid input + result = taosStr2Uint16("abc", &val); + ASSERT_NE(result, 0); + + result = taosStr2Uint16("", &val); + ASSERT_NE(result, 0); + + // Test boundary values + char large_num[50]; + snprintf(large_num, sizeof(large_num), "%u", UINT16_MAX); + result = taosStr2Uint16(large_num, &val); + ASSERT_EQ(result, 0); + ASSERT_EQ(val, UINT16_MAX); + + // Test values beyond the UINT16 range + snprintf(large_num, sizeof(large_num), "%llu", (unsigned long long)UINT16_MAX + 1); + result = taosStr2Uint16(large_num, &val); + ASSERT_EQ(result, TAOS_SYSTEM_ERROR(ERANGE)); + + result = taosStr2Uint16("123abc", &val); + ASSERT_EQ(result, 0); + ASSERT_EQ(val, 123); + + result = taosStr2Uint16("abc123", &val); + ASSERT_NE(result, 0); + // Test valid integer strings + result = taosStr2Uint16("12345", &val); + ASSERT_EQ(result, 0); + ASSERT_EQ(val, 12345); + + result = taosStr2Uint16("0", &val); + ASSERT_EQ(result, 0); + ASSERT_EQ(val, 0); + + // Test strings with spaces + result = taosStr2Uint16(" 12345", &val); + ASSERT_EQ(result, 0); + ASSERT_EQ(val, 12345); + + result = taosStr2Uint16("12345 ", &val); + ASSERT_EQ(result, 0); + ASSERT_EQ(val, 12345); +} + +TEST(osStringTests, taosStr2Uint8) { + uint8_t val; + int32_t result; + + // Test NULL pointer input + result = taosStr2Uint8(NULL, &val); + ASSERT_EQ(result, TSDB_CODE_INVALID_PARA); + + result = taosStr2Uint8("123", NULL); + ASSERT_EQ(result, TSDB_CODE_INVALID_PARA); + + // Test invalid input + result = taosStr2Uint8("abc", &val); + ASSERT_NE(result, 0); + + result = taosStr2Uint8("", &val); + ASSERT_NE(result, 0); + + // Test boundary values + char large_num[50]; + snprintf(large_num, sizeof(large_num), "%u", UINT8_MAX); + result = taosStr2Uint8(large_num, &val); + ASSERT_EQ(result, 0); + ASSERT_EQ(val, UINT8_MAX); + + // Test values beyond the UINT8 range + snprintf(large_num, sizeof(large_num), "%llu", (unsigned long long)UINT8_MAX + 1); + result = taosStr2Uint8(large_num, &val); + ASSERT_EQ(result, TAOS_SYSTEM_ERROR(ERANGE)); + + result = taosStr2Uint8("123abc", &val); + ASSERT_EQ(result, 0); + ASSERT_EQ(val, 123); + + result = taosStr2Uint8("abc123", &val); + ASSERT_NE(result, 0); + // Test valid integer strings + result = taosStr2Uint8("123", &val); + ASSERT_EQ(result, 0); + ASSERT_EQ(val, 123); + + result = taosStr2Uint8("0", &val); + ASSERT_EQ(result, 0); + ASSERT_EQ(val, 0); + + // Test strings with spaces + result = taosStr2Uint8(" 123", &val); + ASSERT_EQ(result, 0); + ASSERT_EQ(val, 123); + + result = taosStr2Uint8("123 ", &val); + ASSERT_EQ(result, 0); + ASSERT_EQ(val, 123); +} + diff --git a/source/util/src/tconfig.c b/source/util/src/tconfig.c index d2a2b1fb9a..18c2d0a500 100644 --- a/source/util/src/tconfig.c +++ b/source/util/src/tconfig.c @@ -151,11 +151,13 @@ static int32_t cfgCheckAndSetDir(SConfigItem *pItem, const char *inputDir) { } static int32_t cfgSetBool(SConfigItem *pItem, const char *value, ECfgSrcType stype) { - bool tmp = false; + int32_t code = 0; + bool tmp = false; if (strcasecmp(value, "true")
== 0) { tmp = true; } - if (atoi(value) > 0) { + int32_t val = 0; + if ((code = taosStr2int32(value, &val)) == 0 && val > 0) { tmp = true; } @@ -258,6 +260,7 @@ static int32_t cfgSetTimezone(SConfigItem *pItem, const char *value, ECfgSrcType static int32_t cfgSetTfsItem(SConfig *pCfg, const char *name, const char *value, const char *level, const char *primary, const char *disable, ECfgSrcType stype) { + int32_t code = 0; (void)taosThreadMutexLock(&pCfg->lock); SConfigItem *pItem = cfgGetItem(pCfg, name); @@ -278,20 +281,40 @@ static int32_t cfgSetTfsItem(SConfig *pCfg, const char *name, const char *value, SDiskCfg cfg = {0}; tstrncpy(cfg.dir, pItem->str, sizeof(cfg.dir)); - cfg.level = level ? atoi(level) : 0; - cfg.primary = primary ? atoi(primary) : 1; - cfg.disable = disable ? atoi(disable) : 0; + + if (level == NULL || strlen(level) == 0) { + cfg.level = 0; + } else { + code = taosStr2int32(level, &cfg.level); + TAOS_CHECK_GOTO(code, NULL, _err); + } + + if (primary == NULL || strlen(primary) == 0) { + cfg.primary = 1; + } else { + code = taosStr2int32(primary, &cfg.primary); + TAOS_CHECK_GOTO(code, NULL, _err); + } + + if (disable == NULL || strlen(disable) == 0) { + cfg.disable = 0; + } else { + code = taosStr2int8(disable, &cfg.disable); + TAOS_CHECK_GOTO(code, NULL, _err); + } void *ret = taosArrayPush(pItem->array, &cfg); if (ret == NULL) { - (void)taosThreadMutexUnlock(&pCfg->lock); - - TAOS_RETURN(terrno); + code = terrno; + TAOS_CHECK_GOTO(code, NULL, _err); } pItem->stype = stype; (void)taosThreadMutexUnlock(&pCfg->lock); TAOS_RETURN(TSDB_CODE_SUCCESS); +_err: + (void)taosThreadMutexUnlock(&pCfg->lock); + TAOS_RETURN(code); } static int32_t cfgUpdateDebugFlagItem(SConfig *pCfg, const char *name, bool resetArray) { @@ -315,7 +338,7 @@ static int32_t cfgUpdateDebugFlagItem(SConfig *pCfg, const char *name, bool rese if (pDebugFlagItem == NULL) return -1; if (pDebugFlagItem->array != NULL) { SLogVar logVar = {0}; - (void)strncpy(logVar.name, name, TSDB_LOG_VAR_LEN - 1); + tstrncpy(logVar.name, name, TSDB_LOG_VAR_LEN); if (NULL == taosArrayPush(pDebugFlagItem->array, &logVar)) { TAOS_RETURN(terrno); } @@ -412,6 +435,7 @@ void cfgLock(SConfig *pCfg) { void cfgUnLock(SConfig *pCfg) { (void)taosThreadMutexUnlock(&pCfg->lock); } int32_t cfgCheckRangeForDynUpdate(SConfig *pCfg, const char *name, const char *pVal, bool isServer) { + int32_t code = 0; ECfgDynType dynType = isServer ? 
CFG_DYN_SERVER : CFG_DYN_CLIENT; cfgLock(pCfg); @@ -443,8 +467,9 @@ int32_t cfgCheckRangeForDynUpdate(SConfig *pCfg, const char *name, const char *p } } break; case CFG_DTYPE_BOOL: { - int32_t ival = (int32_t)atoi(pVal); - if (ival != 0 && ival != 1) { + int32_t ival = 0; + code = taosStr2int32(pVal, &ival); + if (code != 0 || (ival != 0 && ival != 1)) { uError("cfg:%s, type:%s value:%d out of range[0, 1]", pItem->name, cfgDtypeStr(pItem->dtype), ival); cfgUnLock(pCfg); TAOS_RETURN(TSDB_CODE_OUT_OF_RANGE); @@ -887,9 +912,10 @@ void cfgDumpCfg(SConfig *pCfg, bool tsc, bool dump) { for (size_t j = 0; j < sz; ++j) { SDiskCfg *pCfg = taosArrayGet(pItem->array, j); if (dump) { - (void)printf("%s %s %s l:%d p:%d d:%"PRIi8"\n", src, name, pCfg->dir, pCfg->level, pCfg->primary, pCfg->disable); + (void)printf("%s %s %s l:%d p:%d d:%" PRIi8 "\n", src, name, pCfg->dir, pCfg->level, pCfg->primary, + pCfg->disable); } else { - uInfo("%s %s %s l:%d p:%d d:%"PRIi8, src, name, pCfg->dir, pCfg->level, pCfg->primary, pCfg->disable); + uInfo("%s %s %s l:%d p:%d d:%" PRIi8, src, name, pCfg->dir, pCfg->level, pCfg->primary, pCfg->disable); } } break; @@ -924,7 +950,7 @@ int32_t cfgLoadFromEnvVar(SConfig *pConfig) { name = value = value2 = value3 = value4 = NULL; olen = vlen = vlen2 = vlen3 = vlen4 = 0; - strncpy(line, *pEnv, sizeof(line) - 1); + tstrncpy(line, *pEnv, sizeof(line)); pEnv++; if (taosEnvToCfg(line, line) < 0) { uTrace("failed to convert env to cfg:%s", line); @@ -969,7 +995,7 @@ int32_t cfgLoadFromEnvCmd(SConfig *pConfig, const char **envCmd) { int32_t index = 0; if (envCmd == NULL) TAOS_RETURN(TSDB_CODE_SUCCESS); while (envCmd[index] != NULL) { - strncpy(buf, envCmd[index], sizeof(buf) - 1); + tstrncpy(buf, envCmd[index], sizeof(buf)); buf[sizeof(buf) - 1] = 0; if (taosEnvToCfg(buf, buf) < 0) { uTrace("failed to convert env to cfg:%s", buf); @@ -1420,7 +1446,7 @@ int32_t cfgGetApollUrl(const char **envCmd, const char *envFile, char *apolloUrl char **pEnv = environ; line[1023] = 0; while (*pEnv != NULL) { - strncpy(line, *pEnv, sizeof(line) - 1); + tstrncpy(line, *pEnv, sizeof(line)); pEnv++; if (strncmp(line, "TAOS_APOLLO_URL", 14) == 0) { char *p = strchr(line, '='); diff --git a/source/util/src/tutil.c b/source/util/src/tutil.c index 48338e7996..0fb8506a68 100644 --- a/source/util/src/tutil.c +++ b/source/util/src/tutil.c @@ -520,3 +520,63 @@ int32_t parseCfgReal(const char *str, float *out) { *out = val; return TSDB_CODE_SUCCESS; } + + +bool taosIsBigChar(char c) { + if (c >= 'A' && c <= 'Z') { + return true; + } else { + return false; + } +} + +bool taosIsSmallChar(char c) { + if (c >= 'a' && c <= 'z') { + return true; + } else { + return false; + } +} + +bool taosIsNumberChar(char c) { + if (c >= '0' && c <= '9') { + return true; + } else { + return false; + } +} + +bool taosIsSpecialChar(char c) { + switch (c) { + case '!': + case '@': + case '#': + case '$': + case '%': + case '^': + case '&': + case '*': + case '(': + case ')': + case '-': + case '_': + case '+': + case '=': + case '[': + case ']': + case '{': + case '}': + case ':': + case ';': + case '>': + case '<': + case '?': + case '|': + case '~': + case ',': + case '.': + return true; + default: + return false; + } +} \ No newline at end of file diff --git a/source/util/src/tversion.c b/source/util/src/tversion.c index d1a2ecf827..1a8d8f2d23 100644 --- a/source/util/src/tversion.c +++ b/source/util/src/tversion.c @@ -18,6 +18,7 @@ #include "taoserror.h" int32_t taosVersionStrToInt(const char *vstr, int32_t *vint) { + int32_t code 
= 0; if (vstr == NULL) { return terrno = TSDB_CODE_INVALID_VERSION_STRING; } @@ -31,7 +32,10 @@ int32_t taosVersionStrToInt(const char *vstr, int32_t *vint) { if (vstr[spos] != '.') { tmp[spos - tpos] = vstr[spos]; } else { - vnum[vpos] = atoi(tmp); + code = taosStr2int32(tmp, &vnum[vpos]); + if (code != 0) { + return code; + } memset(tmp, 0, sizeof(tmp)); vpos++; tpos = spos + 1; @@ -39,7 +43,10 @@ int32_t taosVersionStrToInt(const char *vstr, int32_t *vint) { } if ('\0' != tmp[0] && vpos < 4) { - vnum[vpos] = atoi(tmp); + code = taosStr2int32(tmp, &vnum[vpos]); + if (code != 0) { + return code; + } } if (vnum[0] <= 0) { diff --git a/tests/army/authorith/authBasic.py b/tests/army/authorith/authBasic.py index 9a453cf687..a682b08ba0 100644 --- a/tests/army/authorith/authBasic.py +++ b/tests/army/authorith/authBasic.py @@ -40,9 +40,9 @@ class TDTestCase(TBase): def test_common_user_privileges(self): self.prepare_data() # create user - self.create_user("test", "test") + self.create_user("test", "test12@#*") # check user 'test' privileges - testconn = taos.connect(user='test', password='test') + testconn = taos.connect(user='test', password='test12@#*') cursor = testconn.cursor() testSql = TDSql() testSql.init(cursor) @@ -87,9 +87,9 @@ class TDTestCase(TBase): def test_common_user_with_createdb_privileges(self): self.prepare_data() # create user - self.create_user("test", "test") + self.create_user("test", "test12@#*") # check user 'test' privileges - testconn = taos.connect(user='test', password='test') + testconn = taos.connect(user='test', password='test12@#*') cursor = testconn.cursor() testSql = TDSql() testSql.init(cursor) @@ -133,8 +133,8 @@ class TDTestCase(TBase): testSql.checkRows(2) # create another user 'test1' - self.create_user("test1", "test1") - test1conn = taos.connect(user='test1', password='test1') + self.create_user("test1", "test12@#*^%") + test1conn = taos.connect(user='test1', password='test12@#*^%') cursor1 = test1conn.cursor() test1Sql = TDSql() test1Sql.init(cursor1) diff --git a/tests/army/cluster/test_drop_table_by_uid.py b/tests/army/cluster/test_drop_table_by_uid.py index f09006b37b..3d10863fc2 100644 --- a/tests/army/cluster/test_drop_table_by_uid.py +++ b/tests/army/cluster/test_drop_table_by_uid.py @@ -305,9 +305,9 @@ class TDTestCase(TBase): """ try: # create new user and grant create database priviledge - tdSql.execute("create user test pass 'test';") + tdSql.execute("create user test pass 'ab45*&TC';") tdSql.execute("alter user test createdb 1;") - conn = taos.connect(user="test", password="test") + conn = taos.connect(user="test", password="ab45*&TC") cursor = conn.cursor() # create database and tables with new user tdLog.info("Prepare data for test case test_abnormal_drop_table_with_non_root_user") diff --git a/tests/army/query/function/test_selection_function_with_json.py b/tests/army/query/function/test_selection_function_with_json.py new file mode 100644 index 0000000000..e1f8090ae3 --- /dev/null +++ b/tests/army/query/function/test_selection_function_with_json.py @@ -0,0 +1,106 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +from frame import etool +from frame.etool import * +from frame.log import * +from frame.cases import * +from frame.sql import * +from frame.caseBase import * +from frame.common import * + +class TDTestCase(TBase): + updatecfgDict = { + "keepColumnName": "1", + "ttlChangeOnWrite": "1", + "querySmaOptimize": "1", + "slowLogScope": "none", + "queryBufferSize": 10240 + } + + def insert_data(self): + tdLog.info(f"insert data.") + tdSql.execute("drop database if exists ts_5763;") + tdSql.execute("create database ts_5763;") + tdSql.execute("use ts_5763;") + tdSql.execute("select database();") + tdSql.execute("CREATE STABLE metrics (ts TIMESTAMP, v DOUBLE) TAGS (labels JSON)") + tdSql.execute("""CREATE TABLE `metrics_0` USING `metrics` (`labels`) TAGS ('{"ident":"192.168.56.167"}');""") + tdSql.execute("""CREATE TABLE `metrics_1` USING `metrics` (`labels`) TAGS ('{"ident":"192.168.56.168"}');""") + tdSql.execute("""CREATE TABLE `metrics_2` USING `metrics` (`labels`) TAGS ('{"ident":"192.168.56.169"}');""") + tdSql.execute("""CREATE TABLE `metrics_3` USING `metrics` (`labels`) TAGS ('{"ident":"192.168.56.170"}');""") + tdSql.execute("""CREATE TABLE `metrics_5` USING `metrics` (`labels`) TAGS ('{"asset_name":"中国政务网"}');""") + tdSql.execute("""CREATE TABLE `metrics_6` USING `metrics` (`labels`) TAGS ('{"asset_name":"地大物博阿拉丁快解放啦上课交电费"}');""") + tdSql.execute("""CREATE TABLE `metrics_7` USING `metrics` (`labels`) TAGS ('{"asset_name":"no1241-上的六块腹肌阿斯利康的肌肤轮廓设计大方"}');""") + tdSql.execute("""CREATE TABLE `metrics_8` USING `metrics` (`labels`) TAGS ('{"asset_name":"no1241-上的六块腹肌阿斯利康的肌肤轮廓设计大方","ident":"192.168.0.1"}');""") + tdSql.execute("""CREATE TABLE `metrics_9` USING `metrics` (`labels`) TAGS ('{"asset_name":"no1241-上的六块腹肌阿斯利康的肌肤轮廓设计大方","ident":"192.168.0.1"}');""") + tdSql.execute("""CREATE TABLE `metrics_10` USING `metrics` (`labels`) TAGS ('{"asset_name":"上的咖啡机no1241-上的六块腹肌阿斯利康的肌肤轮廓设计大方","ident":"192.168.0.1"}');""") + + tdSql.execute("insert into metrics_0 values ('2024-12-12 16:34:39.326',1)") + tdSql.execute("insert into metrics_0 values ('2024-12-12 16:34:40.891',2)") + tdSql.execute("insert into metrics_0 values ('2024-12-12 16:34:41.986',3)") + tdSql.execute("insert into metrics_0 values ('2024-12-12 16:34:42.992',4)") + tdSql.execute("insert into metrics_0 values ('2024-12-12 16:34:46.927',5)") + tdSql.execute("insert into metrics_0 values ('2024-12-12 16:34:48.473',6)") + tdSql.execute("insert into metrics_1 select * from metrics_0") + tdSql.execute("insert into metrics_2 select * from metrics_0") + tdSql.execute("insert into metrics_3 select * from metrics_0") + tdSql.execute("insert into metrics_5 select * from metrics_0") + tdSql.execute("insert into metrics_6 select * from metrics_0") + tdSql.execute("insert into metrics_7 select * from metrics_0") + tdSql.execute("insert into metrics_8 values ('2024-12-12 19:05:36.459',1)") + tdSql.execute("insert into metrics_8 values ('2024-12-12 19:05:37.388',1)") + tdSql.execute("insert into metrics_8 values ('2024-12-12 19:05:37.622',1)") + tdSql.execute("insert into metrics_8 values ('2024-12-12 19:05:37.852',1)") + tdSql.execute("insert into metrics_8 values ('2024-12-12 19:05:38.081',1)") + tdSql.execute("insert into metrics_8 values ('2024-12-12 
19:05:38.307',1)") + tdSql.execute("insert into metrics_8 values ('2024-12-12 19:05:38.535',1)") + tdSql.execute("insert into metrics_8 values ('2024-12-12 19:05:38.792',1)") + tdSql.execute("insert into metrics_8 values ('2024-12-12 19:05:39.035',1)") + tdSql.execute("insert into metrics_8 values ('2024-12-12 19:05:39.240',1)") + tdSql.execute("insert into metrics_9 values ('2024-12-12 19:05:29.270',1)") + tdSql.execute("insert into metrics_9 values ('2024-12-12 19:05:30.508',1)") + tdSql.execute("insert into metrics_9 values ('2024-12-12 19:05:31.035',1)") + tdSql.execute("insert into metrics_9 values ('2024-12-12 19:05:31.523',1)") + tdSql.execute("insert into metrics_9 values ('2024-12-12 19:05:31.760',1)") + tdSql.execute("insert into metrics_9 values ('2024-12-12 19:05:32.001',1)") + tdSql.execute("insert into metrics_9 values ('2024-12-12 19:05:32.228',1)") + tdSql.execute("insert into metrics_9 values ('2024-12-12 19:05:32.453',1)") + tdSql.execute("insert into metrics_9 values ('2024-12-12 19:05:32.690',1)") + tdSql.execute("insert into metrics_9 values ('2024-12-12 19:05:32.906',1)") + tdSql.execute("insert into metrics_10 values ('2024-12-12 19:06:14.538',1)") + tdSql.execute("insert into metrics_10 values ('2024-12-12 19:06:15.114',1)") + tdSql.execute("insert into metrics_10 values ('2024-12-12 19:06:15.613',1)") + tdSql.execute("insert into metrics_10 values ('2024-12-12 19:06:15.853',1)") + tdSql.execute("insert into metrics_10 values ('2024-12-12 19:06:16.054',1)") + tdSql.execute("insert into metrics_10 values ('2024-12-12 19:06:16.295',1)") + tdSql.execute("insert into metrics_10 values ('2024-12-12 19:06:16.514',1)") + tdSql.execute("insert into metrics_10 values ('2024-12-12 19:06:16.731',1)") + tdSql.execute("insert into metrics_10 values ('2024-12-12 19:06:16.958',1)") + tdSql.execute("insert into metrics_10 values ('2024-12-12 19:06:17.176',1)") + + for i in range(1, 10): + tdSql.query("select _wstart,first(v)-last(v), first(labels->'asset_name'),first(labels->'ident'),mode(labels->'asset_name'),mode(labels->'ident'),last(labels->'asset_name'),last(labels->'ident') from ts_5763.metrics interval(1s)") + tdSql.checkRows(18) + + def run(self): + tdLog.debug(f"start to execute {__file__}") + + self.insert_data() + + + tdLog.success(f"{__file__} successfully executed") + + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 879d93ab3a..e82fd5a85d 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -15,6 +15,7 @@ ,,y,army,./pytest.sh python3 ./test.py -f cluster/snapshot.py -N 3 -L 3 -D 2 ,,y,army,./pytest.sh python3 ./test.py -f query/function/test_func_elapsed.py ,,y,army,./pytest.sh python3 ./test.py -f query/function/test_function.py +,,y,army,./pytest.sh python3 ./test.py -f query/function/test_selection_function_with_json.py ,,y,army,./pytest.sh python3 ./test.py -f query/function/test_percentile.py ,,y,army,./pytest.sh python3 ./test.py -f query/function/test_resinfo.py ,,y,army,./pytest.sh python3 ./test.py -f query/function/test_interp.py diff --git a/tests/pytest/user/pass_len.py b/tests/pytest/user/pass_len.py index 346b8424fe..2324f5993f 100644 --- a/tests/pytest/user/pass_len.py +++ b/tests/pytest/user/pass_len.py @@ -26,10 +26,10 @@ class TDTestCase: def run(self): print("==============step1") try: - tdSql.execute("create user abc pass '123456'") + tdSql.execute("create user abc pass '123456rf@#'") except
Exception as e: tdLog.exit(e) - print("create user abc pass '123456'") + print("create user abc pass '123456rf@#'") print("==============step2") try: diff --git a/tests/script/tsim/mnode/basic2.sim b/tests/script/tsim/mnode/basic2.sim index 5be29e88a6..71714a11b8 100644 --- a/tests/script/tsim/mnode/basic2.sim +++ b/tests/script/tsim/mnode/basic2.sim @@ -67,7 +67,7 @@ if $data(2)[2] != follower then endi print =============== create user -sql create user user1 PASS 'user1' +sql create user user1 PASS 'user1@#xy' sql select * from information_schema.ins_users if $rows != 2 then return -1 diff --git a/tests/script/tsim/mnode/basic3.sim b/tests/script/tsim/mnode/basic3.sim index ff7c44b67d..e19b4e9443 100644 --- a/tests/script/tsim/mnode/basic3.sim +++ b/tests/script/tsim/mnode/basic3.sim @@ -68,7 +68,7 @@ if $leaderExist != 1 then endi print =============== step3: create user -sql create user user1 PASS 'user1' +sql create user user1 PASS 'user121$*' sql select * from information_schema.ins_users if $rows != 2 then return -1 diff --git a/tests/script/tsim/query/udf.sim b/tests/script/tsim/query/udf.sim index fbf9d50c25..1bd55cbdfe 100644 --- a/tests/script/tsim/query/udf.sim +++ b/tests/script/tsim/query/udf.sim @@ -8,7 +8,7 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c udf -v 1 system sh/exec.sh -n dnode1 -s start sql connect -sql alter user root pass 'taosdata2' +sql alter user root pass '12s34(*&xx' system sh/exec.sh -n dnode1 -s stop system sh/exec.sh -n dnode1 -s start diff --git a/tests/script/tsim/sync/mnodeLeaderTransfer.sim b/tests/script/tsim/sync/mnodeLeaderTransfer.sim index ed21ac19c3..a1d364fa7c 100644 --- a/tests/script/tsim/sync/mnodeLeaderTransfer.sim +++ b/tests/script/tsim/sync/mnodeLeaderTransfer.sim @@ -33,7 +33,7 @@ sql create mnode on dnode 3 sleep 3000 print =============== create user -sql create user user1 PASS 'user1' +sql create user user1 PASS 'usersdf1$*' sql select * from information_schema.ins_users if $rows != 2 then return -1 diff --git a/tests/script/tsim/trans/lossdata1.sim b/tests/script/tsim/trans/lossdata1.sim index 82e5923468..5b29d2b193 100644 --- a/tests/script/tsim/trans/lossdata1.sim +++ b/tests/script/tsim/trans/lossdata1.sim @@ -11,8 +11,8 @@ system sh/exec.sh -n dnode1 -s start sql connect print =============== create user1 -sql create user user1 PASS 'user1' -sql create user user2 PASS 'user2' +sql create user user1 PASS 'use@##r1$*' +sql create user user2 PASS 'use&*r2$*' sql select * from information_schema.ins_users if $rows != 3 then return -1 diff --git a/tests/script/tsim/user/password.sim b/tests/script/tsim/user/password.sim index d26b9dbc2e..729097e7e1 100644 --- a/tests/script/tsim/user/password.sim +++ b/tests/script/tsim/user/password.sim @@ -4,8 +4,8 @@ system sh/exec.sh -n dnode1 -s start sql connect print ============= step1 -sql create user u_read pass 'taosdata1' -sql create user u_write pass 'taosdata1' +sql create user u_read pass 'tbx12F132!' 
+sql create user u_write pass 'tbx12145&*' sql alter user u_read pass 'taosdata' sql alter user u_write pass 'taosdata' @@ -15,6 +15,164 @@ if $rows != 3 then return -1 endi +# invalid password format + +sql_error create user user_p1 pass 'taosdata1' +sql_error create user user_p1 pass 'taosdata2' +sql_error create user user_p1 pass '!@#$%^&3' +sql_error create user user_p1 pass '1234564' +sql_error create user user_p1 pass 'taosdataa' +sql_error create user user_p1 pass 'taosdatab' +sql_error create user user_p1 pass '!@#$%^&c' +sql_error create user user_p1 pass '123456d' +sql_error create user user_p1 pass 'taosdataE' +sql_error create user user_p1 pass 'taosdataF' +sql_error create user user_p1 pass '!@#$%^&G' +sql_error create user user_p1 pass '12333315H' +sql_error create user user_p1 pass 'aaaaaaaat1' +sql_error create user user_p1 pass 'TTTTTTTTT2' +sql_error create user user_p1 pass '!@#$%^&!3' +sql_error create user user_p1 pass '12345654' +sql_error create user user_p1 pass 'taosdatata' +sql_error create user user_p1 pass 'TAOSDATATb' +sql_error create user user_p1 pass '!@#$%^&!c' +sql_error create user user_p1 pass '1234565d' +sql_error create user user_p1 pass 'taosdatatE' +sql_error create user user_p1 pass 'TAOSDATATF' +sql_error create user user_p1 pass '!@#$$*!G' +sql_error create user user_p1 pass '1234565H' +sql_error create user user_p1 pass 'taosdataaosdata!' +sql_error create user user_p1 pass 'taosdataaosdata@' +sql_error create user user_p1 pass '!@#$%^&@*#' +sql_error create user user_p1 pass '!@#$%^&' +sql_error create user user_p1 pass '!@#$%^&@*#@' +sql_error create user user_p1 pass '!@#$%^&@*##' +sql_error create user user_p1 pass '!@#$%^&@*#$' +sql_error create user user_p1 pass '!@#$%^&@*#%' +sql_error create user user_p1 pass '!@#$%^&@*#^' +sql_error create user user_p1 pass '!@#$%^&@*#&' +sql_error create user user_p1 pass '!@#$%^&@*#*' +sql_error create user user_p1 pass '!@#$%^&@*#(' +sql_error create user user_p1 pass '!@#$%^&@*#)' +sql_error create user user_p1 pass '!@#$%^&@*#-' +sql_error create user user_p1 pass '!@#$%^&@*#_' +sql_error create user user_p1 pass '!@#$%^&@*#+' +sql_error create user user_p1 pass '!@#$%^&@*#=' +sql_error create user user_p1 pass '!@#$%^&@*#[' +sql_error create user user_p1 pass '!@#$%^&@*#]' +sql_error create user user_p1 pass '!@#$%^&@*#{' +sql_error create user user_p1 pass '!@#$%^&@*#}' +sql_error create user user_p1 pass '!@#$%^&@*#:' +sql_error create user user_p1 pass '!@#$%^&@*#;' +sql_error create user user_p1 pass '!@#$%^&@*#>' +sql_error create user user_p1 pass '!@#$%^&@*#<' +sql_error create user user_p1 pass '!@#$%^&@*#?' +sql_error create user user_p1 pass '!@#$%^&@*#|' +sql_error create user user_p1 pass '!@#$%^&@*#~' +sql_error create user user_p1 pass '!@#$%^&@*#,' +sql_error create user user_p1 pass '!@#$%^&@*#.' 
+sql_error create user user_p1 pass 'tbd1234TTT\' +sql_error create user user_p1 pass 'tbd1234TTT/' +sql_error create user user_p1 pass 'tbd1234TTT`' +sql_error create user user_p1 pass 'taosdatax' +sql_error create user user_p1 pass 'taosdatay' + +sql_error create user user_p1 pass 'abcd!@1' +sql create user user_p2 pass 'abcd!@12' +sql create user user_p3 pass 'abcd!@123' +sql create user user_p4 pass 'abcd!@1234' +sql create user user_p5 pass 'abcd!@12345' +sql create user user_p6 pass 'abcd!@123456' +sql create user user_p7 pass 'abcd!@1234567' +sql create user user_p8 pass 'abcd!@123456789' +sql create user user_p9 pass 'abcd!@1234567890' +sql_error create user user_p10 pass 'abcd!@1234567890T' +sql drop user user_p2 +sql drop user user_p3 +sql drop user user_p4 +sql drop user user_p5 +sql drop user user_p6 +sql drop user user_p7 +sql drop user user_p8 +sql drop user user_p9 + +sql create user user_p1 pass 'xt12!@cd' + +sql_error alter user user_p1 pass 'abcd!@1' +sql alter user user_p1 pass 'abcd!@12' +sql alter user user_p1 pass 'abcd!@123' +sql alter user user_p1 pass 'abcd!@1234' +sql alter user user_p1 pass 'abcd!@12345' +sql alter user user_p1 pass 'abcd!@123456' +sql alter user user_p1 pass 'abcd!@1234567' +sql alter user user_p1 pass 'abcd!@123456789' +sql alter user user_p1 pass 'abcd!@1234567890' +sql_error alter user user_p1 pass 'abcd!@1234567890T' +sql_error alter user user_p1 pass 'taosdata1' +sql_error alter user user_p1 pass 'taosdata2' +sql_error alter user user_p1 pass '!@#$%^&3' +sql_error alter user user_p1 pass '1234564' +sql_error alter user user_p1 pass 'taosdataa' +sql_error alter user user_p1 pass 'taosdatab' +sql_error alter user user_p1 pass '!@#$%^&c' +sql_error alter user user_p1 pass '123456d' +sql_error alter user user_p1 pass 'taosdataE' +sql_error alter user user_p1 pass 'taosdataF' +sql_error alter user user_p1 pass '!@#$%^&G' +sql_error alter user user_p1 pass '12334515H' +sql_error alter user user_p1 pass 'aasfdsft1' +sql_error alter user user_p1 pass 'TAOSDATAT2' +sql_error alter user user_p1 pass '!@#$%^&!3' +sql_error alter user user_p1 pass '12345654' +sql_error alter user user_p1 pass 'taosdatata' +sql_error alter user user_p1 pass 'TAOSDATATb' +sql_error alter user user_p1 pass '!@#$%^&!c' +sql_error alter user user_p1 pass '1234565d' +sql_error alter user user_p1 pass 'taosdatatE' +sql_error alter user user_p1 pass 'TAOSDATATF' +sql_error alter user user_p1 pass '*%^^%###!G' +sql_error alter user user_p1 pass '1234565H' +sql_error alter user user_p1 pass 'taosdataaosdata!'
+sql_error alter user user_p1 pass 'taosdataaosdata@' +sql_error alter user user_p1 pass '!@#$%^&@*#' +sql_error alter user user_p1 pass '!@#$%^&' +sql_error alter user user_p1 pass '!@#$%^&@*#@' +sql_error alter user user_p1 pass '!@#$%^&@*##' +sql_error alter user user_p1 pass '!@#$%^&@*#$' +sql_error alter user user_p1 pass '!@#$%^&@*#%' +sql_error alter user user_p1 pass '!@#$%^&@*#^' +sql_error alter user user_p1 pass '!@#$%^&@*#&' +sql_error alter user user_p1 pass '!@#$%^&@*#*' +sql_error alter user user_p1 pass '!@#$%^&@*#(' +sql_error alter user user_p1 pass '!@#$%^&@*#)' +sql_error alter user user_p1 pass '!@#$%^&@*#-' +sql_error alter user user_p1 pass '!@#$%^&@*#_' +sql_error alter user user_p1 pass '!@#$%^&@*#+' +sql_error alter user user_p1 pass '!@#$%^&@*#=' +sql_error alter user user_p1 pass '!@#$%^&@*#[' +sql_error alter user user_p1 pass '!@#$%^&@*#]' +sql_error alter user user_p1 pass '!@#$%^&@*#{' +sql_error alter user user_p1 pass '!@#$%^&@*#}' +sql_error alter user user_p1 pass '!@#$%^&@*#:' +sql_error alter user user_p1 pass '!@#$%^&@*#;' +sql_error alter user user_p1 pass '!@#$%^&@*#>' +sql_error alter user user_p1 pass '!@#$%^&@*#<' +sql_error alter user user_p1 pass '!@#$%^&@*#?' +sql_error alter user user_p1 pass '!@#$%^&@*#|' +sql_error alter user user_p1 pass '!@#$%^&@*#~' +sql_error alter user user_p1 pass '!@#$%^&@*#,' +sql_error alter user user_p1 pass '!@#$%^&@*#.' +sql_error alter user user_p1 pass 'tbd1234TTT\' +sql_error alter user user_p1 pass 'tbd1234TTT/' +sql_error alter user user_p1 pass 'tbd1234TTT`' +sql_error alter user user_p1 pass 'taosdatax' +sql_error alter user user_p1 pass 'taosdatay' + +sql drop user user_p1 + +sql create user user_px pass 'taosdata' +sql drop user user_px + print ============= step2 print user u_read login sql close @@ -54,7 +212,7 @@ sql create user oroot pass 'taosdata' sql_error create user $user PASS 'abcd012345678901234567891234567890abcd012345678901234567891234567890abcd012345678901234567891234567890abcd012345678901234567891234567890123' sql_error create userabcd012345678901234567891234567890abcd01234567890123456789123456789 PASS 'taosdata' sql_error create user abcd0123456789012345678901234567890111 PASS '123' -sql create user abc01234567890123456789 PASS '123' +sql create user abc01234567890123456789 PASS '123xyzYDE' sql show users if $rows != 5 then @@ -84,4 +242,34 @@ sql_error REVOKE all ON *.* from o_root; sql_error GRANT read,write ON *.* to o_root; sql_error REVOKE read,write ON *.* from o_root; + +sql create user u01 pass 'taosdata1!' +sql create user u02 pass 'taosdata1@' +sql create user u03 pass 'taosdata1#' +# sql create user u04 pass 'taosdata1$' +sql create user u05 pass 'taosdata1%' +sql create user u06 pass 'taosdata1^' +sql create user u07 pass 'taosdata1&' +sql create user u08 pass 'taosdata1*' +sql create user u09 pass 'taosdata1(' +sql create user u10 pass 'taosdata1)' +sql create user u11 pass 'taosdata1-' +sql create user u12 pass 'taosdata1_' +sql create user u13 pass 'taosdata1+' +sql create user u14 pass 'taosdata1=' +sql create user u15 pass 'taosdata1[' +sql create user u16 pass 'taosdata1]' +sql create user u17 pass 'taosdata1{' +sql create user u18 pass 'taosdata1}' +sql create user u19 pass 'taosdata1:' +sql create user u20 pass 'taosdata1;' +sql create user u21 pass 'taosdata1>' +sql create user u22 pass 'taosdata1<' +sql create user u23 pass 'taosdata1?' 
+sql create user u24 pass 'taosdata1|' +sql create user u25 pass 'taosdata1~' +sql create user u26 pass 'taosdata1,' +sql create user u27 pass 'taosdata1.' + +return system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/tsim/user/privilege_db.sim b/tests/script/tsim/user/privilege_db.sim index 50eaa12108..fb0f9e4566 100644 --- a/tests/script/tsim/user/privilege_db.sim +++ b/tests/script/tsim/user/privilege_db.sim @@ -17,8 +17,8 @@ if $rows != 5 then endi print =============== create users -sql create user user1 PASS 'user1' -sql create user user2 PASS 'user2' +sql create user user1 PASS '123124(*&xx)' +sql create user user2 PASS '1234(*&xx' sql select * from information_schema.ins_users if $rows != 3 then return -1 diff --git a/tests/script/tsim/user/whitelist.sim b/tests/script/tsim/user/whitelist.sim index 4722c00efa..5f98b92bda 100644 --- a/tests/script/tsim/user/whitelist.sim +++ b/tests/script/tsim/user/whitelist.sim @@ -4,8 +4,8 @@ system sh/exec.sh -n dnode1 -s start sql connect print ============= step1 -sql create user u_read pass 'taosdata1' host '127.0.0.1/24','192.168.1.0/24' -sql create user u_write pass 'taosdata1' host '127.0.0.1','192.168.1.0' +sql create user u_read pass 'taosdata1xad@#' host '127.0.0.1/24','192.168.1.0/24' +sql create user u_write pass 'taosdata1TadBD' host '127.0.0.1','192.168.1.0' sql alter user u_read add host '3.3.3.4/24' sql_error alter user u_write drop host '4.4.4.5/25' @@ -16,8 +16,8 @@ if $rows != 3 then endi print ============= step2 -sql_error create user read1 pass 'taosdata1' host '127.0.0/24' -sql_error create user write1 pass 'taosdata1' host '4.4.4.4/33' +sql_error create user read1 pass 'taosdata1XR' host '127.0.0/24' +sql_error create user write1 pass 'TZtaosdata1' host '4.4.4.4/33' sql show users if $rows != 3 then diff --git a/tests/system-test/0-others/subscribe_stream_privilege.py b/tests/system-test/0-others/subscribe_stream_privilege.py index b477af9f57..a447e43e8b 100644 --- a/tests/system-test/0-others/subscribe_stream_privilege.py +++ b/tests/system-test/0-others/subscribe_stream_privilege.py @@ -114,7 +114,7 @@ class TDTestCase: consumer_dict = { "group.id": "g1", "td.connect.user": self.user_name, - "td.connect.pass": "test", + "td.connect.pass": "123456rf@#", "auto.offset.reset": "earliest" } consumer = Consumer(consumer_dict) @@ -167,7 +167,7 @@ class TDTestCase: def create_user(self): tdSql.execute(f'create topic {self.topic_name} as database {self.dbnames[0]}') - tdSql.execute(f'create user {self.user_name} pass "test"') + tdSql.execute(f'create user {self.user_name} pass "123456rf@#"') def run(self): self.prepare_data() diff --git a/tests/system-test/0-others/taosShell.py b/tests/system-test/0-others/taosShell.py index 91e9f2fb89..b43723161f 100644 --- a/tests/system-test/0-others/taosShell.py +++ b/tests/system-test/0-others/taosShell.py @@ -158,7 +158,7 @@ class TDTestCase: def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring tdSql.prepare() # time.sleep(2) - tdSql.query("create user testpy pass 'testpy'") + tdSql.query("create user testpy pass 'testpy243#@'") tdSql.query("alter user testpy createdb 1") #hostname = socket.gethostname() @@ -175,7 +175,7 @@ class TDTestCase: checkNetworkStatus = ['0: unavailable', '1: network ok', '2: service ok', '3: service degraded', '4: exiting'] netrole = ['client', 'server'] - keyDict = {'h':'', 'P':'6030', 'p':'testpy', 'u':'testpy', 'a':'', 'A':'', 'c':'', 'C':'', 's':'', 'r':'', 'f':'', \ + keyDict = 
{'h':'', 'P':'6030', 'p':'testpy243#@', 'u':'testpy', 'a':'', 'A':'', 'c':'', 'C':'', 's':'', 'r':'', 'f':'', \ 'k':'', 't':'', 'n':'', 'l':'1024', 'N':'100', 'V':'', 'd':'db', 'w':'30', '-help':'', '-usage':'', '?':''} keyDict['h'] = self.hostname diff --git a/tests/system-test/0-others/taosShellError.py b/tests/system-test/0-others/taosShellError.py index 5e6a590806..e7221b6409 100644 --- a/tests/system-test/0-others/taosShellError.py +++ b/tests/system-test/0-others/taosShellError.py @@ -134,7 +134,7 @@ class TDTestCase: def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring tdSql.prepare() # time.sleep(2) - tdSql.query("create user testpy pass 'testpy'") + tdSql.query("create user testpy pass 'testpy243#@'") #hostname = socket.gethostname() #tdLog.info ("hostname: %s" % hostname) @@ -150,7 +150,7 @@ class TDTestCase: checkNetworkStatus = ['0: unavailable', '1: network ok', '2: service ok', '3: service degraded', '4: exiting'] netrole = ['client', 'server'] - keyDict = {'h':'', 'P':'6030', 'p':'testpy', 'u':'testpy', 'a':'', 'A':'', 'c':'', 'C':'', 's':'', 'r':'', 'f':'', \ + keyDict = {'h':'', 'P':'6030', 'p':'testpy243#@', 'u':'testpy', 'a':'', 'A':'', 'c':'', 'C':'', 's':'', 'r':'', 'f':'', \ 'k':'', 't':'', 'n':'', 'l':'1024', 'N':'100', 'V':'', 'd':'db', 'w':'30', '-help':'', '-usage':'', '?':''} keyDict['h'] = self.hostname diff --git a/tests/system-test/0-others/taosShellNetChk.py b/tests/system-test/0-others/taosShellNetChk.py index d2efa5d9fe..37d82b7e84 100644 --- a/tests/system-test/0-others/taosShellNetChk.py +++ b/tests/system-test/0-others/taosShellNetChk.py @@ -133,7 +133,7 @@ class TDTestCase: def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring tdSql.prepare() - tdSql.query("create user testpy pass 'testpy'") + tdSql.query("create user testpy pass 'testpy243#@'") buildPath = self.getBuildPath() if (buildPath == ""): @@ -146,7 +146,7 @@ class TDTestCase: checkNetworkStatus = ['0: unavailable', '1: network ok', '2: service ok', '3: service degraded', '4: exiting'] netrole = ['client', 'server'] - keyDict = {'h':'', 'P':'6030', 'p':'testpy', 'u':'testpy', 'a':'', 'A':'', 'c':'', 'C':'', 's':'', 'r':'', 'f':'', \ + keyDict = {'h':'', 'P':'6030', 'p':'testpy243#@', 'u':'testpy', 'a':'', 'A':'', 'c':'', 'C':'', 's':'', 'r':'', 'f':'', \ 'k':'', 't':'', 'n':'', 'l':'1024', 'N':'100', 'V':'', 'd':'db', 'w':'30', '-help':'', '-usage':'', '?':''} keyDict['h'] = self.hostname diff --git a/tests/system-test/0-others/taosdShell.py b/tests/system-test/0-others/taosdShell.py index 9b0628ec12..8f30930957 100644 --- a/tests/system-test/0-others/taosdShell.py +++ b/tests/system-test/0-others/taosdShell.py @@ -132,7 +132,7 @@ class TDTestCase: def preData(self): # database\stb\tb\chiild-tb\rows\topics - tdSql.execute("create user testpy pass 'testpy'") + tdSql.execute("create user testpy pass 'testpy243#@'") tdSql.execute("drop database if exists db0;") tdSql.execute("create database db0 wal_retention_period 3600;") tdSql.execute("use db0;") diff --git a/tests/system-test/0-others/taosdlog.py b/tests/system-test/0-others/taosdlog.py index d4698960cd..9fec937627 100644 --- a/tests/system-test/0-others/taosdlog.py +++ b/tests/system-test/0-others/taosdlog.py @@ -34,7 +34,7 @@ class TDTestCase: def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring tdSql.prepare() # time.sleep(2) - tdSql.query("create user testpy pass 'testpy'") + tdSql.query("create user testpy pass 't123#$estpy'") buildPath = 
self.getBuildPath() if (buildPath == ""): diff --git a/tests/system-test/0-others/user_control.py b/tests/system-test/0-others/user_control.py index c4d24582e4..33b263c155 100644 --- a/tests/system-test/0-others/user_control.py +++ b/tests/system-test/0-others/user_control.py @@ -182,14 +182,14 @@ class TDTestCase: for i in range(self.users_count): user = User() user.name = f"user_test{i}" - user.passwd = f"taosdata{i}" + user.passwd = f"taosdata@1{i}" user.db_set = set() self.users.append(user) return self.users @property def __passwd_list(self): - return [f"taosdata{i}" for i in range(self.users_count) ] + return [f"taosdata@1{i}" for i in range(self.users_count) ] @property def __privilege(self): @@ -210,34 +210,34 @@ class TDTestCase: def create_user_err(self): sqls = [ - "create users u1 pass 'u1passwd' ", - "create user '' pass 'u1passwd' ", - "create user pass 'u1passwd' ", - "create user u1 pass u1passwd ", - "create user u1 password 'u1passwd' ", - "create user u1 pass u1passwd ", + "create users u1 pass 'u1Passwd' ", + "create user '' pass 'u1Passwd' ", + "create user pass 'u1Passwd' ", + "create user u1 pass u1Passwd ", + "create user u1 password 'u1Passwd' ", + "create user u1 pass u1Passwd ", "create user u1 pass '' ", "create user u1 pass ' ' ", "create user u1 pass ", - "create user u1 u2 pass 'u1passwd' 'u2passwd' ", - "create user u1 u2 pass 'u1passwd', 'u2passwd' ", - "create user u1, u2 pass 'u1passwd', 'u2passwd' ", - "create user u1, u2 pass 'u1passwd' 'u2passwd' ", + "create user u1 u2 pass 'u1Passwd' 'u2passwd' ", + "create user u1 u2 pass 'u1Passwd', 'u2passwd' ", + "create user u1, u2 pass 'u1Passwd', 'u2passwd' ", + "create user u1, u2 pass 'u1Passwd' 'u2passwd' ", # length of user_name must <= 23 - "create user u12345678901234567890123 pass 'u1passwd' " , + "create user u12345678901234567890123 pass 'u1Passwd' " , # length of passwd must <= 128 "create user u1 pass 'u12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678' " , # password must have not " ' ~ ` \ - "create user u1 pass 'u1passwd\\' " , - "create user u1 pass 'u1passwd~' " , - "create user u1 pass 'u1passwd\"' " , - "create user u1 pass 'u1passwd\'' " , - "create user u1 pass 'u1passwd`' " , + "create user u1 pass 'u1Passwd\\' " , + "create user u1 pass 'u1Passwd~' " , + "create user u1 pass 'u1Passwd\"' " , + "create user u1 pass 'u1Passwd\'' " , + "create user u1 pass 'u1Passwd`' " , # must after create a user named u1 - "create user u1 pass 'u1passwd' " , + "create user u1 pass 'u1Passwd' " , ] - tdSql.execute("create user u1 pass 'u1passwd' ") + tdSql.execute("create user u1 pass 'u1Passwd' ") for sql in sqls: tdSql.error(sql) @@ -258,12 +258,12 @@ class TDTestCase: def alter_pass_err(self): # sourcery skip: remove-redundant-fstring sqls = [ - f"alter users {self.__user_list[0]} pass 'newpass' " , + f"alter users {self.__user_list[0]} pass 'newpassT1' " , f"alter user {self.__user_list[0]} pass '' " , f"alter user {self.__user_list[0]} pass ' ' " , - f"alter user anyuser pass 'newpass' " , + f"alter user anyuser pass 'newpassT1' " , f"alter user {self.__user_list[0]} pass " , - f"alter user {self.__user_list[0]} password 'newpass' " , + f"alter user {self.__user_list[0]} password 'newpassT1' " , ] for sql in sqls: tdSql.error(sql) @@ -649,7 +649,7 @@ class TDTestCase: # user = conn # 不能创建用户 tdLog.printNoPrefix("==========step4.1: normal user can not create user") - user.error("create use utest1 pass 'utest1pass'") + 
user.error("create use utest1 pass 'utest1Pass'") # 可以查看用户 tdLog.printNoPrefix("==========step4.2: normal user can show user") user.query("show users") diff --git a/tests/system-test/0-others/user_manage.py b/tests/system-test/0-others/user_manage.py index 6f90a2873a..e48231b059 100644 --- a/tests/system-test/0-others/user_manage.py +++ b/tests/system-test/0-others/user_manage.py @@ -80,16 +80,16 @@ class TDTestCase: for user_name in ['jiacy1_all', 'jiacy1_read', 'jiacy1_write', 'jiacy1_none', 'jiacy0_all', 'jiacy0_read', 'jiacy0_write', 'jiacy0_none']: if 'jiacy1' in user_name.lower(): - tdSql.execute(f'create user {user_name} pass "123" sysinfo 1') + tdSql.execute(f'create user {user_name} pass "123abc!@#" sysinfo 1') elif 'jiacy0' in user_name.lower(): - tdSql.execute(f'create user {user_name} pass "123" sysinfo 0') + tdSql.execute(f'create user {user_name} pass "123abc!@#" sysinfo 0') for user_name in ['jiacy1_all', 'jiacy1_read', 'jiacy0_all', 'jiacy0_read']: tdSql.execute(f'grant read on db to {user_name}') for user_name in ['jiacy1_all', 'jiacy1_write', 'jiacy0_all', 'jiacy0_write']: tdSql.execute(f'grant write on db to {user_name}') def user_privilege_check(self): - jiacy1_read_conn = taos.connect(user='jiacy1_read', password='123') + jiacy1_read_conn = taos.connect(user='jiacy1_read', password='123abc!@#') sql = "create table ntb (ts timestamp,c0 int)" expectErrNotOccured = True try: @@ -107,14 +107,14 @@ class TDTestCase: pass def drop_topic(self): - jiacy1_all_conn = taos.connect(user='jiacy1_all', password='123') - jiacy1_read_conn = taos.connect(user='jiacy1_read', password='123') - jiacy1_write_conn = taos.connect(user='jiacy1_write', password='123') - jiacy1_none_conn = taos.connect(user='jiacy1_none', password='123') - jiacy0_all_conn = taos.connect(user='jiacy0_all', password='123') - jiacy0_read_conn = taos.connect(user='jiacy0_read', password='123') - jiacy0_write_conn = taos.connect(user='jiacy0_write', password='123') - jiacy0_none_conn = taos.connect(user='jiacy0_none', password='123') + jiacy1_all_conn = taos.connect(user='jiacy1_all', password='123abc!@#') + jiacy1_read_conn = taos.connect(user='jiacy1_read', password='123abc!@#') + jiacy1_write_conn = taos.connect(user='jiacy1_write', password='123abc!@#') + jiacy1_none_conn = taos.connect(user='jiacy1_none', password='123abc!@#') + jiacy0_all_conn = taos.connect(user='jiacy0_all', password='123abc!@#') + jiacy0_read_conn = taos.connect(user='jiacy0_read', password='123abc!@#') + jiacy0_write_conn = taos.connect(user='jiacy0_write', password='123abc!@#') + jiacy0_none_conn = taos.connect(user='jiacy0_none', password='123abc!@#') tdSql.execute('create topic root_db as select * from db.stb') for user in [jiacy1_all_conn, jiacy1_read_conn, jiacy0_all_conn, jiacy0_read_conn]: user.execute(f'create topic db_jiacy as select * from db.stb') @@ -149,7 +149,7 @@ class TDTestCase: tdSql.execute('create topic db_topic as select * from db.stb') tdSql.execute('grant subscribe on db_topic to jiacy1_all') print("build consumer") - tmq = Consumer({"group.id": "tg2", "td.connect.user": "jiacy1_all", "td.connect.pass": "123", + tmq = Consumer({"group.id": "tg2", "td.connect.user": "jiacy1_all", "td.connect.pass": "123abc!@#", "enable.auto.commit": "true"}) print("build topic list") tmq.subscribe(["db_topic"]) diff --git a/tests/system-test/0-others/user_privilege.py b/tests/system-test/0-others/user_privilege.py index a731e85ddb..809881589a 100644 --- a/tests/system-test/0-others/user_privilege.py +++ 
b/tests/system-test/0-others/user_privilege.py @@ -58,7 +58,7 @@ class TDTestCase: self.stbnum_grant = 200 def create_user(self): - tdSql.execute(f'create user {self.user_name} pass "test"') + tdSql.execute(f'create user {self.user_name} pass "test123@#$"') tdSql.execute(f'grant read on {self.dbnames[0]}.{self.stbname} with t2 = "Beijing" to {self.user_name}') tdSql.execute(f'grant write on {self.dbnames[1]}.{self.stbname} with t1 = 2 to {self.user_name}') @@ -75,7 +75,7 @@ class TDTestCase: tdSql.execute(f'create table {self.stbname}_grant_{i} (ts timestamp, c0 int) tags(t0 int)') def user_read_privilege_check(self, dbname): - testconn = taos.connect(user='test', password='test') + testconn = taos.connect(user='test', password='test123@#$') expectErrNotOccured = False try: @@ -94,7 +94,7 @@ class TDTestCase: pass def user_write_privilege_check(self, dbname): - testconn = taos.connect(user='test', password='test') + testconn = taos.connect(user='test', password='test123@#$') expectErrNotOccured = False try: @@ -110,7 +110,7 @@ class TDTestCase: pass def user_privilege_error_check(self): - testconn = taos.connect(user='test', password='test') + testconn = taos.connect(user='test', password='test123@#$') expectErrNotOccured = False sql_list = [f"alter talbe {self.dbnames[0]}.stb_1 set t2 = 'Wuhan'", diff --git a/tests/system-test/0-others/user_privilege_all.py b/tests/system-test/0-others/user_privilege_all.py index 846b76317e..76f952f572 100644 --- a/tests/system-test/0-others/user_privilege_all.py +++ b/tests/system-test/0-others/user_privilege_all.py @@ -21,7 +21,7 @@ class TDTestCase: self.setsql = TDSetSql() # user info self.username = 'test' - self.password = 'test' + self.password = 'test123@#$' # db info self.dbname = "user_privilege_all_db" self.stbname = 'stb' diff --git a/tests/system-test/0-others/user_privilege_multi_users.py b/tests/system-test/0-others/user_privilege_multi_users.py index 53ff136e63..69ad32b756 100644 --- a/tests/system-test/0-others/user_privilege_multi_users.py +++ b/tests/system-test/0-others/user_privilege_multi_users.py @@ -19,7 +19,7 @@ class TDTestCase: # user info self.userNum = 100 self.basic_username = "user" - self.password = "pwd" + self.password = "test123@#$" # db info self.dbname = "user_privilege_multi_users" diff --git a/tests/system-test/0-others/user_privilege_show.py b/tests/system-test/0-others/user_privilege_show.py index 9f49778ba8..41a920967d 100644 --- a/tests/system-test/0-others/user_privilege_show.py +++ b/tests/system-test/0-others/user_privilege_show.py @@ -20,7 +20,7 @@ class TDTestCase: self.setsql = TDSetSql() # user info self.username = 'test' - self.password = 'test' + self.password = 'test123@#$' # db info self.dbname = "user_privilege_show" self.stbname = 'stb' diff --git a/tests/system-test/0-others/view/non_marterial_view/test_view.py b/tests/system-test/0-others/view/non_marterial_view/test_view.py index 3b6f774788..7e062143d2 100644 --- a/tests/system-test/0-others/view/non_marterial_view/test_view.py +++ b/tests/system-test/0-others/view/non_marterial_view/test_view.py @@ -231,7 +231,7 @@ class TDTestCase: """ self.prepare_data() username = "view_test" - password = "test" + password = "test123@#$" self.create_user(username, password) # grant all db permission to user tdSql.execute("grant all on view_db.* to view_test;") @@ -271,7 +271,7 @@ class TDTestCase: """This test case is used to verify the view permission with db write and view all """ username = "view_test" - password = "test" + password = "test123@#$" 
self.create_user(username, password) conn = taos.connect(user=username, password=password) self.prepare_data(conn) @@ -302,7 +302,7 @@ class TDTestCase: """This test case is used to verify the view permission with db write and view read """ username = "view_test" - password = "test" + password = "test123@#$" self.create_user(username, password) conn = taos.connect(user=username, password=password) self.prepare_data() @@ -338,7 +338,7 @@ class TDTestCase: """This test case is used to verify the view permission with db write and view alter """ username = "view_test" - password = "test" + password = "test123@#$" self.create_user(username, password) conn = taos.connect(user=username, password=password) self.prepare_data() @@ -362,7 +362,7 @@ class TDTestCase: """This test case is used to verify the view permission with db read and view all """ username = "view_test" - password = "test" + password = "test123@#$" self.create_user(username, password) conn = taos.connect(user=username, password=password) self.prepare_data() @@ -388,7 +388,7 @@ class TDTestCase: """This test case is used to verify the view permission with db read and view alter """ username = "view_test" - password = "test" + password = "test123@#$" self.create_user(username, password) conn = taos.connect(user=username, password=password) self.prepare_data() @@ -413,7 +413,7 @@ class TDTestCase: """This test case is used to verify the view permission with db read and view read """ username = "view_test" - password = "test" + password = "test123@#$" self.create_user(username, password) conn = taos.connect(user=username, password=password) self.prepare_data() diff --git a/tests/system-test/0-others/walFileIdex.py b/tests/system-test/0-others/walFileIdex.py index f8309519cd..8ef0c7c181 100644 --- a/tests/system-test/0-others/walFileIdex.py +++ b/tests/system-test/0-others/walFileIdex.py @@ -40,7 +40,7 @@ class TDTestCase: def preData(self): # database\stb\tb\chiild-tb\rows\topics - tdSql.execute("create user testpy pass 'testpy'") + tdSql.execute("create user testpy pass 'test123@#$'") tdSql.execute("drop database if exists db0;") tdSql.execute("create database db0 WAL_RETENTION_PERIOD -1 WAL_RETENTION_SIZE -1 ;") tdSql.execute("use db0;") diff --git a/tests/system-test/1-insert/boundary.py b/tests/system-test/1-insert/boundary.py index 4476236ca6..25782fd0c3 100644 --- a/tests/system-test/1-insert/boundary.py +++ b/tests/system-test/1-insert/boundary.py @@ -33,7 +33,7 @@ class TDTestCase: self.colname_length_boundary = self.boundary.COL_KEY_MAX_LENGTH self.tagname_length_boundary = self.boundary.TAG_KEY_MAX_LENGTH self.username_length_boundary = 23 - self.password_length_boundary = 31 + self.password_length_boundary = 14 def dbname_length_check(self): dbname_length = randint(1,self.dbname_length_boundary-1) for dbname in [tdCom.get_long_name(self.dbname_length_boundary),tdCom.get_long_name(dbname_length)]: @@ -120,27 +120,31 @@ class TDTestCase: def username_length_check(self): username_length = randint(1,self.username_length_boundary-1) for username in [tdCom.get_long_name(username_length),tdCom.get_long_name(self.username_length_boundary)]: - tdSql.execute(f'create user {username} pass "123"') + tdSql.execute(f'create user {username} pass "test123@#$"') tdSql.query('show users') for user in tdSql.queryResult: if user[0].lower() != 'root': tdSql.checkEqual(user[0],username) tdSql.execute(f'drop user {username}') username = tdCom.get_long_name(self.username_length_boundary+1) - tdSql.error(f'create user {username} pass "123"') + 
tdSql.error(f'create user {username} pass "test123@#$"') if "Name or password too long" in tdSql.error_info: tdLog.info("error info is true!") else: tdLog.exit("error info is not true") def password_length_check(self): - password_length = randint(1,self.password_length_boundary-1) + password_length = randint(8,self.password_length_boundary-1) + index = 0 for password in [tdCom.get_long_name(password_length),tdCom.get_long_name(self.password_length_boundary)]: - username = tdCom.get_long_name(3) - tdSql.execute(f'create user {username} pass "{password}"') + index += 1 + username = tdCom.get_long_name(12) + str(index) + tdSql.execute(f'create user {username} pass "{password}@1"') + index += 1 + username = tdCom.get_long_name(12) + str(index) password = tdCom.get_long_name(self.password_length_boundary+1) - tdSql.error(f'create user {username} pass "{password}"') - if "Name or password too long" in tdSql.error_info: + tdSql.error(f'create user {username} pass "{password}@1"') + if "Invalid password format" in tdSql.error_info: tdLog.info("error info is true!") else: tdLog.exit("error info is not true") diff --git a/tests/system-test/2-query/columnLenUpdated.py b/tests/system-test/2-query/columnLenUpdated.py index 4c92236fca..7470bd907c 100644 --- a/tests/system-test/2-query/columnLenUpdated.py +++ b/tests/system-test/2-query/columnLenUpdated.py @@ -104,7 +104,7 @@ class TDTestCase: def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring tdSql.prepare() # time.sleep(2) - tdSql.query("create user testpy pass 'testpy'") + tdSql.query("create user testpy pass 'test123@#$'") buildPath = self.getBuildPath() if (buildPath == ""): @@ -117,7 +117,7 @@ class TDTestCase: checkNetworkStatus = ['0: unavailable', '1: network ok', '2: service ok', '3: service degraded', '4: exiting'] netrole = ['client', 'server'] - keyDict = {'h':'', 'P':'6030', 'p':'testpy', 'u':'testpy', 'a':'', 'A':'', 'c':'', 'C':'', 's':'', 'r':'', 'f':'', \ + keyDict = {'h':'', 'P':'6030', 'p':'test123@#$', 'u':'testpy', 'a':'', 'A':'', 'c':'', 'C':'', 's':'', 'r':'', 'f':'', \ 'k':'', 't':'', 'n':'', 'l':'1024', 'N':'100', 'V':'', 'd':'db', 'w':'30', '-help':'', '-usage':'', '?':''} keyDict['h'] = self.hostname diff --git a/tests/system-test/2-query/select_null.py b/tests/system-test/2-query/select_null.py index bd92e4cf5c..4a6fb3a583 100755 --- a/tests/system-test/2-query/select_null.py +++ b/tests/system-test/2-query/select_null.py @@ -500,7 +500,7 @@ class TDTestCase: tdSql.execute('create stable sel_null.join_stable(`时间戳` timestamp, c1 int) tags(`标签1` int)', queryTimes=1) tdSql.query('select a.值 from sel_null.stable1 a join sel_null.join_stable b on a.ts = 时间戳;', queryTimes=1) tdSql.query('select a.值 from sel_null.stable1 a join sel_null.join_stable b on a.ts = b.时间戳;', queryTimes=1) - tdSql.execute('create user user1 pass "asd"', queryTimes=1) + tdSql.execute('create user user1 pass "asdxtz@#12"', queryTimes=1) tdSql.execute('grant write on sel_null.stable1 with 标签1 = 1 to user1',queryTimes=1) tdSql.execute('select count(*) from sel_null.stable1 state_window(值)', queryTimes=1) diff --git a/tests/system-test/2-query/union.py b/tests/system-test/2-query/union.py index 5104489592..fc6dd4fb32 100644 --- a/tests/system-test/2-query/union.py +++ b/tests/system-test/2-query/union.py @@ -426,6 +426,15 @@ class TDTestCase: tdLog.printNoPrefix("==========step4:after wal, all check again ") self.all_test() + self.test_TD_33137() + + def test_TD_33137(self): + sql = "select 'asd' union all select 'asdasd'" + 
tdSql.query(sql, queryTimes=1) + tdSql.checkRows(2) + sql = "select db_name `TABLE_CAT`, '' `TABLE_SCHEM`, stable_name `TABLE_NAME`, 'TABLE' `TABLE_TYPE`, table_comment `REMARKS` from information_schema.ins_stables union all select db_name `TABLE_CAT`, '' `TABLE_SCHEM`, table_name `TABLE_NAME`, case when `type`='SYSTEM_TABLE' then 'TABLE' when `type`='NORMAL_TABLE' then 'TABLE' when `type`='CHILD_TABLE' then 'TABLE' else 'UNKNOWN' end `TABLE_TYPE`, table_comment `REMARKS` from information_schema.ins_tables union all select db_name `TABLE_CAT`, '' `TABLE_SCHEM`, view_name `TABLE_NAME`, 'VIEW' `TABLE_TYPE`, NULL `REMARKS` from information_schema.ins_views" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(47) def stop(self): tdSql.close() diff --git a/tests/system-test/6-cluster/5dnode3mnodeRecreateMnode.py b/tests/system-test/6-cluster/5dnode3mnodeRecreateMnode.py index 2941a643fd..e3e5d25e5d 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeRecreateMnode.py +++ b/tests/system-test/6-cluster/5dnode3mnodeRecreateMnode.py @@ -111,7 +111,7 @@ class TDTestCase: "batchNum": 5000 } username="user1" - passwd="123" + passwd="test123@#$" dnodeNumbers=int(dnodeNumbers) mnodeNums=int(mnodeNums) diff --git a/tests/system-test/6-cluster/5dnode3mnodeRestartDnodeInsertData.py b/tests/system-test/6-cluster/5dnode3mnodeRestartDnodeInsertData.py index 1d2644c65f..ca1bc44fbb 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeRestartDnodeInsertData.py +++ b/tests/system-test/6-cluster/5dnode3mnodeRestartDnodeInsertData.py @@ -163,15 +163,15 @@ class TDTestCase: threads.append(threading.Thread(target=clusterComCreate.insert_data, args=(newTdSql, paraDict["dbName"],stableName,paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"]))) for i in range(5): - clusterComCreate.createUser(newTdSql,f"user{i}",f"pass{i}") - userTdSql=tdCom.newTdSql(user=f"user{i}",password=f"pass{i}") - clusterComCreate.alterUser(userTdSql,f"user{i}",f"pass{i+1}") + clusterComCreate.createUser(newTdSql,f"user{i}",f"passwd@{i}") + userTdSql=tdCom.newTdSql(user=f"user{i}",password=f"passwd@{i}") + clusterComCreate.alterUser(userTdSql,f"user{i}",f"passwd@{i+1}") clusterComCreate.deleteUser(newTdSql,f"user{i}") for j in range(5): i=100 - clusterComCreate.createUser(newTdSql,f"user{i}",f"pass{i}") - userTdSql=tdCom.newTdSql(user=f"user{i}",password=f"pass{i}") - clusterComCreate.alterUser(userTdSql,f"user{i}",f"pass{i+1}") + clusterComCreate.createUser(newTdSql,f"user{i}",f"passwd@{i}") + userTdSql=tdCom.newTdSql(user=f"user{i}",password=f"passwd@{i}") + clusterComCreate.alterUser(userTdSql,f"user{i}",f"passwd@{i+1}") clusterComCreate.deleteUser(newTdSql,f"user{i}") for tr in threads: diff --git a/tests/system-test/99-TDcase/TS-5130.py b/tests/system-test/99-TDcase/TS-5130.py index 504500fa0e..994053ac13 100644 --- a/tests/system-test/99-TDcase/TS-5130.py +++ b/tests/system-test/99-TDcase/TS-5130.py @@ -13,9 +13,9 @@ class TDTestCase: self.conn = conn tdSql.init(conn.cursor(), False) self.passwd = {'root':'taosdata', - 'test':'test'} + 'test':'test123@#$'} def prepare_user(self): - tdSql.execute(f"create user test pass 'test' sysinfo 1") + tdSql.execute(f"create user test pass 'test123@#$' sysinfo 1") def test_connect_user(self, uname): try: diff --git a/utils/test/c/sml_test.c b/utils/test/c/sml_test.c index bf04352232..d922a6454e 100644 --- a/utils/test/c/sml_test.c +++ b/utils/test/c/sml_test.c @@ -1436,7 +1436,7 @@ int sml_td22900_Test() { int sml_td24070_Test() { TAOS *taos = taos_connect("localhost", 
"root", "taosdata", NULL, 0); - TAOS_RES *pRes = taos_query(taos, "CREATE user test_db pass 'test'"); + TAOS_RES *pRes = taos_query(taos, "CREATE user test_db pass 'test@123'"); ASSERT(taos_errno(pRes) == 0); taos_free_result(pRes); @@ -1460,7 +1460,7 @@ int sml_td24070_Test() { // test db privilege - taos = taos_connect("localhost", "test_db", "test", NULL, 0); + taos = taos_connect("localhost", "test_db", "test@123", NULL, 0); const char* sql[] = {"stb2,t1=1,dataModelName=t0 f1=283i32 1632299372000"}; pRes = taos_query(taos, "use td24070_read"); @@ -1491,11 +1491,11 @@ int sml_td24070_Test() { // test stable privilege taos = taos_connect("localhost", "root", "taosdata", NULL, 0); - pRes = taos_query(taos, "CREATE user test_stb_read pass 'test'"); + pRes = taos_query(taos, "CREATE user test_stb_read pass 'test@123'"); ASSERT(taos_errno(pRes) == 0); taos_free_result(pRes); - pRes = taos_query(taos, "CREATE user test_stb_write pass 'test'"); + pRes = taos_query(taos, "CREATE user test_stb_write pass 'test@123'"); ASSERT(taos_errno(pRes) == 0); taos_free_result(pRes); @@ -1508,7 +1508,7 @@ int sml_td24070_Test() { taos_free_result(pRes); taos_close(taos); - taos = taos_connect("localhost", "test_stb_read", "test", "td24070_write", 0); + taos = taos_connect("localhost", "test_stb_read", "test@123", "td24070_write", 0); const char* sql1[] = {"stb2,t1=1,dataModelName=t0 f1=283i32 1632299373000"}; pRes = taos_schemaless_insert(taos, (char **)sql1, sizeof(sql1) / sizeof(sql1[0]), TSDB_SML_LINE_PROTOCOL, @@ -1520,7 +1520,7 @@ int sml_td24070_Test() { taos_free_result(pRes); taos_close(taos); - taos = taos_connect("localhost", "test_stb_write", "test", "td24070_write", 0); + taos = taos_connect("localhost", "test_stb_write", "test@123", "td24070_write", 0); const char* sql2[] = {"stb2,t1=1,dataModelName=t0 f1=283i32 1632299373000"}; pRes = taos_schemaless_insert(taos, (char **)sql2, sizeof(sql2) / sizeof(sql2[0]), TSDB_SML_LINE_PROTOCOL, @@ -1536,11 +1536,11 @@ int sml_td24070_Test() { // test table privilege taos = taos_connect("localhost", "root", "taosdata", NULL, 0); - pRes = taos_query(taos, "CREATE user test_tb_read pass 'test'"); + pRes = taos_query(taos, "CREATE user test_tb_read pass 'test@123'"); ASSERT(taos_errno(pRes) == 0); taos_free_result(pRes); - pRes = taos_query(taos, "CREATE user test_tb_write pass 'test'"); + pRes = taos_query(taos, "CREATE user test_tb_write pass 'test@123'"); ASSERT(taos_errno(pRes) == 0); taos_free_result(pRes); @@ -1553,7 +1553,7 @@ int sml_td24070_Test() { taos_free_result(pRes); taos_close(taos); - taos = taos_connect("localhost", "test_tb_read", "test", "td24070_write", 0); + taos = taos_connect("localhost", "test_tb_read", "test@123", "td24070_write", 0); const char* sql3[] = {"stb2,t1=1,dataModelName=t0 f1=283i32 1632299374000"}; @@ -1566,7 +1566,7 @@ int sml_td24070_Test() { taos_free_result(pRes); taos_close(taos); - taos = taos_connect("localhost", "test_tb_write", "test", "td24070_write", 0); + taos = taos_connect("localhost", "test_tb_write", "test@123", "td24070_write", 0); const char* sql4[] = {"stb2,t1=1,dataModelName=t0 f1=283i32 1632299374000"}; pRes = taos_schemaless_insert(taos, (char **)sql4, sizeof(sql4) / sizeof(sql4[0]), TSDB_SML_LINE_PROTOCOL,