Merge remote-tracking branch 'origin/3.0' into fix/internal

This commit is contained in:
Simon Guan 2025-03-03 10:43:07 +08:00
commit d7207861bb
77 changed files with 1966 additions and 642 deletions

View File

@ -6,6 +6,8 @@ on:
- 'main'
- '3.0'
- '3.1'
- 'enh/cmake-TD-33848'
paths-ignore:
- 'docs/**'
- 'packaging/**'

.github/workflows/taosd-doc-build.yml (vendored, new file, 101 lines)
View File

@ -0,0 +1,101 @@
name: TDengine Doc Build
on:
workflow_call:
inputs:
target_branch:
description: "Target branch name of for building the document"
required: true
type: string
target_pr_number:
description: "PR number of target branch to merge for building the document"
required: true
type: string
env:
DOC_WKC: "/root/doc_ci_work"
ZH_DOC_REPO: "docs.taosdata.com"
EN_DOC_REPO: "docs.tdengine.com"
TD_REPO: "TDengine"
TOOLS_REPO: "taos-tools"
jobs:
check:
runs-on:
group: CI
labels: [self-hosted, doc-build]
outputs:
changed_files_zh: ${{ steps.set_output.outputs.changed_files_zh }}
changed_files_en: ${{ steps.set_output.outputs.changed_files_en }}
changed_files_non_doc: ${{ steps.set_output.outputs.changed_files_non_doc }}
changed_files_non_tdgpt: ${{ steps.set_output.outputs.changed_files_non_tdgpt }}
steps:
- name: Get the latest document contents from the repository
run: |
set -e
# ./.github/scripts/update_repo.sh ${{ env.DOC_WKC }}/${{ env.TD_REPO }} ${{ inputs.target_branch }} ${{ inputs.target_pr_number }}
cd ${{ env.DOC_WKC }}/${{ env.TD_REPO }}
git reset --hard
git clean -f
git remote prune origin
git fetch
git checkout ${{ inputs.target_branch }}
git pull >/dev/null
git fetch origin +refs/pull/${{ inputs.target_pr_number }}/merge
git checkout -qf FETCH_HEAD
- name: Check whether the document is changed and set output variables
id: set_output
run: |
set -e
cd ${{ env.DOC_WKC }}/${{ env.TD_REPO }}
changed_files_zh=$(git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${{ inputs.target_branch }}`| grep "^docs/zh/" | tr '\n' ' ' || :)
changed_files_en=$(git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${{ inputs.target_branch }}`| grep "^docs/en/" | tr '\n' ' ' || :)
changed_files_non_doc=$(git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${{ inputs.target_branch }}`|grep -v "^docs/en/"|grep -v "^docs/zh/"|grep -v ".md$" | tr '\n' ' ' || :)
changed_files_non_tdgpt=$(git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${{ inputs.target_branch }}`|grep -v "^docs/en/"|grep -v "^docs/zh/"|grep -v ".md$" | grep -Ev "forecastoperator.c|anomalywindowoperator.c|tanalytics.h|tanalytics.c|tdgpt_cases.task|analytics" | tr '\n' ' ' ||:)
echo "changed_files_zh=${changed_files_zh}" >> $GITHUB_OUTPUT
echo "changed_files_en=${changed_files_en}" >> $GITHUB_OUTPUT
echo "changed_files_non_doc=${changed_files_non_doc}" >> $GITHUB_OUTPUT
echo "changed_files_non_tdgpt=${changed_files_non_tdgpt}" >> $GITHUB_OUTPUT
build:
needs: check
runs-on:
group: CI
labels: [self-hosted, doc-build]
if: ${{ needs.check.outputs.changed_files_zh != '' || needs.check.outputs.changed_files_en != '' }}
steps:
- name: Get the latest document contents
run: |
set -e
#./.github/scripts/update_repo.sh ${{ env.DOC_WKC }}/${{ env.TD_REPO }} ${{ inputs.target_branch }} ${{ inputs.target_pr_number }}
cd ${{ env.DOC_WKC }}/${{ env.TD_REPO }}
git reset --hard
git clean -f
git remote prune origin
git fetch
git checkout ${{ inputs.target_branch }}
git pull >/dev/null
git fetch origin +refs/pull/${{ inputs.target_pr_number }}/merge
git checkout -qf FETCH_HEAD
- name: Build the chinese document
if: ${{ needs.check.outputs.changed_files_zh != '' }}
run: |
cd ${{ env.DOC_WKC }}/${{ env.ZH_DOC_REPO }}
yarn ass local
yarn build
- name: Build the english document
if: ${{ needs.check.outputs.changed_files_en != '' }}
run: |
cd ${{ env.DOC_WKC }}/${{ env.EN_DOC_REPO }}
yarn ass local
yarn build
outputs:
changed_files_zh: ${{ needs.check.outputs.changed_files_zh }}
changed_files_en: ${{ needs.check.outputs.changed_files_en }}
changed_files_non_doc: ${{ needs.check.outputs.changed_files_non_doc }}
changed_files_non_tdgpt: ${{ needs.check.outputs.changed_files_non_tdgpt }}

View File

@ -20,9 +20,9 @@ if(${BUILD_WITH_SQLITE})
add_subdirectory(sqlite)
endif(${BUILD_WITH_SQLITE})
if(${BUILD_S3})
add_subdirectory(azure)
endif()
# if(${BUILD_S3})
# add_subdirectory(azure)
# endif()
add_subdirectory(tdev)
add_subdirectory(lz4)

View File

@ -16,8 +16,8 @@ TDengine is designed for various writing scenarios, and many of these scenarios
### Syntax
```sql
COMPACT DATABASE db_name [start with 'XXXX'] [end with 'YYYY'];
COMPACT [db_name.]VGROUPS IN (vgroup_id1, vgroup_id2, ...) [start with 'XXXX'] [end with 'YYYY'];
COMPACT DATABASE db_name [start with 'XXXX'] [end with 'YYYY'] [META_ONLY];
COMPACT [db_name.]VGROUPS IN (vgroup_id1, vgroup_id2, ...) [start with 'XXXX'] [end with 'YYYY'] [META_ONLY];
SHOW COMPACTS;
SHOW COMPACT compact_id;
KILL COMPACT compact_id;
@ -30,6 +30,7 @@ KILL COMPACT compact_id;
- COMPACT will merge multiple STT files
- You can specify the start time of the COMPACT data with the start with keyword
- You can specify the end time of the COMPACT data with the end with keyword
- You can specify the META_ONLY keyword to compact only the metadata, which is not compacted by default
- The COMPACT command will return the ID of the COMPACT task
- COMPACT tasks are executed asynchronously in the background, and you can view the progress of COMPACT tasks using the SHOW COMPACTS command
- The SHOW command will return the ID of the COMPACT task, and you can terminate the COMPACT task using the KILL COMPACT command
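For instance, a compaction session might look like the following sketch; the database name, time range, and compact ID below are illustrative, not taken from this document:
```sql
-- Compact only the metadata of database `power` within a time range (illustrative)
COMPACT DATABASE power start with '2024-01-01 00:00:00' end with '2024-06-30 23:59:59' META_ONLY;
-- The command returns a compact ID; check progress, then cancel if necessary
SHOW COMPACTS;
KILL COMPACT 1;
```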

View File

@ -22,8 +22,6 @@ Through the `TDengine Java connector`, Seeq can easily support querying time-ser
## Configure Data Source
### Configuration of JDBC Connector
**Step 1**, Check the data storage location
```shell
@ -42,9 +40,13 @@ sudo seeq restart
Use a browser to visit ip:34216 and follow the instructions to enter the license.
## Load TDengine Time-Series Data
## Data Analysis
This chapter demonstrates how to use the Seeq software to load TDengine time-series data.
### Scenario Introduction
The example scenario is a power system. Users collect electricity usage data from power station instruments daily and store it in a TDengine cluster. They now want to predict how power consumption will develop and purchase more equipment to support it. Power consumption varies with monthly orders and, given seasonal changes, also with the time of year; since this city is located in the northern hemisphere, more electricity is used in summer. We simulate data to reflect these assumptions.
### Data preparation
**Step 1**, Create tables in TDengine.
@ -246,12 +248,6 @@ The source code is hosted on [GitHub Repository](https://github.com/sangshuduo/t
}
```
## Data Analysis
### Scenario Introduction
The example scenario is a power system. Users collect electricity usage data from power station instruments daily and store it in a TDengine cluster. They now want to predict how power consumption will develop and purchase more equipment to support it. Power consumption varies with monthly orders and, given seasonal changes, also with the time of year; since this city is located in the northern hemisphere, more electricity is used in summer. We simulate data to reflect these assumptions.
### Using Seeq Workbench
Log in to the Seeq service page and create a new Seeq Workbench. By selecting data sources from search results and choosing different tools as needed, you can display data or make predictions. For detailed usage methods, refer to the [official knowledge base](https://support.seeq.com/space/KB/146440193/Seeq+Workbench).

View File

@ -12,31 +12,34 @@ Through the Python connector of TDengine, Superset can support TDengine data sou
## Prerequisites
Prepare the following environment:
- TDengine is installed and running normally (both Enterprise and Community versions are available)
- taosAdapter is running normally, refer to [taosAdapter](../../../tdengine-reference/components/taosadapter/)
- Apache Superset version 2.1.0 or above is already installed, refer to [Apache Superset](https://superset.apache.org/)
## Install TDengine Python Connector
- TDengine 3.2.3.0 and above version is installed and running normally (both Enterprise and Community versions are available).
- taosAdapter is running normally, refer to [taosAdapter](../../../tdengine-reference/components/taosadapter/).
- Apache Superset version 2.1.0 or above is already installed, refer to [Apache Superset](https://superset.apache.org/).
- Install Python connector driver, refer to [Python Client Library](../../../tdengine-reference/client-libraries/python).
:::tip
In version 2.1.18 and later, the Python connector of TDengine comes with a connection driver for Superset, which is automatically installed into the Superset directory and provides data source services.
The connection uses the WebSocket protocol, so it is necessary to install the `taos-ws-py` component of TDengine separately. The complete installation script is as follows:
```bash
pip3 install taospy
pip3 install taos-ws-py
```
:::
## Configure TDengine Connection In Superset
## Configure Data Source
**Step 1**, enter the new database connection page, [Superset] -> [Setting] -> [Database Connections] -> [+DATABASE].
**Step 2**, select TDengine database connection, select the `TDengine` option from the drop-down list of [SUPPORTED DATABASES].
**Step 1**, enter the new database connection page, "Superset" → "Setting" → "Database Connections" → "+DATABASE"
**Step 2**, select TDengine database connection, select the "TDengine" option from the drop-down list of "SUPPORTED DATABASES".
:::tip
If there is no TDengine option in the drop-down list, please confirm the installation order: `Superset` must be installed first, and the `Python Connector` second.
:::
**Step 3**, enter a connection name in "DISPLAY NAME"
**Step 4**, the "SQLALCHEMY URL" field is the key connection string and must be filled in correctly
**Step 3**, enter a connection name in [DISPLAY NAME].
**Step 4**, the [SQLALCHEMY URL] field is the key connection string and must be filled in correctly.
```bash
taosws://user:password@host:port
```
| Parameter | <center>Parameter Description</center> |
|:---------- |:--------------------------------------------------------- |
|user | Username for logging into TDengine database |
@ -44,32 +47,34 @@ taosws://user:password@host:port
|host | Name of the host where the TDengine database is located |
|port | The port that provides WebSocket services, default is 6041 |
Example:
The TDengine database installed on this machine provides WebSocket service port 6041, using the default username and password, "SQLALCHEMY URL" is:
Example:
The TDengine database installed on this machine provides WebSocket service port 6041, using the default username and password, `SQLALCHEMY URL` is:
```bash
taosws://root:taosdata@localhost:6041
```
**Step 5**, configure the connection string, click "TEST CONNECTION" to test if the connection can be successful. After passing the test, click the "CONNECT" button to complete the connection
**Step 5**, configure the connection string, click "TEST CONNECTION" to test if the connection can be successful. After passing the test, click the "CONNECT" button to complete the connection.
## Data Analysis
## Start
### Data preparation
There is no difference in the use of TDengine data source compared to other data sources. Here is a brief introduction to basic data queries:
1. Click the "+" button in the upper right corner of the Superset interface, select "SQL query", and enter the query interface
2. Select the "TDengine" data source that has been created earlier from the dropdown list of "DATABASES" in the upper left corner
3. Select the name of the database to be operated on from the drop-down list of "SCHEMA" (system libraries are not displayed)
4. "SEE TABLE SCHEMA" select the name of the super table or regular table to be operated on (sub tables are not displayed)
5. Subsequently, the schema information of the selected table will be displayed in the following area
6. In the SQL editor area, any SQL statement that conforms to TDengine syntax can be entered for execution
There is no difference in using a TDengine data source compared to other data sources. Here is a brief introduction to basic data queries; a sketch follows the list below:
## Example
1. Click the [+] button in the upper right corner of the Superset interface, select [SQL query], and enter the query interface.
2. Select the `TDengine` data source that has been created earlier from the dropdown list of [DATABASES] in the upper left corner.
3. Select the name of the database to be operated on from the drop-down list of [SCHEMA] (system libraries are not displayed).
4. In [SEE TABLE SCHEMA], select the name of the supertable or regular table to be operated on (subtables are not displayed).
5. Subsequently, the schema information of the selected table will be displayed in the following area.
6. In the `SQL` editor area, any `SQL` statement that conforms to `TDengine` syntax can be entered for execution.
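As a sketch, a statement you might run in this editor against the smart meter sample data used below (the `test.meters` table and its `groupid` tag come from the taosBenchmark sample and are assumptions here):
```sql
-- Per-minute maximum voltage for group 4 over one day (illustrative)
SELECT _wstart AS ts, MAX(voltage) AS max_voltage
FROM test.meters
WHERE groupid = 4 AND ts BETWEEN '2024-01-01 00:00:00' AND '2024-01-02 00:00:00'
INTERVAL(1m);
```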
We chose two popular templates from the Superset Chart template to showcase their effects, using smart meter data as an example:
### Smart Meter Example
1. "Aggregate" Type, which displays the maximum voltage value collected per minute during the specified time period in Group 4
We chose two popular templates from the [Superset Chart] template to showcase their effects, using smart meter data as an example:
![superset-demo1](./superset-demo1.jpeg)
2. "RAW RECORDS" Type, which displays the collected values of current and voltage during the specified time period in Group 4
![superset-demo2](./superset-demo2.jpeg)
1. `Aggregate` Type, which displays the maximum voltage value collected per minute during the specified time period in Group 4.
![superset-demo1](./superset-demo1.jpeg)
2. `RAW RECORDS` Type, which displays the collected values of current and voltage during the specified time period in Group 4.
![superset-demo2](./superset-demo2.jpeg)

View File

@ -84,12 +84,12 @@ After modifying configuration file parameters, you need to restart the *taosd* s
|Parameter Name |Supported Version |Dynamic Modification|Description|
|-----------------------|-------------------------|--------------------|------------|
|timezone | |Not supported |Time zone; defaults to dynamically obtaining the current time zone setting from the system|
|locale | |Not supported |System locale information and encoding format, defaults to obtaining from the system|
|charset | |Not supported |Character set encoding, defaults to obtaining from the system|
|timezone | |Supported since 3.1.0.0 |Time zone; defaults to dynamically obtaining the current time zone setting from the system|
|locale | |Supported since 3.1.0.0 |System locale information and encoding format, defaults to obtaining from the system|
|charset | |Supported since 3.1.0.0 |Character set encoding, defaults to obtaining from the system|
:::info
#### Explanation of Region-Related Parameters
1. To address the issue of data writing and querying across multiple time zones, TDengine uses Unix Timestamps to record and store timestamps. The nature of Unix Timestamps ensures that the timestamps generated are consistent at any given moment across any time zone. It is important to note that the conversion to Unix Timestamps is done on the client side. To ensure that other forms of time on the client are correctly converted to Unix Timestamps, it is necessary to set the correct time zone.
On Linux/macOS, the client automatically reads the time zone information set by the system. Users can also set the time zone in the configuration file in various ways. For example:
@ -534,29 +534,23 @@ The `taosd_vnodes_role` table records virtual node role information.
| duration | VARCHAR | tag | SQL execution duration, value range: 3-10s, 10-100s, 100-1000s, 1000s- |
| cluster_id | VARCHAR | tag | cluster id |
## Log Related
### taos\_slow\_sql\_detail Table
TDengine records the system's operational status through log files, helping users monitor the system's condition and troubleshoot issues. This section mainly introduces the related explanations of two system logs: taosc and taosd.
`taos_slow_sql_detail` records detailed slow-query information. The subtable name follows the rule `{user}_{db}_{ip}_clusterId_{cluster_id}`
TDengine's log files mainly include two types: normal logs and slow logs.
1. Normal Log Behavior Explanation
1. Multiple client processes can be started on the same machine, so the client log naming convention is taoslogX.Y, where X is a number, either empty or from 0 to 9, and Y is a suffix, either 0 or 1.
2. Only one server process can exist on the same machine. Therefore, the server log naming convention is taosdlog.Y, where Y is a suffix, either 0 or 1.
The rules for determining the number and suffix are as follows (assuming the log path is /var/log/taos/):
1. Determining the number: Use 10 numbers as the log naming convention, /var/log/taos/taoslog0.Y - /var/log/taos/taoslog9.Y, check each number sequentially to find the first unused number as the log file number for that process. If all 10 numbers are used by processes, do not use a number, i.e., /var/log/taos/taoslog.Y, and all processes write to the same file (number is empty).
2. Determining the suffix: 0 or 1. For example, if the number is determined to be 3, the alternative log file names would be /var/log/taos/taoslog3.0 /var/log/taos/taoslog3.1. If both files do not exist, use suffix 0; if one exists and the other does not, use the existing suffix. If both exist, use the suffix of the file that was modified most recently.
3. If the log file exceeds the configured number of lines numOfLogLines, it will switch suffixes and continue logging, e.g., /var/log/taos/taoslog3.0 is full, switch to /var/log/taos/taoslog3.1 to continue logging. /var/log/taos/taoslog3.0 will be renamed with a timestamp suffix and compressed for storage (handled by an asynchronous thread).
4. Control how many days log files are kept through the configuration logKeepDays, logs older than a certain number of days will be deleted when new logs are compressed and stored. It is not based on natural days.
In addition to recording normal logs, SQL statements that take longer than the configured time will be recorded in the slow logs. Slow log files are mainly used for analyzing system performance and troubleshooting performance issues.
2. Slow Log Behavior Explanation
1. Slow logs are recorded both locally in slow log files and sent to taosKeeper for structured storage via taosAdapter (monitor switch must be turned on).
2. Slow log file storage rules are:
1. One slow log file per day; if there are no slow logs for the day, there is no file for that day.
2. The file name is taosSlowLog.yyyy-mm-dd (taosSlowLog.2024-08-02), and the log storage path is configured through logDir.
3. Logs from multiple clients are stored in the same taosSlowLog.yyyy.mm.dd file under the respective log path.
4. Slow log files are not automatically deleted or compressed.
5. Uses the same three parameters as normal log files: logDir, minimalLogDirGB, asyncLog. The other two parameters, numOfLogLines and logKeepDays, do not apply to slow logs.
| field | type | is\_tag | comment |
| :------------- | :-------- | :------ | :---------------------------------------------------- |
| start\_ts | TIMESTAMP | | start time of sql execution on the client, in ms; primary key |
| request\_id | UINT64_T | | sql request id, a random hash |
| query\_time | INT32_T | | sql execution time, in ms |
| code | INT32_T | | sql return code, 0 means success |
| error\_info | VARCHAR | | error information recorded when the sql failed |
| type | INT8_T | | sql type (1-query, 2-insert, 4-others) |
| rows\_num | INT64_T | | number of rows in the sql result |
| sql | VARCHAR | | sql string |
| process\_name | VARCHAR | | process name |
| process\_id | VARCHAR | | process id |
| db | VARCHAR | TAG | database the sql belongs to |
| user | VARCHAR | TAG | user that executed the sql |
| ip | VARCHAR | TAG | IP address of the client that executed the sql |
| cluster\_id | VARCHAR | TAG | cluster id |
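As a minimal sketch, this supertable can be queried like any other; the `log` database name below is an assumption (the database taosKeeper writes monitoring data to is configurable):
```sql
-- Ten slowest statements recorded in the last day (illustrative)
SELECT start_ts, query_time, `sql`
FROM log.taos_slow_sql_detail
WHERE start_ts > NOW() - 1d
ORDER BY query_time DESC
LIMIT 10;
```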

View File

@ -23,11 +23,11 @@ The list of keywords is as follows:
| ALIVE | |
| ALL | |
| ALTER | |
| ANALYZE | Version 3.3.4.3 and later |
| ANALYZE | 3.3.4.3+ |
| AND | |
| ANODE | Version 3.3.4.3 and later |
| ANODES | Version 3.3.4.3 and later |
| ANOMALY_WINDOW | Version 3.3.4.3 and later |
| ANODE | 3.3.4.3+ |
| ANODES | 3.3.4.3+ |
| ANOMALY_WINDOW | 3.3.4.3+ |
| ANTI | |
| APPS | |
| ARBGROUPS | |
@ -37,6 +37,8 @@ The list of keywords is as follows:
| ASOF | |
| AT_ONCE | |
| ATTACH | |
| AUTO | 3.3.5.0+ |
| ASSIGN | 3.3.6.0+ |
### B
@ -78,12 +80,16 @@ The list of keywords is as follows:
| CLIENT_VERSION | |
| CLUSTER | |
| COLON | |
| COLS | 3.3.6.0+ |
| COLUMN | |
| COMMA | |
| COMMENT | |
| COMP | |
| COMPACT | |
| COMPACTS | |
| COMPACT_INTERVAL | 3.3.5.0+ |
| COMPACT_TIME_OFFSET | 3.3.5.0+ |
| COMPACT_TIME_RANGE | 3.3.5.0+ |
| CONCAT | |
| CONFLICT | |
| CONNECTION | |
@ -114,6 +120,7 @@ The list of keywords is as follows:
| DESC | |
| DESCRIBE | |
| DETACH | |
| DISK_INFO | 3.3.5.0+ |
| DISTINCT | |
| DISTRIBUTED | |
| DIVIDE | |
@ -148,19 +155,19 @@ The list of keywords is as follows:
|Keyword|Description|
|----------------------|-|
| FAIL | |
| FHIGH | Version 3.3.4.3 and later |
| FHIGH | 3.3.4.3+ |
| FILE | |
| FILL | |
| FILL_HISTORY | |
| FIRST | |
| FLOAT | |
| FLOW | Version 3.3.4.3 and later |
| FLOW | 3.3.4.3+ |
| FLUSH | |
| FOR | |
| FORCE | |
| FORCE_WINDOW_CLOSE | Version 3.3.4.3 and later |
| FORCE_WINDOW_CLOSE | 3.3.4.3+ |
| FROM | |
| FROWTS | Version 3.3.4.3 and later |
| FROWTS | 3.3.4.3+ |
| FULL | |
| FUNCTION | |
| FUNCTIONS | |
@ -209,6 +216,7 @@ The list of keywords is as follows:
| INTO | |
| IPTOKEN | |
| IROWTS | |
| IROWTS_ORIGIN | 3.3.5.0+ |
| IS | |
| IS_IMPORT | |
| ISFILLED | |
@ -242,6 +250,7 @@ The list of keywords is as follows:
| LEADER | |
| LEADING | |
| LEFT | |
| LEVEL | 3.3.0.0 - 3.3.2.11 |
| LICENCES | |
| LIKE | |
| LIMIT | |
@ -263,6 +272,7 @@ The list of keywords is as follows:
| MEDIUMBLOB | |
| MERGE | |
| META | |
| META_ONLY | 3.3.6.0+ |
| MINROWS | |
| MINUS | |
| MNODE | |
@ -281,6 +291,8 @@ The list of keywords is as follows:
| NONE | |
| NORMAL | |
| NOT | |
| NOTIFY | 3.3.6.0+ |
| NOTIFY_HISTORY | 3.3.6.0+ |
| NOTNULL | |
| NOW | |
| NULL | |
@ -295,6 +307,7 @@ The list of keywords is as follows:
| OFFSET | |
| ON | |
| ONLY | |
| ON_FAILURE | 3.3.6.0+ |
| OR | |
| ORDER | |
| OUTER | |
@ -345,6 +358,7 @@ The list of keywords is as follows:
| RATIO | |
| READ | |
| RECURSIVE | |
| REGEXP | 3.3.6.0+ |
| REDISTRIBUTE | |
| REM | |
| REPLACE | |
@ -418,7 +432,7 @@ The list of keywords is as follows:
| TABLE_PREFIX | |
| TABLE_SUFFIX | |
| TABLES | |
| tag | |
| TAG | |
| TAGS | |
| TBNAME | |
| THEN | |
@ -435,6 +449,7 @@ The list of keywords is as follows:
| TRANSACTIONS | |
| TRIGGER | |
| TRIM | |
| TRUE_FOR | 3.3.6.0+ |
| TSDB_PAGESIZE | |
| TSERIES | |
| TSMA | |

View File

@ -71,7 +71,10 @@ WebSocket Connector Historical Versions:
|WebSocket Connector Version | Major Changes | TDengine Version|
| ----------------------- | -------------------------------------------------------------------------------------------------- | ----------------- |
|0.3.5 | Added support for VARBINARY and GEOMETRY types, fixed known issues. | 3.3.0.0 and higher|
|0.3.9 | Fixed incomplete data retrieval when specifying a custom number of rows with the `fetchmany` method. | - |
|0.3.8 | Supported connecting SuperSet to the TDengine cloud service instance. | - |
|0.3.5 | Fixed the issues in the crypto provider. | - |
|0.3.4 | Supported varbinary and geometry data type. | 3.3.0.0 and higher |
|0.3.2 | Optimize WebSocket SQL query and insertion performance, modify readme and documentation, fix known issues. | 3.2.3.0 and higher|
|0.2.9 | Known issue fixes. | - |
|0.2.5 | 1. Data subscription supports obtaining and resetting consumption progress. <br/>2. Supports schemaless. <br/>3. Supports STMT. | - |

View File

@ -0,0 +1,14 @@
---
title: Product Roadmap
---
The 2025 roadmap for TDengine OSS is described in the following table.
| Quarter | Feature |
| :----- | :----- |
| 2025Q1 | <ol><li>Virtual tables</li><li>Query engine: conditional expressions in <code>REGEXP</code>, <code>GREATEST</code>, <code>LEAST</code>, and <code>CAST</code> functions; improvements in single-row selection functions; time range interpolation with <code>INTERP</code></li><li>Storage engine: support for writing query results into supertables; <code>KEEP</code> parameter for supertables; performance improvements for the parameter binding interface</li><li>Stream processing: support for virtual tables; decreased compute resource usage; new mechanism for event notification; faster stream creation</li><li>Data types: support for the decimal data type</li><li>High availability: faster recovery from downtime; improved client failover</li><li>Stability: LTS release TDengine 3.3.6.x</li><li>JDBC driver: more efficient data ingestion</li><li>Ecosystem: integration with Microsoft Excel</li></ol> |
| 2025Q2 | <ol><li>Query engine: relaxed restrictions on <code>JOIN</code> queries; support for all mathematical functions in MySQL; integral, integral average, and continuous variance functions; optimization of the <code>CSUM</code> function; support for <code>COUNT(DISTINCT)</code> syntax; enhancements to event windows; faster filtering by tag; faster <code>INTERP</code> queries</li><li>Storage engine: decreased compute resource usage for TSMAs; improved write jitter</li><li>Stream processing: high availability of snodes</li><li>Data types: support for the blob data type</li><li>Data subscription: support for the MQTT protocol</li><li>High availability: faster replica configuration changes; faster recovery from downtime for clusters; improved data recovery after power outage</li><li>Observability: diagnostic tool for data ingestion</li></ol> |
| 2025Q3 | <ol><li>Query engine: more subqueries; support for all operators in MySQL; support for all time functions in MySQL; improved window calculation; reduced jitter in query performance; support for specifying columns in count windows</li><li>Storage engine: faster ingestion in SQL mode</li><li>Observability: diagnostic tool for queries; improved <code>EXPLAIN</code> output; monitoring of long-running tasks</li></ol> |
| 2025Q4 | <ol><li>Query engine: window functions (i.e. the <code>OVER</code> clause); support for all string, aggregation, and conditional functions in MySQL; sorting within groups for partition queries; controls for query resource usage; faster aggregate queries on subtables; time range interpolation in <code>INTERVAL</code> windows</li><li>Data types: support for variable-length strings</li><li>Caching: faster row-oriented caching</li><li>Observability: more insight into operations and maintenance</li></ol> |
For more information, see [TDengine Public Roadmap](https://github.com/orgs/taosdata/projects/4).

View File

@ -111,7 +111,7 @@ TDengine also supports writing data directly to supertables. Note that the super
```sql
insert into meters (tbname, ts, current, voltage, phase, location, group_id)
values( "d1001, "2018-10-03 14:38:05", 10.2, 220, 0.23, "California.SanFrancisco", 2)
values("d1001", "2018-10-03 14:38:05", 10.2, 220, 0.23, "California.SanFrancisco", 2)
```
### Zero-Code Ingestion

View File

@ -17,8 +17,8 @@ TDengine is designed for various write scenarios, and in many of these scenarios the stor
### Syntax
```SQL
COMPACT DATABASE db_name [start with 'XXXX'] [end with 'YYYY'];
COMPACT [db_name.]VGROUPS IN (vgroup_id1, vgroup_id2, ...) [start with 'XXXX'] [end with 'YYYY'];
COMPACT DATABASE db_name [start with 'XXXX'] [end with 'YYYY'] [META_ONLY];
COMPACT [db_name.]VGROUPS IN (vgroup_id1, vgroup_id2, ...) [start with 'XXXX'] [end with 'YYYY'] [META_ONLY];
SHOW COMPACTS;
SHOW COMPACT compact_id;
KILL COMPACT compact_id;
@ -32,6 +32,7 @@ KILL COMPACT compact_id;
- COMPACT merges multiple STT files
- You can specify the start time of the data to compact with the start with keyword
- You can specify the end time of the data to compact with the end with keyword
- You can specify the `META_ONLY` keyword to compact only the metadata; metadata is not compacted by default
- The COMPACT command returns the ID of the COMPACT task
- COMPACT tasks are executed asynchronously in the background, and you can view their progress with the SHOW COMPACTS command
- The SHOW command returns the ID of the COMPACT task, and you can terminate a COMPACT task with the KILL COMPACT command
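For instance, a session might look like the following sketch; the database name and compact ID are illustrative:
```sql
-- Compact only the metadata of database `power` (illustrative)
COMPACT DATABASE power META_ONLY;
SHOW COMPACTS;
-- Assuming the returned compact ID is 1
KILL COMPACT 1;
```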

View File

@ -67,11 +67,15 @@ alter database <dbname> replica 2|1
| Failure Scenario | Cluster Status |
| ------- | ------ |
| No vnode fails: the arbitrator fails (more than one mnode is down, so the mnodes cannot elect a leader) | **Service continues** |
| Only one vnode fails: the vnode fails after the vgroup has reached sync | **Service continues** |
| Only one vnode in service: both vnodes failed after the vgroup had reached sync, and only one vnode recovered while the other remains down | **Force-assign a leader with the command below to continue service** |
| Only one vnode fails: after the offline vnode restarts but before the vgroup reaches sync, the other vnode fails | **Service unavailable** |
| Both vnodes fail | **Service unavailable** |
```sql
ASSIGN LEADER FORCE;
```
## FAQ

View File

@ -12,7 +12,7 @@ Power BI is a business analytics tool provided by Microsoft. By configuring
- A cluster running TDengine 3.3.4.0 or later is deployed and running normally (both Enterprise and Community editions are acceptable).
- taosAdapter is running normally; for details, see the [taosAdapter reference manual](../../../reference/components/taosadapter).
- Download and install the latest Windows x64 client driver from the TDengine website; for details, see [Install ODBC Driver](../../../reference/connector/odbc/#安装).
- Power BI Desktop is installed and running (if not, download the latest 32/64-bit Windows version from its official site).
## Configure Data Source
@ -29,8 +29,8 @@ Power BI is a business analytics tool provided by Microsoft. By configuring
### Usage Notes
To take full advantage of Power BI for analyzing TDengine data, users first need to understand core concepts such as dimensions, measures, windowed queries, partitioned queries, time series, and correlation, and then import data through custom SQL.
- Dimension: usually categorical (text) data that describes categories such as device, measuring point, and model. In TDengine supertables, dimensions are stored in tag columns and can be retrieved quickly with SQL like `select distinct tbname, tag1, tag2 from supertable`.
- Measure: quantitative (numeric) fields that can be used in calculations, commonly sums, averages, and minimums. If a measuring point samples once per second, a year produces more than 30 million records, and importing all of them into Power BI would severely hurt its performance. Instead, users can combine partitioned queries, windowed queries, and the window-related pseudocolumns to import downsampled data into Power BI; see the distinguished query section of the TDengine documentation for the syntax, and the sketch after this list.
- Windowed query: for example, a temperature sensor samples once per second, but you need the average temperature every 10 minutes. Use a window clause to get the downsampled result, with SQL like `select tbname, _wstart date, avg(temperature) temp from table interval(10m)`, where `_wstart` is a pseudocolumn for the window start time, 10m is the window duration, and `avg(temperature)` is the aggregate value within the window.
- Partitioned query: to aggregate over many temperature sensors at once, partition the data and run a series of calculations within each partition, with SQL like `partition by part_list`. The most common use of the partition clause is to split subtable data by tag in supertable queries, so that each subtable forms an independent time series for statistical analysis.
- Time series: when drawing curves or aggregating data by time, a date table is usually required. It can be imported from an Excel sheet or obtained with SQL in TDengine, for example `select _wstart date, count(*) cnt from test.meters where ts between A and B interval(1d) fill(0)`, where the fill clause specifies the fill mode for missing data and the pseudocolumn `_wstart` is the date column to obtain.
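As a sketch under these assumptions, a downsampling statement suitable for import into Power BI could look like this (the `test.meters` table comes from the taosBenchmark sample below; the window length is illustrative):
```sql
-- Downsample per-second samples to 10-minute averages per device (illustrative)
SELECT tbname, _wstart AS ts, AVG(voltage) AS avg_voltage
FROM test.meters
PARTITION BY tbname
INTERVAL(10m);
```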
@ -46,7 +46,7 @@ TDengine uses a unique data model to optimize the storage of time-series data
Follow the steps below to try out generating time-series data reports with Power BI.
**Step 1**, Use TDengine's taosBenchmark to quickly generate data for 1,000 smart meters over 3 days, with a collection frequency of 1s.
```shell
taosBenchmark -t 1000 -n 259200 -S 1000 -y

View File

@ -16,7 +16,7 @@ toc_max_heading_level: 4
- A cluster running TDengine 3.3.2.0 or later is deployed and running normally (both Enterprise and Community editions are acceptable).
- taosAdapter is running normally; for details, see the [taosAdapter reference manual](../../../reference/components/taosadapter).
- Yonghong BI is installed and running (if not, download it from the official Yonghong Technology download page).
- Install the JDBC driver. Download the TDengine JDBC connector file `taos-jdbcdriver-3.4.0-dist.jar` or a later version from maven.org.
## Configure Data Source

View File

@ -18,8 +18,6 @@ Seeq is advanced analytics software for manufacturing and the Industrial Internet of Things (IIoT). Seeq supports
## Configure Data Source
### Configure the JDBC Connector
**Step 1**, Check the data storage location
```
@ -38,9 +36,13 @@ sudo seeq restart
Use a browser to visit ip:34216 and follow the instructions to enter the license.
### Load TDengine Time-Series Data
## Data Analysis
This section demonstrates how to load TDengine time-series data with Seeq.
### Scenario Introduction
The example scenario is a power system. Users collect electricity usage data from power station instruments daily and store it in a TDengine cluster. They now want to predict how power consumption will develop and purchase more equipment to support it. Power consumption varies with monthly orders and, given seasonal changes, also with the time of year; since this city is located in the northern hemisphere, more electricity is used in summer. We simulate data to reflect these assumptions.
### Data Preparation
**Step 1**, Create tables in TDengine.
@ -240,12 +242,6 @@ taos -s "insert into power.goods select _wstart, _wstart + 10d, avg(goods) from
}
```
## Data Analysis
### Scenario Introduction
The example scenario is a power system. Users collect electricity usage data from power station instruments daily and store it in a TDengine cluster. They now want to predict how power consumption will develop and purchase more equipment to support it. Power consumption varies with monthly orders and, given seasonal changes, also with the time of year; since this city is located in the northern hemisphere, more electricity is used in summer. We simulate data to reflect these assumptions.
### Using Seeq Workbench
Log in to the Seeq service page and create a new Seeq Workbench. By selecting data sources from search results and choosing different tools as needed, you can display data or make predictions. For detailed usage, see the [official knowledge base](https://support.seeq.com/space/KB/146440193/Seeq+Workbench).

View File

@ -4,38 +4,39 @@ title: Integrate with Superset
---
Apache Superset is a modern enterprise-grade business intelligence (BI) web application, mainly used for data exploration and visualization. It is an open-source project backed by the Apache Software Foundation, with an active community and a rich ecosystem. Apache Superset provides an intuitive user interface that makes creating, sharing, and visualizing data simple, while supporting multiple data sources and a wide range of visualization options.
Through the Python connector of TDengine, Apache Superset can support TDengine data sources and provide functions such as data presentation and analysis.
## Prerequisites
Prepare the following environment:
- A TDengine cluster is deployed and running normally (both Enterprise and Community editions are acceptable)
- taosAdapter is running normally; for details, see the [taosAdapter user manual](../../../reference/components/taosadapter)
- Apache Superset v2.1.0 or later is installed; to install Apache Superset, see the [official documentation](https://superset.apache.org/)
- A cluster running TDengine 3.2.3.0 or later is deployed and running normally (both Enterprise and Community editions are acceptable).
- taosAdapter is running normally; for details, see the [taosAdapter user manual](../../../reference/components/taosadapter).
- Apache Superset v2.1.0 or later is installed; to install Apache Superset, see the [official documentation](https://superset.apache.org/).
- Install the Python connector driver; for details, see the [TDengine Python Connector](../../../reference/connector/python).
## Install the TDengine Python Connector
From `v2.1.18`, the TDengine Python connector ships with a Superset connection driver, which is installed into the corresponding Superset directory and provides data source services to Superset.
Superset connects to TDengine over the WebSocket protocol, so the `taos-ws-py` component supporting this protocol must also be installed. The complete installation script is as follows:
```bash
pip3 install taospy
pip3 install taos-ws-py
```
## Configure the TDengine Data Source
**Step 1**, Enter the new database connection page: "Superset" → "Setting" → "Database Connections" → "+DATABASE"
**Step 2**, Select the TDengine database connection: choose the "TDengine" option from the "SUPPORTED DATABASES" drop-down list.
:::tip
Note: if there is no "TDengine" item in the drop-down list, check the installation order and make sure the `TDengine Python connector` is installed after `Superset`.
From `v2.1.18`, the TDengine Python connector ships with the Superset connection driver, which is installed into the corresponding Superset directory and provides data source services to Superset.
:::
## Configure Data Source
**Step 1**, Enter the new database connection page: [Superset] -> [Setting] -> [Database Connections] -> [+DATABASE].
**Step 2**, Select the TDengine database connection: choose the `TDengine` option from the [SUPPORTED DATABASES] drop-down list.
:::tip
Note: if there is no `TDengine` item in the drop-down list, check the installation order and make sure the `TDengine Python connector` is installed after `Superset`.
:::
**Step 3**, Enter a connection name in "DISPLAY NAME"; any name will do.
**Step 4**, The "SQLALCHEMY URL" field is the key connection string and must be filled in correctly.
**Step 3**, Enter a connection name in [DISPLAY NAME]; any name will do.
**Step 4**, The [SQLALCHEMY URL] field is the key connection string and must be filled in correctly.
```bash
taosws://user:password@host:port
```
| Parameter | <center>Description</center> |
|:------- |:-------------------------------- |
| user | Username for logging in to the TDengine database |
@ -43,32 +44,33 @@ taosws://user:password@host:port
| host | Name of the host where the TDengine database is located |
| port | Port providing the WebSocket service, default 6041 |
Example:
The TDengine database installed on this machine provides WebSocket service on port 6041; using the default username and password, the "SQLALCHEMY URL" is:
Example:
The TDengine database installed on this machine provides WebSocket service on port 6041; using the default username and password, the `SQLALCHEMY URL` is:
```bash
taosws://root:taosdata@localhost:6041
```
**Step 5**, Configure the connection string, then click "TEST CONNECTION" to test whether the connection succeeds; after the test passes, click the "CONNECT" button to complete the connection
**Step 5**, Configure the connection string, then click [TEST CONNECTION] to test whether the connection succeeds; after the test passes, click the [CONNECT] button to complete the connection.
## Data Analysis
## Getting Started
### Data Preparation
Using a TDengine data source is no different from using any other data source. Here is a brief introduction to basic data queries:
1. Click the "+" button in the upper right corner of the Superset interface, select "SQL query", and enter the query interface
2. Select the previously created "TDengine" data source from the "DATABASE" drop-down list in the upper left corner
3. From the "SCHEMA" drop-down list, select the database to operate on (system databases are not displayed)
4. In "SEE TABLE SCHEMA", select the supertable or regular table to operate on (subtables are not displayed)
5. The schema information of the selected table is then displayed in the area below
6. In the SQL editor area, any SQL statement that conforms to TDengine syntax can be entered and executed
Using a TDengine data source is no different from using any other data source. Here is a brief introduction to basic data queries; a sketch follows the list below:
## Example Results
1. Click the [+] button in the upper right corner of the `Superset` interface, select `SQL query`, and enter the query interface.
2. Select the previously created `TDengine` data source from the [DATABASE] drop-down list in the upper left corner.
3. From the [SCHEMA] drop-down list, select the database to operate on (system databases are not displayed).
4. In [SEE TABLE SCHEMA], select the supertable or regular table to operate on (subtables are not displayed).
5. The schema information of the selected table is then displayed in the area below.
6. In the `SQL` editor area, any `SQL` statement that conforms to `TDengine` syntax can be entered and executed.
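As a sketch, a statement you might run in this editor against the smart meter sample data (the `test.meters` table and its `groupid` tag come from the taosBenchmark sample and are assumptions here):
```sql
-- Raw current and voltage records for group 4 within a time range (illustrative)
SELECT ts, current, voltage
FROM test.meters
WHERE groupid = 4
  AND ts BETWEEN '2024-01-01 00:00:00' AND '2024-01-01 01:00:00';
```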
We chose two popular templates from the Superset Chart templates to show their effect, using smart meter data as an example:
### Smart Meter Example
1. "Aggregate" type, showing the maximum voltage collected per minute for group 4 during the specified time period
We chose two popular templates from the [Superset Chart] templates to show their effect, using smart meter data as an example:
![superset-demo1](./superset-demo1.jpeg)
2. "RAW RECORDS" type, showing the collected current and voltage values for group 4 during the specified time period
![superset-demo2](./superset-demo2.jpeg)
1. `Aggregate` type, showing the maximum voltage collected per minute for group 4 during the specified time period.
![superset-demo1](./superset-demo1.jpeg)
2. `RAW RECORDS` type, showing the collected current and voltage values for group 4 during the specified time period.
![superset-demo2](./superset-demo2.jpeg)

View File

@ -11,7 +11,7 @@ Tableau is a well-known business intelligence tool that supports multiple data sources and can conven
- A cluster running TDengine 3.3.5.4 or later is deployed and running normally (both Enterprise and Community editions are acceptable)
- taosAdapter is running normally; for details, see the [taosAdapter reference manual](../../../reference/components/taosadapter)
- Tableau Desktop is installed and running (if not, download and install the 64-bit Windows [Tableau Desktop](https://www.tableau.com/products/desktop/download)); to install Tableau Desktop, see the [official documentation](https://www.tableau.com).
- Download and install the latest Windows x64 client driver from the TDengine website. For details, see [Install ODBC Driver](../../../reference/connector/odbc/#安装).
## Configure Data Source

View File

@ -459,6 +459,7 @@ The taosd command-line parameters are as follows
- Supported versions: introduced in v3.1.0.0
:::info
#### Explanation of Region-Related Parameters
1. To address data writing and querying across multiple time zones, TDengine uses Unix timestamps to record and store time. The nature of Unix timestamps ensures that a timestamp generated at any given moment is identical across all time zones. Note that the conversion to Unix timestamps is done on the client side; to ensure that other time formats on the client are converted to Unix timestamps correctly, the correct time zone must be set.
On Linux/macOS, the client automatically reads the time zone configured in the system. Users can also set the time zone in the configuration file in various ways. For example:
@ -1704,30 +1705,24 @@ taosd reports monitoring metrics to taosKeeper, and these metrics are stored by taosKeep
| duration | VARCHAR | TAG | SQL execution duration, value range: 3-10s, 10-100s, 100-1000s, 1000s- |
| cluster\_id | VARCHAR | TAG | cluster id |
## Log Related
### taos\_slow\_sql\_detail Table
TDengine records the system's operational status in log files, helping users monitor the system and troubleshoot issues. This section mainly describes the two system logs, taosc and taosd.
`taos_slow_sql_detail` records detailed client slow-query information. The subtable name follows the rule `{user}_{db}_{ip}_clusterId_{cluster_id}`
TDengine's log files mainly include two types: normal logs and slow logs.
1. Normal log behavior
1. Multiple client processes can be started on the same machine, so client logs are named taoslogX.Y, where X is a number, either empty or 0 to 9, and Y is a suffix, either 0 or 1.
2. Only one server process can exist on the same machine, so server logs are named taosdlog.Y, where Y is a suffix, 0 or 1.
The number and suffix are determined as follows (assuming the log path is /var/log/taos/):
1. Determining the number: 10 numbers are used for log naming, /var/log/taos/taoslog0.Y - /var/log/taos/taoslog9.Y; each number is checked in turn, and the first unused one becomes the log file number for that process. If all 10 numbers are in use, no number is used, i.e. /var/log/taos/taoslog.Y, and all processes write to the same file (number is empty).
2. Determining the suffix: 0 or 1. For example, if the number is 3, the candidate log file names are /var/log/taos/taoslog3.0 and /var/log/taos/taoslog3.1. If neither file exists, use suffix 0; if one exists and the other does not, use the existing suffix; if both exist, use the suffix of the most recently modified file.
3. If a log file exceeds the configured number of lines numOfLogLines, it switches suffixes and continues logging; for example, when /var/log/taos/taoslog3.0 is full, logging switches to /var/log/taos/taoslog3.1, and /var/log/taos/taoslog3.0 is renamed with a timestamp suffix and compressed for storage (handled by an asynchronous thread).
4. The configuration logKeepDays controls how many days log files are kept; logs older than that are deleted when new logs are compressed and stored. This is not based on calendar days.
Besides normal logs, SQL statements whose execution time exceeds the configured threshold are recorded in the slow logs. Slow log files are mainly used for analyzing system performance and troubleshooting performance issues.
2. Slow log behavior
1. Slow logs are recorded in local slow log files and are also sent to taosKeeper via taosAdapter for structured storage (the monitor switch must be turned on).
2. Slow log file storage rules:
1. One slow log file per day; if there are no slow logs on a given day, there is no file for that day.
2. The file name is taosSlowLog.yyyy-mm-dd (e.g. taosSlowLog.2024-08-02), and the storage path is configured through logDir.
3. Logs from multiple clients are stored in the same taosSlowLog.yyyy.mm.dd file under the corresponding log path.
4. Slow log files are not automatically deleted or compressed.
5. Slow logs use the same three parameters as normal log files: logDir, minimalLogDirGB, asyncLog. The other two parameters, numOfLogLines and logKeepDays, do not apply to slow logs.
| field | type | is\_tag | comment |
| :------------- | :-------- | :------ | :---------------------------------------------------- |
| start\_ts | TIMESTAMP | | start time of sql execution on the client, in ms; primary key |
| request\_id | UINT64_T | | sql request id, a random hash |
| query\_time | INT32_T | | sql execution time, in ms |
| code | INT32_T | | sql return code, 0 means success |
| error\_info | VARCHAR | | error information recorded when the sql failed |
| type | INT8_T | | sql type (1-query, 2-insert, 4-others) |
| rows\_num | INT64_T | | number of rows in the sql result |
| sql | VARCHAR | | sql string |
| process\_name | VARCHAR | | process name |
| process\_id | VARCHAR | | process id |
| db | VARCHAR | TAG | database the sql belongs to |
| user | VARCHAR | TAG | user that executed the sql |
| ip | VARCHAR | TAG | IP address of the client that executed the sql |
| cluster\_id | VARCHAR | TAG | cluster id |
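As a sketch, the recorded slow queries can then be inspected with ordinary SQL; the `log` database name below is an assumption (the database taosKeeper writes to is configurable):
```sql
-- Count slow statements per user over the last week (illustrative)
SELECT `user`, COUNT(*) AS cnt
FROM log.taos_slow_sql_detail
WHERE start_ts > NOW() - 7d
GROUP BY `user`;
```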

View File

@ -23,11 +23,11 @@ description: Detailed list of TDengine reserved keywords
| ALIVE | |
| ALL | |
| ALTER | |
| ANALYZE | Version 3.3.4.3 and later |
| AND | |
| ANODE | Version 3.3.4.3 and later |
| ANODES | Version 3.3.4.3 and later |
| ANOMALY_WINDOW | Version 3.3.4.3 and later |
| ANODE | 3.3.4.3+ |
| ANODES | 3.3.4.3+ |
| ANOMALY_WINDOW | 3.3.4.3+ |
| ANTI | |
| APPS | |
| ARBGROUPS | |
@ -37,7 +37,8 @@ description: Detailed list of TDengine reserved keywords
| ASOF | |
| AT_ONCE | |
| ATTACH | |
| AUTO | Version 3.3.5.0 and later |
| AUTO | 3.3.5.0+ |
| ASSIGN | 3.3.6.0+ |
### B
|Keyword|Description|
@ -77,16 +78,16 @@ description: Detailed list of TDengine reserved keywords
| CLIENT_VERSION | |
| CLUSTER | |
| COLON | |
| COLS | |
| COLS | 3.3.6.0+ |
| COLUMN | |
| COMMA | |
| COMMENT | |
| COMP | |
| COMPACT | |
| COMPACTS | |
| COMPACT_INTERVAL | Version 3.3.5.0 and later |
| COMPACT_TIME_OFFSET | Version 3.3.5.0 and later |
| COMPACT_TIME_RANGE | Version 3.3.5.0 and later |
| COMPACT_INTERVAL | 3.3.5.0+ |
| COMPACT_TIME_OFFSET | 3.3.5.0+ |
| COMPACT_TIME_RANGE | 3.3.5.0+ |
| CONCAT | |
| CONFLICT | |
| CONNECTION | |
@ -116,7 +117,7 @@ description: Detailed list of TDengine reserved keywords
| DESC | |
| DESCRIBE | |
| DETACH | |
| DISK_INFO | Version 3.3.5.0 and later |
| DISK_INFO | 3.3.5.0+ |
| DISTINCT | |
| DISTRIBUTED | |
| DIVIDE | |
@ -149,19 +150,19 @@ description: Detailed list of TDengine reserved keywords
|Keyword|Description|
|----------------------|-|
| FAIL | |
| FHIGH | Version 3.3.4.3 and later |
| FHIGH | 3.3.4.3+ |
| FILE | |
| FILL | |
| FILL_HISTORY | |
| FIRST | |
| FLOAT | |
| FLOW | Version 3.3.4.3 and later |
| FLOW | 3.3.4.3+ |
| FLUSH | |
| FOR | |
| FORCE | |
| FORCE_WINDOW_CLOSE | Version 3.3.4.3 and later |
| FORCE_WINDOW_CLOSE | 3.3.4.3+ |
| FROM | |
| FROWTS | Version 3.3.4.3 and later |
| FROWTS | 3.3.4.3+ |
| FULL | |
| FUNCTION | |
| FUNCTIONS | |
@ -207,7 +208,7 @@ description: Detailed list of TDengine reserved keywords
| INTO | |
| IPTOKEN | |
| IROWTS | |
| IROWTS_ORIGIN | Version 3.3.5.0 and later |
| IROWTS_ORIGIN | 3.3.5.0+ |
| IS | |
| IS_IMPORT | |
| ISFILLED | |
@ -259,6 +260,7 @@ description: Detailed list of TDengine reserved keywords
| MEDIUMBLOB | |
| MERGE | |
| META | |
| META_ONLY | 3.3.6.0+ |
| MINROWS | |
| MINUS | |
| MNODE | |
@ -276,8 +278,8 @@ description: Detailed list of TDengine reserved keywords
| NONE | |
| NORMAL | |
| NOT | |
| NOTIFY | Version 3.3.6.0 and later |
| NOTIFY_HISTORY | Version 3.3.6.0 and later |
| NOTIFY | 3.3.6.0+ |
| NOTIFY_HISTORY | 3.3.6.0+ |
| NOTNULL | |
| NOW | |
| NULL | |
@ -291,7 +293,7 @@ description: Detailed list of TDengine reserved keywords
| OFFSET | |
| ON | |
| ONLY | |
| ON_FAILURE | Version 3.3.6.0 and later |
| ON_FAILURE | 3.3.6.0+ |
| OR | |
| ORDER | |
| OUTER | |
@ -339,7 +341,7 @@ description: Detailed list of TDengine reserved keywords
| RATIO | |
| READ | |
| RECURSIVE | |
| REGEXP | Version 3.3.6.0 and later |
| REGEXP | 3.3.6.0+ |
| REDISTRIBUTE | |
| REM | |
| REPLACE | |
@ -428,7 +430,7 @@ description: Detailed list of TDengine reserved keywords
| TRANSACTIONS | |
| TRIGGER | |
| TRIM | |
| TRUE_FOR | |
| TRUE_FOR | 3.3.6.0+ |
| TSDB_PAGESIZE | |
| TSERIES | |
| TSMA | |

View File

@ -4,6 +4,7 @@ title: Manage Permissions
---
Permission management in TDengine covers [user management](../user), database authorization, and message subscription authorization; this section focuses on database authorization and subscription authorization.
Authorization management is available only in TDengine Enterprise; contact the TDengine sales team for access. The authorization syntax is accepted in the Community edition but has no effect.
## Database Access Authorization

View File

@ -68,6 +68,10 @@ WebSocket Connector historical versions:
| WebSocket Connector Version | Major Changes | TDengine Version |
| ----------------------- | ------------------------------------------------------------------------------------ | ----------------- |
| 0.3.9 | Fixed incomplete data retrieval when specifying a custom number of rows with fetchmany | - |
| 0.3.8 | Supported connecting SuperSet to TDengine Cloud service instances | - |
| 0.3.5 | Fixed issues in the crypto provider | - |
| 0.3.4 | Supported the VARBINARY and GEOMETRY data types | 3.3.0.0 and later |
| 0.3.2 | Optimized WebSocket SQL query and insert performance, updated the readme and documentation, fixed known issues | 3.2.3.0 and later |
| 0.2.9 | Fixed known issues | - |
| 0.2.5 | 1. Data subscription supports getting and resetting consumption progress <br/> 2. Supports schemaless <br/> 3. Supports STMT | - |

View File

@ -0,0 +1,107 @@
---
sidebar_label: Log System
title: Log System
toc_max_heading_level: 4
---
TDengine records system running status in log files, helping users monitor the system and troubleshoot problems. Logs are divided into normal logs and slow logs. The running status on the engine side is recorded in normal logs, while slow system operations are recorded in slow log files.
## Normal Logs
### Normal Log Implementation
- Normal logs are written in one of two ways: synchronously, written to the log file immediately, or asynchronously, written to a buffer and flushed to the log file periodically.
- In asynchronous mode, log data is cached in a ring buffer of size buffSize = 20 MB. If a single write is larger than the buffer's available space, that log entry is dropped and the log records: ...Lost N lines here...
![TDengine log ring buffer](./normal_log1.png)
- The asynchronous thread refreshes disk information every 1 s to decide whether there is space to write logs.
- The asynchronous thread runs the write logic once every Interval. The write rules are:
  - If the buffer holds less than buffSize/10, nothing is written to disk, unless more than 1 s has passed.
  - If the buffer holds more than buffSize/10, everything is written to disk.
- Interval defaults to 25 ms and is adjusted dynamically according to how much is written each time. The adjustment rules are:
  - When the volume is small (less than buffSize/10), the write interval is increased: Interval grows by 5 ms each time, up to a maximum of 25 ms.
  - When the volume is large (more than buffSize/3), the interval is at its minimum: Interval is 5 ms.
  - When the volume is fairly large (more than buffSize/4 and at most buffSize/3), the interval is decreased: Interval shrinks by 5 ms each time, down to a minimum of 5 ms.
  - When the volume is moderate (at least buffSize/10 and at most buffSize/4), the interval stays unchanged.
![TDengine log buffer interval adjustment](./normal_log2.png)
### Normal Log Behavior
- Normal log naming rules
  - Multiple client processes can be started on the same machine, so client logs are named taoslogX.Y, where X is a number, either empty or 0 to 9, and Y is a suffix, 0 or 1 (Windows allows only one number, so the format is taoslog.Y).
  - Multiple server processes can be started on the same machine, so server logs are named taosdlog.Y, where Y is a suffix, 0 or 1.
  - The number and suffix are determined as follows (assuming the log path is /var/log/taos/):
    - Determining the number: 10 numbers are used for log naming, /var/log/taos/taoslog0.Y - /var/log/taos/taoslog9.Y; each number is checked in turn, and the first unused one becomes the log file number for that process. If all 10 numbers are in use, no number is used, i.e. /var/log/taos/taoslog.Y, and all processes write to the same file (number is empty).
    - Determining the suffix: 0 or 1. For example, if the number is 3, the candidate log file names are /var/log/taos/taoslog3.0 and /var/log/taos/taoslog3.1. If neither file exists, use suffix 0; if one exists and the other does not, use the existing suffix; if both exist, use the suffix of the most recently modified file.
  - If a log file exceeds the configured number of lines numOfLogLines, it switches suffixes and continues logging; for example, when /var/log/taos/taoslog3.0 is full, logging switches to /var/log/taos/taoslog3.1, and /var/log/taos/taoslog3.0 is renamed with a timestamp suffix and compressed for storage (handled by an asynchronous thread).
  - The configuration logKeepDays controls how many days log files are kept; logs older than that are deleted when new logs are compressed and stored. This is not based on calendar days.
- When the number of lines in a file exceeds numOfLogLines (default 10 million, range 1000 to 2 billion), log archiving is triggered.
  - For example: when taoslog3.0 is full, logging switches to taoslog3.1; taoslog3.0 is renamed to taoslog.1735616543 and then compressed to taoslog.1735616543.gz. If logKeepDays > 0, expired log files are also checked for and deleted. This process runs asynchronously.
## Slow Logs
Besides normal logs, operations whose execution time exceeds the configured threshold are recorded in slow logs. Slow log files are mainly used for analyzing system performance and troubleshooting performance issues.
### Slow Log Implementation
#### Reporting Architecture
![TDengine reporting architecture](./slow_log1.png)
#### Caching Logic
- To improve reporting efficiency, slow SQL logs are reported in batches.
- To avoid losing cached slow SQL logs, the cache is implemented by writing to a temporary file, which survives a crash.
- Every slow SQL log entry is put on a queue, and the slow log thread is notified to fetch data from the queue; the slow log thread writes entries to different files according to the clusterId in the data.
The data format is as follows, where clusterId is the ID of the cluster that the slow-query log belongs to, and value is one record in JSON string form:
```c
typedef struct {
  int64_t clusterId;  // cluster that this slow-query log belongs to
  char   *value;      // one log record as a JSON string
} MonitorSlowLogData;
```
- Notes:
  - A client process may hold many connections, so slow-query logs must be grouped by clusterId. Grouping is implemented through the temporary file name, which is ```{tmp dir}/tdengine_slow_log/tdengeine-{clusterId1}-{processId}-{rand}```, where processId is the process ID, used to distinguish reports from multiple clients.
  - In the figure above, connection 1 connects to cluster 1, while connection 2 and connection 3 connect to cluster 2. The slow SQL data of connection 1 is written to the file ```{tmp dir}/tdengine_slow_log/tdengeine-{clusterId1}-{processId}-{rand}```, and the slow SQL data of connection 2 and connection 3 is written to the file ```{tmp dir}/tdengine_slow_log/tdengeine-{clusterId2}-{processId}-{rand}```
#### Reporting Logic
- The content of the temporary file ```{tmp dir}/tdengine_slow_log/tdengeine-{clusterId1}-{processId}-{rand}``` is read, and each line is treated as one element of a JSON array, which is assembled and reported. A report is sent each time roughly 1 MB of file data has been read; after a successful report, the read position in the file is recorded, and reporting is asynchronous. In the callback, reading continues from the recorded position until the whole file has been read and reported, after which the temporary file is cleared; the callback continues reading the file whether the report succeeded or failed, and on failure the unreported data is logged. Reporting in roughly 1 MB chunks mainly prevents a file from being too large to report in a single attempt.
#### Reporting Triggers
- Periodically while the client is running
  - Data is reported once every monitorInterval.
- On normal client exit
  - All slow SQL log files are reported, and the files are deleted after a successful report.
- On abnormal client exit
  - After an abnormal exit, the next time a new connection is established to a cluster (clusterId), all files beginning with ```tdengine-{clusterId}``` under the ```{tmp dir}/tdengine_slow_log/``` directory are re-reported (these files may be in use by another client process or by this process, so each file must be opened with a file lock), and the temporary file is then deleted.
#### Notes on Abnormal Behavior
- Reporting data and deleting the reported content from the file cannot be done as one atomic operation, so a crash after reporting but before deletion can cause the same data to be reported again next time; duplicated reports overwrite earlier ones and nothing is lost, so the impact is small.
- For performance, the slow log thread writes slow SQL logs to the temporary file cache and only guarantees flushing to the operating system's disk buffer; it does not fsync to disk on every write, so data can still be lost if the machine loses power. The probability of this is very small, and losing data in this situation is tolerable.
### Slow Log Behavior
- Slow logs are recorded in local slow log files and are also sent to taosKeeper via taosAdapter for structured storage (the monitor switch must be turned on).
- Slow log file storage rules:
  - One slow log file per day; if there are no slow logs on a given day, there is no file for that day.
  - The file name is taosSlowLog.yyyy-mm-dd (e.g. taosSlowLog.2024-08-02), and the storage path is configured through logDir.
  - Logs from multiple clients are stored in the same taosSlowLog.yyyy.mm.dd file under the corresponding log path.
  - Slow log files are not automatically deleted or compressed.
  - Slow logs use the same three parameters as normal log files: logDir, minimalLogDirGB, asyncLog. The other two parameters, numOfLogLines and logKeepDays, do not apply to slow logs.
## Log Levels
There are nine log levels, as follows:
```c
typedef enum {
DEBUG_FATAL = 1,
DEBUG_ERROR = 1,
DEBUG_WARN = 2,
DEBUG_INFO = 2,
DEBUG_DEBUG = 4,
DEBUG_TRACE = 8,
DEBUG_DUMP = 16,
DEBUG_SCREEN = 64,
DEBUG_FILE = 128
} ELogLevel;
```
Log switches are controlled by bits, as follows:
![TDengine log levels](./slow_log2.png)
For example:
- 131 = 128 + 2 + 1, file + info + error
- 135 = 128 + 4 + 2 + 1, file + debug + info + error
- 143 = 128 + 8 + 4 + 2 + 1, file + trace + debug + info + error
By setting the log switch parameters, logs of different levels can be enabled.
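As a hedged sketch, such a value is typically applied through a log switch parameter such as `debugFlag`; the exact statements below are assumptions and may differ by version:
```sql
-- Hypothetical: enable file + debug + info + error logging (135) on one dnode
ALTER DNODE 1 'debugFlag 135';
-- Hypothetical: enable file + trace + debug + info + error logging (143) on all dnodes
ALTER ALL DNODES 'debugFlag 143';
```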

Binary file not shown. (new image, 72 KiB)
Binary file not shown. (new image, 64 KiB)
Binary file not shown. (new image, 446 KiB)
Binary file not shown. (new image, 46 KiB)

View File

@ -0,0 +1,14 @@
---
title: Product Roadmap
---
The 2025 annual roadmap for TDengine OSS is shown in the following table.
| Quarter | Feature |
| :----- | :----- |
| 2025Q1 | <ol><li>Virtual tables</li><li>Query engine: conditional expressions in <code>REGEXP</code>, <code>GREATEST</code>, <code>LEAST</code>, and <code>CAST</code> functions; other column values in single-row selection functions; time range interpolation with <code>INTERP</code></li><li>Storage engine: support for writing query results into supertables; <code>KEEP</code> parameter for supertables; improved STMT write performance</li><li>Stream processing: support for virtual tables; reduced compute resource usage; event notification mechanism; faster stream creation</li><li>Data types: decimal</li><li>High availability: faster recovery from downtime; improved client failover</li><li>Stability: new LTS release line 3.3.6.x</li><li>JDBC: efficient data ingestion</li><li>Ecosystem: integration with Tableau</li><li>Ecosystem: integration with Excel</li></ol> |
| 2025Q2 | <ol><li>Query engine: greatly relaxed restrictions on join queries; support for all MySQL mathematical functions; integral, integral average, and continuous variance functions; <code>CSUM</code> function optimization; <code>COUNT(DISTINCT)</code> syntax; enhanced event windows; faster tag filtering; faster <code>INTERP</code> queries</li><li>Storage engine: reduced compute resource usage for TSMAs; reduced write jitter</li><li>Stream processing: snode high availability</li><li>Data types: BLOB</li><li>Data subscription: support for the MQTT protocol</li><li>High availability: faster replica configuration changes; faster cluster recovery from downtime; improved data recovery after power failure</li><li>Observability: data ingestion diagnostic tool</li><li>Ecosystem: integration with FineBI</li></ol> |
| 2025Q3 | <ol><li>Query engine: more subquery types; support for MySQL operators; support for all MySQL time functions; improved window calculation; reduced query performance jitter; count windows allowing specified columns</li><li>Storage engine: faster writes in SQL mode</li><li>Observability: query diagnostic tool; improved <code>EXPLAIN</code> output; observation of long-running tasks</li></ol> |
| 2025Q4 | <ol><li>Query engine: window functions (the <code>OVER</code> clause); support for all MySQL string, aggregation, and conditional functions; in-group sorting for partition queries; controls on query resource usage; faster aggregate queries on subtables; time range interpolation in <code>INTERVAL</code> windows</li><li>Data types: variable-length string types</li><li>Caching: faster row-oriented caching</li><li>Observability: enhanced operations and maintenance observability</li></ol> |
For more information, see the [TDengine Public Roadmap](https://github.com/orgs/taosdata/projects/4).

View File

@ -359,6 +359,7 @@ typedef enum ENodeType {
QUERY_NODE_CREATE_ANODE_STMT,
QUERY_NODE_DROP_ANODE_STMT,
QUERY_NODE_UPDATE_ANODE_STMT,
QUERY_NODE_ASSIGN_LEADER_STMT,
// show statement nodes
// see 'sysTableShowAdapter', 'SYSTABLE_SHOW_TYPE_OFFSET'
@ -1639,6 +1640,7 @@ typedef struct {
int32_t sqlLen;
char* sql;
SArray* vgroupIds;
int8_t metaOnly;
} SCompactDbReq;
int32_t tSerializeSCompactDbReq(void* buf, int32_t bufLen, SCompactDbReq* pReq);
@ -2083,6 +2085,7 @@ typedef struct {
int64_t compactStartTime;
STimeWindow tw;
int32_t compactId;
int8_t metaOnly;
} SCompactVnodeReq;
int32_t tSerializeSCompactVnodeReq(void* buf, int32_t bufLen, SCompactVnodeReq* pReq);
@ -2583,6 +2586,7 @@ typedef struct {
char* arbToken;
int64_t arbTerm;
char* memberToken;
int8_t force;
} SVArbSetAssignedLeaderReq;
int32_t tSerializeSVArbSetAssignedLeaderReq(void* buf, int32_t bufLen, SVArbSetAssignedLeaderReq* pReq);
@ -2661,6 +2665,15 @@ int32_t tSerializeSBalanceVgroupReq(void* buf, int32_t bufLen, SBalanceVgroupReq
int32_t tDeserializeSBalanceVgroupReq(void* buf, int32_t bufLen, SBalanceVgroupReq* pReq);
void tFreeSBalanceVgroupReq(SBalanceVgroupReq* pReq);
typedef struct {
int32_t useless; // useless
int32_t sqlLen;
char* sql;
} SAssignLeaderReq;
int32_t tSerializeSAssignLeaderReq(void* buf, int32_t bufLen, SAssignLeaderReq* pReq);
int32_t tDeserializeSAssignLeaderReq(void* buf, int32_t bufLen, SAssignLeaderReq* pReq);
void tFreeSAssignLeaderReq(SAssignLeaderReq* pReq);
typedef struct {
int32_t vgId1;
int32_t vgId2;

View File

@ -421,6 +421,7 @@
TD_DEF_MSG_TYPE(TDMT_MND_ARB_CHECK_SYNC_TIMER, "mnd-arb-check-sync-tmr", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_ARB_UPDATE_GROUP, "mnd-arb-update-group", NULL, NULL) // no longer used
TD_DEF_MSG_TYPE(TDMT_MND_ARB_UPDATE_GROUP_BATCH, "mnd-arb-update-group-batch", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_ARB_ASSIGN_LEADER, "mnd-arb-assign-leader", NULL, NULL)
TD_CLOSE_MSG_SEG(TDMT_MND_ARB_MSG)
TD_NEW_MSG_SEG(TDMT_MAX_MSG) // msg end mark

View File

@ -169,6 +169,7 @@ typedef struct SCompactDatabaseStmt {
char dbName[TSDB_DB_NAME_LEN];
SNode* pStart;
SNode* pEnd;
bool metaOnly;
} SCompactDatabaseStmt;
typedef struct SCompactVgroupsStmt {
@ -177,6 +178,7 @@ typedef struct SCompactVgroupsStmt {
SNodeList* vgidList;
SNode* pStart;
SNode* pEnd;
bool metaOnly;
} SCompactVgroupsStmt;
typedef struct STableOptions {
@ -683,6 +685,10 @@ typedef struct SBalanceVgroupStmt {
ENodeType type;
} SBalanceVgroupStmt;
typedef struct SAssignLeaderStmt {
ENodeType type;
} SAssignLeaderStmt;
typedef struct SBalanceVgroupLeaderStmt {
ENodeType type;
int32_t vgId;

View File

@ -158,11 +158,14 @@ int32_t qBindStmtTagsValue(void* pBlock, void* boundTags, int64_t suid, const ch
int32_t qStmtBindParams2(SQuery* pQuery, TAOS_STMT2_BIND* pParams, int32_t colIdx, void* charsetCxt);
int32_t qBindStmtStbColsValue2(void* pBlock, SArray* pCols, TAOS_STMT2_BIND* bind, char* msgBuf, int32_t msgBufLen,
STSchema** pTSchema, SBindInfo2* pBindInfos, void *charsetCxt);
int32_t qBindStmtColsValue2(void* pBlock, SArray* pCols, TAOS_STMT2_BIND* bind, char* msgBuf, int32_t msgBufLen, void *charsetCxt);
int32_t qBindStmtColsValue2(void* pBlock, SArray* pCols, TAOS_STMT2_BIND* bind, char* msgBuf, int32_t msgBufLen,
void* charsetCxt);
int32_t qBindStmtSingleColValue2(void* pBlock, SArray* pCols, TAOS_STMT2_BIND* bind, char* msgBuf, int32_t msgBufLen,
int32_t colIdx, int32_t rowNum, void *charsetCxt);
int32_t colIdx, int32_t rowNum, void* charsetCxt);
int32_t qBindStmt2RowValue(void* pBlock, SArray* pCols, TAOS_STMT2_BIND* bind, char* msgBuf, int32_t msgBufLen,
STSchema** pTSchema, SBindInfo2* pBindInfos, void* charsetCxt);
int32_t qBindStmtTagsValue2(void* pBlock, void* boundTags, int64_t suid, const char* sTableName, char* tName,
TAOS_STMT2_BIND* bind, char* msgBuf, int32_t msgBufLen, void *charsetCxt);
TAOS_STMT2_BIND* bind, char* msgBuf, int32_t msgBufLen, void* charsetCxt);
void destroyBoundColumnInfo(void* pBoundInfo);
int32_t qCreateSName(SName* pName, const char* pTableName, int32_t acctId, char* dbName, char* msgBuf,

View File

@ -101,6 +101,7 @@ typedef struct {
bool autoCreateTbl;
SHashObj *pVgHash;
SBindInfo2 *pBindInfo;
bool bindRowFormat;
SStbInterlaceInfo siInfo;
} SStmtSQLInfo2;

View File

@ -39,38 +39,41 @@ static FORCE_INLINE int32_t stmtAllocQNodeFromBuf(STableBufInfo* pTblBuf, void**
}
bool stmtDequeue(STscStmt* pStmt, SStmtQNode** param) {
(void)taosThreadMutexLock(&pStmt->queue.mutex);
int i = 0;
while (0 == atomic_load_64((int64_t*)&pStmt->queue.qRemainNum)) {
(void)taosThreadCondWait(&pStmt->queue.waitCond, &pStmt->queue.mutex);
if (atomic_load_8((int8_t*)&pStmt->queue.stopQueue)) {
if (i < 10) {
taosUsleep(1);
i++;
} else {
(void)taosThreadMutexLock(&pStmt->queue.mutex);
if (0 == atomic_load_64((int64_t*)&pStmt->queue.qRemainNum)) {
(void)taosThreadCondWait(&pStmt->queue.waitCond, &pStmt->queue.mutex);
}
(void)taosThreadMutexUnlock(&pStmt->queue.mutex);
return false;
}
}
if (pStmt->queue.stopQueue) {
return false;
}
SStmtQNode* orig = pStmt->queue.head;
SStmtQNode* node = pStmt->queue.head->next;
pStmt->queue.head = pStmt->queue.head->next;
*param = node;
(void)atomic_sub_fetch_64((int64_t*)&pStmt->queue.qRemainNum, 1);
(void)taosThreadMutexUnlock(&pStmt->queue.mutex);
*param = node;
return true;
}
void stmtEnqueue(STscStmt* pStmt, SStmtQNode* param) {
(void)taosThreadMutexLock(&pStmt->queue.mutex);
pStmt->queue.tail->next = param;
pStmt->queue.tail = param;
pStmt->stat.bindDataNum++;
(void)taosThreadMutexLock(&pStmt->queue.mutex);
(void)atomic_add_fetch_64(&pStmt->queue.qRemainNum, 1);
(void)taosThreadCondSignal(&(pStmt->queue.waitCond));
(void)taosThreadMutexUnlock(&pStmt->queue.mutex);
}
@ -423,11 +426,9 @@ void stmtResetQueueTableBuf(STableBufInfo* pTblBuf, SStmtQueue* pQueue) {
pTblBuf->buffIdx = 1;
pTblBuf->buffOffset = sizeof(*pQueue->head);
(void)taosThreadMutexLock(&pQueue->mutex);
pQueue->head = pQueue->tail = pTblBuf->pCurBuff;
pQueue->qRemainNum = 0;
pQueue->head->next = NULL;
(void)taosThreadMutexUnlock(&pQueue->mutex);
}
int32_t stmtCleanExecInfo(STscStmt* pStmt, bool keepTable, bool deepClean) {
@ -778,7 +779,7 @@ void* stmtBindThreadFunc(void* param) {
STscStmt* pStmt = (STscStmt*)param;
while (true) {
if (atomic_load_8((int8_t*)&pStmt->queue.stopQueue)) {
if (pStmt->queue.stopQueue) {
break;
}
@ -1630,8 +1631,9 @@ int stmtClose(TAOS_STMT* stmt) {
STMT_DLOG_E("start to free stmt");
pStmt->queue.stopQueue = true;
(void)taosThreadMutexLock(&pStmt->queue.mutex);
(void)atomic_add_fetch_64(&pStmt->queue.qRemainNum, 1);
(void)taosThreadCondSignal(&(pStmt->queue.waitCond));
(void)taosThreadMutexUnlock(&pStmt->queue.mutex);

View File

@ -39,34 +39,41 @@ static FORCE_INLINE int32_t stmtAllocQNodeFromBuf(STableBufInfo* pTblBuf, void**
}
static bool stmtDequeue(STscStmt2* pStmt, SStmtQNode** param) {
(void)taosThreadMutexLock(&pStmt->queue.mutex);
int i = 0;
while (0 == atomic_load_64((int64_t*)&pStmt->queue.qRemainNum)) {
(void)taosThreadCondWait(&pStmt->queue.waitCond, &pStmt->queue.mutex);
if (atomic_load_8((int8_t*)&pStmt->queue.stopQueue)) {
if (i < 10) {
taosUsleep(1);
i++;
} else {
(void)taosThreadMutexLock(&pStmt->queue.mutex);
if (0 == atomic_load_64((int64_t*)&pStmt->queue.qRemainNum)) {
(void)taosThreadCondWait(&pStmt->queue.waitCond, &pStmt->queue.mutex);
}
(void)taosThreadMutexUnlock(&pStmt->queue.mutex);
return false;
}
}
if (pStmt->queue.stopQueue) {
return false;
}
SStmtQNode* orig = pStmt->queue.head;
SStmtQNode* node = pStmt->queue.head->next;
pStmt->queue.head = pStmt->queue.head->next;
*param = node;
(void)atomic_sub_fetch_64((int64_t*)&pStmt->queue.qRemainNum, 1);
(void)taosThreadMutexUnlock(&pStmt->queue.mutex);
return true;
}
static void stmtEnqueue(STscStmt2* pStmt, SStmtQNode* param) {
(void)taosThreadMutexLock(&pStmt->queue.mutex);
pStmt->queue.tail->next = param;
pStmt->queue.tail = param;
pStmt->stat.bindDataNum++;
(void)atomic_add_fetch_64((int64_t*)&pStmt->queue.qRemainNum, 1);
(void)taosThreadCondSignal(&(pStmt->queue.waitCond));
pStmt->stat.bindDataNum++;
(void)taosThreadMutexLock(&pStmt->queue.mutex);
(void)atomic_add_fetch_64(&pStmt->queue.qRemainNum, 1);
(void)taosThreadCondSignal(&(pStmt->queue.waitCond));
(void)taosThreadMutexUnlock(&pStmt->queue.mutex);
}
@ -343,11 +350,9 @@ static void stmtResetQueueTableBuf(STableBufInfo* pTblBuf, SStmtQueue* pQueue) {
pTblBuf->buffIdx = 1;
pTblBuf->buffOffset = sizeof(*pQueue->head);
(void)taosThreadMutexLock(&pQueue->mutex);
pQueue->head = pQueue->tail = pTblBuf->pCurBuff;
pQueue->qRemainNum = 0;
pQueue->head->next = NULL;
(void)taosThreadMutexUnlock(&pQueue->mutex);
}
static int32_t stmtCleanExecInfo(STscStmt2* pStmt, bool keepTable, bool deepClean) {
@ -704,7 +709,7 @@ static void* stmtBindThreadFunc(void* param) {
STscStmt2* pStmt = (STscStmt2*)param;
while (true) {
if (atomic_load_8((int8_t*)&pStmt->queue.stopQueue)) {
if (pStmt->queue.stopQueue) {
break;
}
@ -1416,7 +1421,12 @@ int stmtBindBatch2(TAOS_STMT2* stmt, TAOS_STMT2_BIND* bind, int32_t colIdx) {
pStmt->exec.pCurrBlock = *pDataBlock;
if (pStmt->sql.stbInterlaceMode) {
taosArrayDestroy(pStmt->exec.pCurrBlock->pData->aCol);
pStmt->exec.pCurrBlock->pData->aCol = NULL;
(*pDataBlock)->pData->aCol = NULL;
}
if (colIdx < -1) {
pStmt->sql.bindRowFormat = true;
taosArrayDestroy((*pDataBlock)->pData->aCol);
(*pDataBlock)->pData->aCol = taosArrayInit(20, POINTER_BYTES);
}
}
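From this function, colIdx selects the bind layout for taos_stmt2_bind_param: values >= 0 bind one column at a time, -1 binds whole rows in column format, and values below -1 (the tests use -2) switch the block to the new row format; the bindRowFormat flag then guards against mixing the two. A hedged fragment (statement setup omitted):

// Sketch only: -1 = column-format bind, -2 = row-format bind. Mixing the
// two on one prepared statement fails with TSDB_CODE_TSC_STMT_API_ERROR.
int code = taos_stmt2_bind_param(stmt, &bindv, -2);  // row format
// ... later, on the same stmt:
code = taos_stmt2_bind_param(stmt, &bindv, -1);      // rejected: column format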
@ -1444,10 +1454,21 @@ int stmtBindBatch2(TAOS_STMT2* stmt, TAOS_STMT2_BIND* bind, int32_t colIdx) {
if (pStmt->sql.stbInterlaceMode) {
(*pDataBlock)->pData->flags = 0;
code = qBindStmtStbColsValue2(*pDataBlock, pCols, bind, pStmt->exec.pRequest->msgBuf,
pStmt->exec.pRequest->msgBufLen, &pStmt->sql.siInfo.pTSchema, pStmt->sql.pBindInfo, pStmt->taos->optionInfo.charsetCxt);
pStmt->exec.pRequest->msgBufLen, &pStmt->sql.siInfo.pTSchema, pStmt->sql.pBindInfo,
pStmt->taos->optionInfo.charsetCxt);
} else {
code =
qBindStmtColsValue2(*pDataBlock, pCols, bind, pStmt->exec.pRequest->msgBuf, pStmt->exec.pRequest->msgBufLen, pStmt->taos->optionInfo.charsetCxt);
if (colIdx == -1) {
if (pStmt->sql.bindRowFormat) {
tscError("can't mix bind row format and bind column format");
STMT_ERR_RET(TSDB_CODE_TSC_STMT_API_ERROR);
}
code = qBindStmtColsValue2(*pDataBlock, pCols, bind, pStmt->exec.pRequest->msgBuf,
pStmt->exec.pRequest->msgBufLen, pStmt->taos->optionInfo.charsetCxt);
} else {
code = qBindStmt2RowValue(*pDataBlock, (*pDataBlock)->pData->aRowP, bind, pStmt->exec.pRequest->msgBuf,
pStmt->exec.pRequest->msgBufLen, &pStmt->sql.siInfo.pTSchema, pStmt->sql.pBindInfo,
pStmt->taos->optionInfo.charsetCxt);
}
}
if (code) {
@ -1460,6 +1481,11 @@ int stmtBindBatch2(TAOS_STMT2* stmt, TAOS_STMT2_BIND* bind, int32_t colIdx) {
STMT_ERR_RET(TSDB_CODE_TSC_STMT_API_ERROR);
}
if (pStmt->sql.bindRowFormat) {
tscError("can't mix bind row format and bind column format");
STMT_ERR_RET(TSDB_CODE_TSC_STMT_API_ERROR);
}
if (colIdx != (pStmt->bInfo.sBindLastIdx + 1) && colIdx != 0) {
tscError("bind column index not in sequence");
STMT_ERR_RET(TSDB_CODE_APP_ERROR);
@ -1690,11 +1716,11 @@ int stmtExec2(TAOS_STMT2* stmt, int* affected_rows) {
return pStmt->errCode;
}
(void)taosThreadMutexLock(&pStmt->asyncBindParam.mutex);
TSC_ERR_RET(taosThreadMutexLock(&pStmt->asyncBindParam.mutex));
while (atomic_load_8((int8_t*)&pStmt->asyncBindParam.asyncBindNum) > 0) {
(void)taosThreadCondWait(&pStmt->asyncBindParam.waitCond, &pStmt->asyncBindParam.mutex);
}
(void)taosThreadMutexUnlock(&pStmt->asyncBindParam.mutex);
TSC_ERR_RET(taosThreadMutexUnlock(&pStmt->asyncBindParam.mutex));
if (pStmt->sql.stbInterlaceMode) {
STMT_ERR_RET(stmtAddBatch2(pStmt));
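The lock/wait/unlock trio above parks execution until every outstanding async bind has drained. The signaling side is not in this hunk; presumably the async bind completion path decrements asyncBindNum and wakes the waiter under the same mutex, along these lines (a sketch, not the actual code):

// Presumed completion side (sketch): the last finishing bind wakes the waiter.
(void)taosThreadMutexLock(&pStmt->asyncBindParam.mutex);
if (atomic_sub_fetch_8((int8_t*)&pStmt->asyncBindParam.asyncBindNum, 1) == 0) {
  (void)taosThreadCondSignal(&pStmt->asyncBindParam.waitCond);
}
(void)taosThreadMutexUnlock(&pStmt->asyncBindParam.mutex);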
@ -1788,6 +1814,7 @@ int stmtClose2(TAOS_STMT2* stmt) {
pStmt->queue.stopQueue = true;
(void)taosThreadMutexLock(&pStmt->queue.mutex);
(void)atomic_add_fetch_64(&pStmt->queue.qRemainNum, 1);
(void)taosThreadCondSignal(&(pStmt->queue.waitCond));
(void)taosThreadMutexUnlock(&pStmt->queue.mutex);
@ -1796,11 +1823,11 @@ int stmtClose2(TAOS_STMT2* stmt) {
pStmt->bindThreadInUse = false;
}
(void)taosThreadMutexLock(&pStmt->asyncBindParam.mutex);
TSC_ERR_RET(taosThreadMutexLock(&pStmt->asyncBindParam.mutex));
while (atomic_load_8((int8_t*)&pStmt->asyncBindParam.asyncBindNum) > 0) {
(void)taosThreadCondWait(&pStmt->asyncBindParam.waitCond, &pStmt->asyncBindParam.mutex);
}
(void)taosThreadMutexUnlock(&pStmt->asyncBindParam.mutex);
TSC_ERR_RET(taosThreadMutexUnlock(&pStmt->asyncBindParam.mutex));
(void)taosThreadCondDestroy(&pStmt->queue.waitCond);
(void)taosThreadMutexDestroy(&pStmt->queue.mutex);

View File

@ -958,8 +958,49 @@ TEST(stmt2Case, stmt2_insert_non_statndard) {
"double,bool_col bool,binary_col binary(20),nchar_col nchar(20),varbinary_col varbinary(20),geometry_col "
"geometry(200)) tags(int_tag int,long_tag bigint,double_tag double,bool_tag bool,binary_tag "
"binary(20),nchar_tag nchar(20),varbinary_tag varbinary(20),geometry_tag geometry(200));");
do_query(taos, "use stmt2_testdb_6");
TAOS_STMT2_OPTION option = {0, false, false, NULL, NULL};
TAOS_STMT2_OPTION option = {0, true, true, NULL, NULL};
// fewer cols and tags, using stb
{
TAOS_STMT2* stmt = taos_stmt2_init(taos, &option);
ASSERT_NE(stmt, nullptr);
const char* sql = "INSERT INTO stmt2_testdb_6.? using stmt2_testdb_6.stb1 (int_tag)tags(1) (ts) VALUES (?)";
int code = taos_stmt2_prepare(stmt, sql, 0);
checkError(stmt, code);
int total_affect_rows = 0;
int t64_len[2] = {sizeof(int64_t), sizeof(int64_t)};
int tag_i = 0;
int tag_l = sizeof(int);
int64_t ts[2] = {1591060628000, 1591060628100};
for (int i = 0; i < 3; i++) {
ts[0] += 1000;
ts[1] += 1000;
TAOS_STMT2_BIND tags1 = {TSDB_DATA_TYPE_INT, &tag_i, &tag_l, NULL, 1};
TAOS_STMT2_BIND tags2 = {TSDB_DATA_TYPE_INT, &tag_i, &tag_l, NULL, 1};
TAOS_STMT2_BIND params1 = {TSDB_DATA_TYPE_TIMESTAMP, &ts, &t64_len[0], NULL, 2};
TAOS_STMT2_BIND params2 = {TSDB_DATA_TYPE_TIMESTAMP, &ts, &t64_len[0], NULL, 2};
TAOS_STMT2_BIND* tagv[2] = {&tags1, &tags2};
TAOS_STMT2_BIND* paramv[2] = {&params1, &params2};
char* tbname[2] = {"tb1", "tb2"};
TAOS_STMT2_BINDV bindv = {2, &tbname[0], NULL, &paramv[0]};
code = taos_stmt2_bind_param(stmt, &bindv, -1);
checkError(stmt, code);
int affected_rows;
code = taos_stmt2_exec(stmt, &affected_rows);
total_affect_rows += affected_rows;
checkError(stmt, code);
}
ASSERT_EQ(total_affect_rows, 12);
taos_stmt2_close(stmt);
}
// fewer cols and tags
{
@ -985,7 +1026,7 @@ TEST(stmt2Case, stmt2_insert_non_statndard) {
TAOS_STMT2_BIND* tagv[2] = {&tags1, &tags2};
TAOS_STMT2_BIND* paramv[2] = {&params1, &params2};
char* tbname[2] = {"tb1", "tb2"};
char* tbname[2] = {"tb3", "tb4"};
TAOS_STMT2_BINDV bindv = {2, &tbname[0], &tagv[0], &paramv[0]};
code = taos_stmt2_bind_param(stmt, &bindv, -1);
checkError(stmt, code);
@ -1013,26 +1054,29 @@ TEST(stmt2Case, stmt2_insert_non_statndard) {
int tag_l = sizeof(int);
int tag_bl = 3;
int64_t ts[2] = {1591060628000, 1591060628100};
int64_t ts_2[2] = {1591060628800, 1591060628900};
int t64_len[2] = {sizeof(int64_t), sizeof(int64_t)};
int coli[2] = {1, 2};
int coli_2[2] = {3, 4};
int ilen[2] = {sizeof(int), sizeof(int)};
int total_affect_rows = 0;
for (int i = 0; i < 3; i++) {
ts[0] += 1000;
ts[1] += 1000;
ts_2[0] += 1000;
ts_2[1] += 1000;
TAOS_STMT2_BIND tags1[2] = {{TSDB_DATA_TYPE_BINARY, (void*)"abc", &tag_bl, NULL, 1},
{TSDB_DATA_TYPE_INT, &tag_i, &tag_l, NULL, 1}};
TAOS_STMT2_BIND tags2[2] = {{TSDB_DATA_TYPE_BINARY, (void*)"abc", &tag_bl, NULL, 1},
{TSDB_DATA_TYPE_INT, &tag_i, &tag_l, NULL, 1}};
TAOS_STMT2_BIND params1[2] = {{TSDB_DATA_TYPE_INT, &coli, &ilen[0], NULL, 2},
{TSDB_DATA_TYPE_TIMESTAMP, &ts, &t64_len[0], NULL, 2}};
TAOS_STMT2_BIND params2[2] = {{TSDB_DATA_TYPE_INT, &coli, &ilen[0], NULL, 2},
{TSDB_DATA_TYPE_TIMESTAMP, &ts, &t64_len[0], NULL, 2}};
TAOS_STMT2_BIND tags[2][2] = {
{{TSDB_DATA_TYPE_BINARY, (void*)"abc", &tag_bl, NULL, 1}, {TSDB_DATA_TYPE_INT, &tag_i, &tag_l, NULL, 1}},
{{TSDB_DATA_TYPE_BINARY, (void*)"def", &tag_bl, NULL, 1}, {TSDB_DATA_TYPE_INT, &tag_i, &tag_l, NULL, 1}}};
TAOS_STMT2_BIND params[2][2] = {
{{TSDB_DATA_TYPE_INT, &coli[0], &ilen[0], NULL, 2}, {TSDB_DATA_TYPE_TIMESTAMP, &ts[0], &t64_len[0], NULL, 2}},
{{TSDB_DATA_TYPE_INT, &coli_2[0], &ilen[0], NULL, 2},
{TSDB_DATA_TYPE_TIMESTAMP, &ts_2[0], &t64_len[0], NULL, 2}}};
TAOS_STMT2_BIND* tagv[2] = {&tags1[0], &tags2[0]};
TAOS_STMT2_BIND* paramv[2] = {&params1[0], &params2[0]};
char* tbname[2] = {"tb3", "tb4"};
TAOS_STMT2_BIND* tagv[2] = {&tags[0][0], &tags[1][0]};
TAOS_STMT2_BIND* paramv[2] = {&params[0][0], &params[1][0]};
char* tbname[2] = {"tb5", "tb6"};
TAOS_STMT2_BINDV bindv = {2, &tbname[0], &tagv[0], &paramv[0]};
code = taos_stmt2_bind_param(stmt, &bindv, -1);
checkError(stmt, code);
@ -1894,4 +1938,158 @@ TEST(stmt2Case, async_order) {
}
}
TEST(stmt2Case, rowformat_bind) {
TAOS* taos = taos_connect("localhost", "root", "taosdata", "", 0);
ASSERT_NE(taos, nullptr);
do_query(taos, "drop database if exists stmt2_testdb_16");
do_query(taos, "create database IF NOT EXISTS stmt2_testdb_16");
do_query(
taos,
"create stable stmt2_testdb_16.stb(ts timestamp, c1 int, c2 bigint, c3 float, c4 double, c5 binary(8), c6 "
"smallint, c7 "
"tinyint, c8 bool, c9 nchar(8), c10 geometry(256))TAGS(tts timestamp, t1 int, t2 bigint, t3 float, t4 double, t5 "
"binary(8), t6 smallint, t7 tinyint, t8 bool, t9 nchar(8), t10 geometry(256))");
TAOS_STMT2_OPTION option = {0};
TAOS_STMT2* stmt = taos_stmt2_init(taos, &option);
ASSERT_NE(stmt, nullptr);
int code = 0;
uintptr_t c10len = 0;
struct {
int64_t c1;
int32_t c2;
int64_t c3;
float c4;
double c5;
unsigned char c6[8];
int16_t c7;
int8_t c8;
int8_t c9;
char c10[32];
} v = {1591060628000, 1, 2, 3.0, 4.0, "abcdef", 5, 6, 7, "ijnop"};
struct {
int32_t c1;
int32_t c2;
int32_t c3;
int32_t c4;
int32_t c5;
int32_t c6;
int32_t c7;
int32_t c8;
int32_t c9;
int32_t c10;
} v_len = {sizeof(int64_t), sizeof(int32_t),
sizeof(int64_t), sizeof(float),
sizeof(double), 8,
sizeof(int16_t), sizeof(int8_t),
sizeof(int8_t), 8};
TAOS_STMT2_BIND params[11];
params[0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
params[0].length = (int32_t*)&v_len.c1;
params[0].buffer = &v.c1;
params[0].is_null = NULL;
params[0].num = 1;
params[1].buffer_type = TSDB_DATA_TYPE_INT;
params[1].buffer = &v.c2;
params[1].length = (int32_t*)&v_len.c2;
params[1].is_null = NULL;
params[1].num = 1;
params[2].buffer_type = TSDB_DATA_TYPE_BIGINT;
params[2].buffer = &v.c3;
params[2].length = (int32_t*)&v_len.c3;
params[2].is_null = NULL;
params[2].num = 1;
params[3].buffer_type = TSDB_DATA_TYPE_FLOAT;
params[3].buffer = &v.c4;
params[3].length = (int32_t*)&v_len.c4;
params[3].is_null = NULL;
params[3].num = 1;
params[4].buffer_type = TSDB_DATA_TYPE_DOUBLE;
params[4].buffer = &v.c5;
params[4].length = (int32_t*)&v_len.c5;
params[4].is_null = NULL;
params[4].num = 1;
params[5].buffer_type = TSDB_DATA_TYPE_BINARY;
params[5].buffer = &v.c6;
params[5].length = (int32_t*)&v_len.c6;
params[5].is_null = NULL;
params[5].num = 1;
params[6].buffer_type = TSDB_DATA_TYPE_SMALLINT;
params[6].buffer = &v.c7;
params[6].length = (int32_t*)&v_len.c7;
params[6].is_null = NULL;
params[6].num = 1;
params[7].buffer_type = TSDB_DATA_TYPE_TINYINT;
params[7].buffer = &v.c8;
params[7].length = (int32_t*)&v_len.c8;
params[7].is_null = NULL;
params[7].num = 1;
params[8].buffer_type = TSDB_DATA_TYPE_BOOL;
params[8].buffer = &v.c9;
params[8].length = (int32_t*)&v_len.c9;
params[8].is_null = NULL;
params[8].num = 1;
params[9].buffer_type = TSDB_DATA_TYPE_NCHAR;
params[9].buffer = &v.c10;
params[9].length = (int32_t*)&v_len.c10;
params[9].is_null = NULL;
params[9].num = 1;
unsigned char* outputGeom1;
size_t size1;
initCtxMakePoint();
code = doMakePoint(1.000, 2.000, &outputGeom1, &size1);
checkError(stmt, code);
params[10].buffer_type = TSDB_DATA_TYPE_GEOMETRY;
params[10].buffer = outputGeom1;
params[10].length = (int32_t*)&size1;
params[10].is_null = NULL;
params[10].num = 1;
char* stmt_sql = "insert into stmt2_testdb_16.? using stb tags(?,?,?,?,?,?,?,?,?,?,?)values (?,?,?,?,?,?,?,?,?,?,?)";
code = taos_stmt2_prepare(stmt, stmt_sql, 0);
checkError(stmt, code);
char* tbname[1] = {"tb1"};
TAOS_STMT2_BIND* tags = &params[0];
TAOS_STMT2_BIND* cols = &params[0];
TAOS_STMT2_BINDV bindv = {1, &tbname[0], &tags, &cols};
code = taos_stmt2_bind_param(stmt, &bindv, -2);
checkError(stmt, code);
int affected_rows;
code = taos_stmt2_exec(stmt, &affected_rows);
checkError(stmt, code);
ASSERT_EQ(affected_rows, 1);
int64_t ts2 = 1591060628000;
params[0].buffer = &ts2;
code = taos_stmt2_bind_param(stmt, &bindv, -2);
checkError(stmt, code);
code = taos_stmt2_exec(stmt, &affected_rows);
checkError(stmt, code);
ASSERT_EQ(affected_rows, 1);
params[0].buffer = &ts2;
code = taos_stmt2_bind_param(stmt, &bindv, -1);
ASSERT_EQ(code, TSDB_CODE_TSC_STMT_API_ERROR);
geosFreeBuffer(outputGeom1);
taos_stmt2_close(stmt);
do_query(taos, "drop database if exists stmt2_testdb_16");
taos_close(taos);
}
#pragma GCC diagnostic pop

View File

@ -4686,6 +4686,8 @@ int32_t tSerializeSCompactDbReq(void *buf, int32_t bufLen, SCompactDbReq *pReq)
}
}
TAOS_CHECK_EXIT(tEncodeI8(&encoder, pReq->metaOnly));
tEndEncode(&encoder);
_exit:
@ -4729,6 +4731,12 @@ int32_t tDeserializeSCompactDbReq(void *buf, int32_t bufLen, SCompactDbReq *pReq
}
}
}
if (!tDecodeIsEnd(&decoder)) {
TAOS_CHECK_EXIT(tDecodeI8(&decoder, &pReq->metaOnly));
} else {
pReq->metaOnly = false;
}
tEndDecode(&decoder);
_exit:
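The tDecodeIsEnd guard is the backward-compatibility idiom this commit applies to every message that gains a field: the encoder always appends the new field at the end, and the decoder reads it only when bytes remain, otherwise substituting a default, so mixed-version nodes interoperate during a rolling upgrade. The idiom in isolation (wrapper function hypothetical, helpers as above):

// Sketch: append-only message evolution. An old-format peer stops encoding
// before the new field; tDecodeIsEnd() detects that and we fall back to a
// safe default instead of failing the decode.
static int32_t decodeMetaOnlyCompat(SDecoder *decoder, int8_t *metaOnly) {
  if (!tDecodeIsEnd(decoder)) {
    return tDecodeI8(decoder, metaOnly);  // new-format peer
  }
  *metaOnly = 0;  // field absent: old-format peer
  return 0;
}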
@ -7156,6 +7164,7 @@ int32_t tSerializeSCompactVnodeReq(void *buf, int32_t bufLen, SCompactVnodeReq *
TAOS_CHECK_EXIT(tEncodeI64(&encoder, pReq->tw.ekey));
TAOS_CHECK_EXIT(tEncodeI32(&encoder, pReq->compactId));
TAOS_CHECK_EXIT(tEncodeI8(&encoder, pReq->metaOnly));
tEndEncode(&encoder);
@ -7193,6 +7202,12 @@ int32_t tDeserializeSCompactVnodeReq(void *buf, int32_t bufLen, SCompactVnodeReq
TAOS_CHECK_EXIT(tDecodeI32(&decoder, &pReq->compactId));
}
if (!tDecodeIsEnd(&decoder)) {
TAOS_CHECK_EXIT(tDecodeI8(&decoder, &pReq->metaOnly));
} else {
pReq->metaOnly = false;
}
tEndDecode(&decoder);
_exit:
tDecoderClear(&decoder);
@ -7658,6 +7673,46 @@ _exit:
void tFreeSBalanceVgroupReq(SBalanceVgroupReq *pReq) { FREESQL(); }
int32_t tSerializeSAssignLeaderReq(void *buf, int32_t bufLen, SAssignLeaderReq *pReq) {
SEncoder encoder = {0};
int32_t code = 0;
int32_t lino;
int32_t tlen;
tEncoderInit(&encoder, buf, bufLen);
TAOS_CHECK_EXIT(tStartEncode(&encoder));
TAOS_CHECK_EXIT(tEncodeI32(&encoder, pReq->useless));
ENCODESQL();
tEndEncode(&encoder);
_exit:
if (code) {
tlen = code;
} else {
tlen = encoder.pos;
}
tEncoderClear(&encoder);
return tlen;
}
int32_t tDeserializeSAssignLeaderReq(void *buf, int32_t bufLen, SAssignLeaderReq *pReq) {
SDecoder decoder = {0};
int32_t code = 0;
int32_t lino;
tDecoderInit(&decoder, buf, bufLen);
TAOS_CHECK_EXIT(tStartDecode(&decoder));
TAOS_CHECK_EXIT(tDecodeI32(&decoder, &pReq->useless));
DECODESQL();
tEndDecode(&decoder);
_exit:
tDecoderClear(&decoder);
return code;
}
void tFreeSAssignLeaderReq(SAssignLeaderReq *pReq) { FREESQL(); }
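tSerializeSAssignLeaderReq follows the codebase's two-pass sizing convention, also visible in mndBuildArbSetAssignedLeaderReq below: a first call with a NULL buffer returns the encoded length via encoder.pos, the caller allocates, and a second call writes for real. A hedged caller fragment (inside some send routine; rpcMallocCont/rpcFreeCont assumed to be the usual RPC allocators):

SAssignLeaderReq req = {0};
int32_t contLen = tSerializeSAssignLeaderReq(NULL, 0, &req);  // pass 1: size only
if (contLen < 0) return contLen;                              // negative = error code
void *pCont = rpcMallocCont(contLen);
if (pCont == NULL) return terrno;
contLen = tSerializeSAssignLeaderReq(pCont, contLen, &req);   // pass 2: encode
if (contLen < 0) { rpcFreeCont(pCont); return contLen; }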
int32_t tSerializeSBalanceVgroupLeaderReq(void *buf, int32_t bufLen, SBalanceVgroupLeaderReq *pReq) {
SEncoder encoder = {0};
int32_t code = 0;
@ -8184,6 +8239,7 @@ int32_t tSerializeSVArbSetAssignedLeaderReq(void *buf, int32_t bufLen, SVArbSetA
TAOS_CHECK_EXIT(tEncodeCStr(&encoder, pReq->arbToken));
TAOS_CHECK_EXIT(tEncodeI64(&encoder, pReq->arbTerm));
TAOS_CHECK_EXIT(tEncodeCStr(&encoder, pReq->memberToken));
TAOS_CHECK_EXIT(tEncodeI8(&encoder, pReq->force));
tEndEncode(&encoder);
@ -8213,6 +8269,9 @@ int32_t tDeserializeSVArbSetAssignedLeaderReq(void *buf, int32_t bufLen, SVArbSe
TAOS_CHECK_EXIT(terrno);
}
TAOS_CHECK_EXIT(tDecodeCStrTo(&decoder, pReq->memberToken));
if (!tDecodeIsEnd(&decoder)) {
TAOS_CHECK_EXIT(tDecodeI8(&decoder, &pReq->force));
}
tEndDecode(&decoder);

View File

@ -3269,7 +3269,7 @@ int32_t tRowBuildFromBind2(SBindInfo2 *infos, int32_t numOfInfos, bool infoSorte
}
if (!infoSorted) {
taosqsort_r(infos, numOfInfos, sizeof(SBindInfo), NULL, tBindInfoCompare);
taosqsort_r(infos, numOfInfos, sizeof(SBindInfo2), NULL, tBindInfoCompare);
}
int32_t code = 0;

View File

@ -170,8 +170,6 @@ static void dmSetSignalHandle() {
#endif
}
extern bool generateNewMeta;
static int32_t dmParseArgs(int32_t argc, char const *argv[]) {
global.startTime = taosGetTimestampMs();
@ -210,8 +208,6 @@ static int32_t dmParseArgs(int32_t argc, char const *argv[]) {
global.dumpSdb = true;
} else if (strcmp(argv[i], "-dTxn") == 0) {
global.deleteTrans = true;
} else if (strcmp(argv[i], "-r") == 0) {
generateNewMeta = true;
} else if (strcmp(argv[i], "-E") == 0) {
if (i < argc - 1) {
if (strlen(argv[++i]) >= PATH_MAX) {

View File

@ -160,6 +160,7 @@ SArray *mmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_MND_MERGE_VGROUP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_SPLIT_VGROUP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_BALANCE_VGROUP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_ARB_ASSIGN_LEADER, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_BALANCE_VGROUP_LEADER, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_CREATE_FUNC, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_RETRIEVE_FUNC, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;

View File

@ -116,7 +116,7 @@ void mndReleaseStream(SMnode *pMnode, SStreamObj *pStream);
int32_t mndDropStreamByDb(SMnode *pMnode, STrans *pTrans, SDbObj *pDb);
int32_t mndPersistStream(STrans *pTrans, SStreamObj *pStream);
int32_t mndStreamRegisterTrans(STrans *pTrans, const char *pTransName, int64_t streamId);
int32_t mndStreamClearFinishedTrans(SMnode *pMnode, int32_t *pNumOfActiveChkpt);
int32_t mndStreamClearFinishedTrans(SMnode *pMnode, int32_t *pNumOfActiveChkpt, SArray*pLongChkptTrans);
int32_t mndStreamTransConflictCheck(SMnode *pMnode, int64_t streamId, const char *pTransName, bool lock);
int32_t mndStreamGetRelTrans(SMnode *pMnode, int64_t streamId);
@ -159,6 +159,7 @@ void removeTasksInBuf(SArray *pTaskIds, SStreamExecInfo *pExecInfo);
int32_t mndFindChangedNodeInfo(SMnode *pMnode, const SArray *pPrevNodeList, const SArray *pNodeList,
SVgroupChangeInfo *pInfo);
void killAllCheckpointTrans(SMnode *pMnode, SVgroupChangeInfo *pChangeInfo);
void killChkptAndResetStreamTask(SMnode *pMnode, SArray *pLongChkpts);
bool isNodeUpdateTransActive();
int32_t createStreamTaskIter(SStreamObj *pStream, SStreamTaskIter **pIter);

View File

@ -47,7 +47,7 @@ int32_t mndSetMoveVgroupsInfoToTrans(SMnode *, STrans *pTrans, int32_t dropDnode
int32_t mndBuildAlterVgroupAction(SMnode *pMnode, STrans *pTrans, SDbObj *pOldDb, SDbObj *pNewDb, SVgObj *pVgroup,
SArray *pArray, SVgObj* pNewVgroup);
int32_t mndBuildCompactVgroupAction(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup, int64_t compactTs,
STimeWindow tw);
STimeWindow tw, bool metaOnly);
int32_t mndBuildRaftAlterVgroupAction(SMnode *pMnode, STrans *pTrans, SDbObj *pOldDb, SDbObj *pNewDb, SVgObj *pVgroup,
SArray *pArray);

View File

@ -45,6 +45,7 @@ static int32_t mndProcessArbCheckSyncRsp(SRpcMsg *pRsp);
static int32_t mndProcessArbSetAssignedLeaderRsp(SRpcMsg *pRsp);
static int32_t mndRetrieveArbGroups(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows);
static void mndCancelGetNextArbGroup(SMnode *pMnode, void *pIter);
static int32_t mndProcessAssignLeaderMsg(SRpcMsg *pReq);
static int32_t mndArbCheckToken(const char *token1, const char *token2) {
if (token1 == NULL || token2 == NULL) return -1;
@ -70,6 +71,7 @@ int32_t mndInitArbGroup(SMnode *pMnode) {
mndSetMsgHandle(pMnode, TDMT_VND_ARB_HEARTBEAT_RSP, mndProcessArbHbRsp);
mndSetMsgHandle(pMnode, TDMT_VND_ARB_CHECK_SYNC_RSP, mndProcessArbCheckSyncRsp);
mndSetMsgHandle(pMnode, TDMT_SYNC_SET_ASSIGNED_LEADER_RSP, mndProcessArbSetAssignedLeaderRsp);
mndSetMsgHandle(pMnode, TDMT_MND_ARB_ASSIGN_LEADER, mndProcessAssignLeaderMsg);
mndAddShowRetrieveHandle(pMnode, TSDB_MGMT_TABLE_ARBGROUP, mndRetrieveArbGroups);
mndAddShowFreeIterHandle(pMnode, TSDB_MGMT_TABLE_ARBGROUP, mndCancelGetNextArbGroup);
@ -535,11 +537,12 @@ static bool mndCheckArbMemberHbTimeout(SArbGroup *pArbGroup, int32_t index, int6
}
static void *mndBuildArbSetAssignedLeaderReq(int32_t *pContLen, int32_t vgId, char *arbToken, int64_t arbTerm,
char *memberToken) {
char *memberToken, bool force) {
SVArbSetAssignedLeaderReq req = {0};
req.arbToken = arbToken;
req.arbTerm = arbTerm;
req.memberToken = memberToken;
if (force) req.force = 1;
int32_t reqLen = tSerializeSVArbSetAssignedLeaderReq(NULL, 0, &req);
int32_t contLen = reqLen + sizeof(SMsgHead);
@ -559,10 +562,10 @@ static void *mndBuildArbSetAssignedLeaderReq(int32_t *pContLen, int32_t vgId, ch
}
static int32_t mndSendArbSetAssignedLeaderReq(SMnode *pMnode, int32_t dnodeId, int32_t vgId, char *arbToken,
int64_t term, char *memberToken) {
int64_t term, char *memberToken, bool force) {
int32_t code = 0;
int32_t contLen = 0;
void *pHead = mndBuildArbSetAssignedLeaderReq(&contLen, vgId, arbToken, term, memberToken);
void *pHead = mndBuildArbSetAssignedLeaderReq(&contLen, vgId, arbToken, term, memberToken, force);
if (!pHead) {
mError("vgId:%d, failed to build set-assigned request", vgId);
code = -1;
@ -649,6 +652,73 @@ void mndArbCheckSync(SArbGroup *pArbGroup, int64_t nowMs, ECheckSyncOp *pOp, SAr
*pOp = CHECK_SYNC_UPDATE;
}
static int32_t mndProcessAssignLeaderMsg(SRpcMsg *pReq) {
SMnode *pMnode = pReq->info.node;
int32_t code = -1, lino = 0;
SArray *pArray = NULL;
void *pIter = NULL;
SSdb *pSdb = pMnode->pSdb;
SArbGroup *pArbGroup = NULL;
SAssignLeaderReq req = {0};
if (tDeserializeSAssignLeaderReq(pReq->pCont, pReq->contLen, &req) != 0) {
code = TSDB_CODE_INVALID_MSG;
goto _exit;
}
mInfo("begin to process assign leader");
char arbToken[TSDB_ARB_TOKEN_SIZE];
TAOS_CHECK_EXIT(mndGetArbToken(pMnode, arbToken));
int64_t term = mndGetTerm(pMnode);
if (term < 0) {
mError("arb failed to get term since %s", terrstr());
code = -1;
if (terrno != 0) code = terrno;
TAOS_RETURN(code);
}
while (1) {
pIter = sdbFetch(pSdb, SDB_ARBGROUP, pIter, (void **)&pArbGroup);
if (pIter == NULL) break;
SArbGroup arbGroupDup = {0};
(void)taosThreadMutexLock(&pArbGroup->mutex);
mndArbGroupDupObj(pArbGroup, &arbGroupDup);
(void)taosThreadMutexUnlock(&pArbGroup->mutex);
sdbRelease(pSdb, pArbGroup);
int32_t dnodeId = 0;
for (int32_t i = 0; i < 2; i++) {
SDnodeObj *pDnode = mndAcquireDnode(pMnode, arbGroupDup.members[i].info.dnodeId);
bool isonline = mndIsDnodeOnline(pDnode, taosGetTimestampMs());
mndReleaseDnode(pMnode, pDnode);
if (isonline) {
dnodeId = arbGroupDup.members[i].info.dnodeId;
break;
}
}
(void)mndSendArbSetAssignedLeaderReq(pMnode, dnodeId, arbGroupDup.vgId, arbToken, term, "", true);
mInfo("vgId:%d, arb send set assigned leader to dnodeId:%d", arbGroupDup.vgId, dnodeId);
}
code = 0;
// auditRecord(pReq, pMnode->clusterId, "assignLeader", "", "", req.sql, req.sqlLen);
_exit:
if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) {
mError("failed to assign leader since %s", tstrerror(code));
}
tFreeSAssignLeaderReq(&req);
TAOS_RETURN(code);
}
static int32_t mndProcessArbCheckSyncTimer(SRpcMsg *pReq) {
int32_t code = 0, lino = 0;
SMnode *pMnode = pReq->info.node;
@ -701,7 +771,7 @@ static int32_t mndProcessArbCheckSyncTimer(SRpcMsg *pReq) {
mTrace("vgId:%d, arb skip to send msg by check sync", vgId);
break;
case CHECK_SYNC_SET_ASSIGNED_LEADER:
(void)mndSendArbSetAssignedLeaderReq(pMnode, assgndDnodeId, vgId, arbToken, term, pAssgndLeader->token);
(void)mndSendArbSetAssignedLeaderReq(pMnode, assgndDnodeId, vgId, arbToken, term, pAssgndLeader->token, false);
mInfo("vgId:%d, arb send set assigned leader to dnodeId:%d", vgId, assgndDnodeId);
break;
case CHECK_SYNC_CHECK_SYNC:
@ -1361,7 +1431,7 @@ static int32_t mndRetrieveArbGroups(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock
char strCheckSyncCode[100] = {0};
char bufUpdateTime[40] = {0};
(void)formatTimestamp(bufUpdateTime, pGroup->updateTimeMs, TSDB_TIME_PRECISION_MILLI);
tsnprintf(strCheckSyncCode, 100, "%s(%s)", tstrerror(pGroup->code), bufUpdateTime);
(void)tsnprintf(strCheckSyncCode, 100, "%s(%s)", tstrerror(pGroup->code), bufUpdateTime);
char checkSyncCode[100 + VARSTR_HEADER_SIZE] = {0};
STR_WITH_MAXSIZE_TO_VARSTR(checkSyncCode, strCheckSyncCode, 100 + VARSTR_HEADER_SIZE);

View File

@ -927,7 +927,8 @@ static int32_t mndCompactDispatchAudit(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pD
return 0;
}
extern int32_t mndCompactDb(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, STimeWindow tw, SArray *vgroupIds);
extern int32_t mndCompactDb(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, STimeWindow tw, SArray *vgroupIds,
bool metaOnly);
static int32_t mndCompactDispatch(SRpcMsg *pReq) {
int32_t code = 0;
SMnode *pMnode = pReq->info.node;
@ -982,7 +983,7 @@ static int32_t mndCompactDispatch(SRpcMsg *pReq) {
.skey = convertTimePrecision(curMs + compactStartTime * 60000LL, TSDB_TIME_PRECISION_MILLI, pDb->cfg.precision),
.ekey = convertTimePrecision(curMs + compactEndTime * 60000LL, TSDB_TIME_PRECISION_MILLI, pDb->cfg.precision)};
if ((code = mndCompactDb(pMnode, NULL, pDb, tw, NULL)) == 0) {
if ((code = mndCompactDb(pMnode, NULL, pDb, tw, NULL, false)) == 0) {
mInfo("db:%p,%s, succeed to dispatch compact with range:[%" PRIi64 ",%" PRIi64 "], interval:%dm, start:%" PRIi64
"m, end:%" PRIi64 "m, offset:%" PRIi8 "h",
pDb, pDb->name, tw.skey, tw.ekey, pDb->cfg.compactInterval, compactStartTime, compactEndTime,

View File

@ -1339,17 +1339,47 @@ static int32_t mndProcessStreamCheckpoint(SRpcMsg *pReq) {
SStreamObj *pStream = NULL;
int32_t code = 0;
int32_t numOfCheckpointTrans = 0;
SArray *pLongChkpts = NULL;
SArray *pList = NULL;
int64_t now = taosGetTimestampMs();
if ((code = mndCheckTaskAndNodeStatus(pMnode)) != 0) {
return TSDB_CODE_STREAM_TASK_IVLD_STATUS;
}
SArray *pList = taosArrayInit(4, sizeof(SCheckpointInterval));
pList = taosArrayInit(4, sizeof(SCheckpointInterval));
if (pList == NULL) {
mError("failed to init chkptInterval info, not handle stream checkpoint, code:%s", tstrerror(terrno));
return terrno;
}
int64_t now = taosGetTimestampMs();
pLongChkpts = taosArrayInit(4, sizeof(SStreamTransInfo));
if (pLongChkpts == NULL) {
mError("failed to init long checkpoint list, not handle stream checkpoint, code:%s", tstrerror(terrno));
taosArrayDestroy(pList);
return terrno;
}
// check if an ongoing checkpoint trans or a long-running chkpt trans exists.
code = mndStreamClearFinishedTrans(pMnode, &numOfCheckpointTrans, pLongChkpts);
if (code) {
mError("failed to clear finish trans, code:%s", tstrerror(code));
taosArrayDestroy(pList);
taosArrayDestroy(pLongChkpts);
return code;
}
// kill long-running checkpoint trans and reset task status
if (taosArrayGetSize(pLongChkpts) > 0) {
killChkptAndResetStreamTask(pMnode, pLongChkpts);
taosArrayDestroy(pList);
taosArrayDestroy(pLongChkpts);
return TSDB_CODE_SUCCESS;
}
taosArrayDestroy(pLongChkpts);
while ((pIter = sdbFetch(pSdb, SDB_STREAM, pIter, (void **)&pStream)) != NULL) {
int64_t duration = now - pStream->checkpointFreq;
@ -1385,12 +1415,6 @@ static int32_t mndProcessStreamCheckpoint(SRpcMsg *pReq) {
}
taosArraySort(pList, streamWaitComparFn);
code = mndStreamClearFinishedTrans(pMnode, &numOfCheckpointTrans);
if (code) {
mError("failed to clear finish trans, code:%s", tstrerror(code));
taosArrayDestroy(pList);
return code;
}
int32_t numOfQual = taosArrayGetSize(pList);
if (numOfCheckpointTrans >= tsMaxConcurrentCheckpoint) {

View File

@ -16,6 +16,8 @@
#include "mndStream.h"
#include "mndTrans.h"
#define MAX_CHKPT_EXEC_ELAPSED (600*1000) // 600s
typedef struct SKeyInfo {
void *pKey;
int32_t keyLen;
@ -31,11 +33,12 @@ int32_t mndStreamRegisterTrans(STrans *pTrans, const char *pTransName, int64_t s
return taosHashPut(execInfo.transMgmt.pDBTrans, &streamId, sizeof(streamId), &info, sizeof(SStreamTransInfo));
}
int32_t mndStreamClearFinishedTrans(SMnode *pMnode, int32_t *pNumOfActiveChkpt) {
int32_t mndStreamClearFinishedTrans(SMnode *pMnode, int32_t *pNumOfActiveChkpt, SArray*pLongChkptTrans) {
size_t keyLen = 0;
void *pIter = NULL;
SArray *pList = taosArrayInit(4, sizeof(SKeyInfo));
int32_t numOfChkpt = 0;
int64_t now = taosGetTimestampMs();
if (pNumOfActiveChkpt != NULL) {
*pNumOfActiveChkpt = 0;
@ -63,6 +66,18 @@ int32_t mndStreamClearFinishedTrans(SMnode *pMnode, int32_t *pNumOfActiveChkpt)
} else {
if (strcmp(pEntry->name, MND_STREAM_CHECKPOINT_NAME) == 0) {
numOfChkpt++;
// has lasted beyond 10 min, kill it
int64_t dur = now - pTrans->createdTime;
if ((dur >= MAX_CHKPT_EXEC_ELAPSED) && (pLongChkptTrans != NULL)) {
mInfo("long chkpt transId:%d, start:%" PRId64
" exec duration:%.2fs, beyond threshold %.2f min, kill it and reset task status",
pTrans->id, pTrans->createdTime, dur / 1000.0, MAX_CHKPT_EXEC_ELAPSED/(1000*60.0));
void* p = taosArrayPush(pLongChkptTrans, pEntry);
if (p == NULL) {
mError("failed to add long checkpoint trans, transId:%d, code:%s", pEntry->transId, tstrerror(terrno));
}
}
}
mndReleaseTrans(pMnode, pTrans);
}
@ -101,7 +116,7 @@ static int32_t doStreamTransConflictCheck(SMnode *pMnode, int64_t streamId, cons
}
// if any task updates exist, any other stream trans are not allowed to be created
int32_t code = mndStreamClearFinishedTrans(pMnode, NULL);
int32_t code = mndStreamClearFinishedTrans(pMnode, NULL, NULL);
if (code) {
mError("failed to clear finish trans, code:%s, and continue", tstrerror(code));
}
@ -160,7 +175,7 @@ int32_t mndStreamGetRelTrans(SMnode *pMnode, int64_t streamId) {
return 0;
}
int32_t code = mndStreamClearFinishedTrans(pMnode, NULL);
int32_t code = mndStreamClearFinishedTrans(pMnode, NULL, NULL);
if (code) {
mError("failed to clear finish trans, code:%s", tstrerror(code));
}
@ -361,3 +376,37 @@ void killAllCheckpointTrans(SMnode *pMnode, SVgroupChangeInfo *pChangeInfo) {
mDebug("complete clear checkpoints in all Dbs");
}
void killChkptAndResetStreamTask(SMnode *pMnode, SArray* pLongChkpts) {
int32_t code = 0;
int64_t now = taosGetTimestampMs();
int32_t num = taosArrayGetSize(pLongChkpts);
mInfo("start to kill %d long checkpoint trans", num);
for(int32_t i = 0; i < num; ++i) {
SStreamTransInfo* pTrans = (SStreamTransInfo*) taosArrayGet(pLongChkpts, i);
if (pTrans == NULL) {
continue;
}
double el = (now - pTrans->startTime) / 1000.0;
mInfo("stream:0x%" PRIx64 " start to kill ongoing long checkpoint transId:%d, elapsed time:%.2fs. killed",
pTrans->streamId, pTrans->transId, el);
SStreamObj *p = NULL;
code = mndGetStreamObj(pMnode, pTrans->streamId, &p);
if (code == 0 && p != NULL) {
mndKillTransImpl(pMnode, pTrans->transId, p->sourceDb);
mDebug("stream:%s 0x%" PRIx64 " transId:%d checkpointId:%" PRId64 " create reset task trans", p->name,
pTrans->streamId, pTrans->transId, p->checkpointId);
code = mndCreateStreamResetStatusTrans(pMnode, p, p->checkpointId);
if (code) {
mError("stream:%s 0x%"PRIx64" failed to create reset stream task, code:%s", p->name, p->uid, tstrerror(code));
}
sdbRelease(pMnode->pSdb, p);
}
}
}

View File

@ -3632,11 +3632,12 @@ bool mndVgroupInDnode(SVgObj *pVgroup, int32_t dnodeId) {
}
static void *mndBuildCompactVnodeReq(SMnode *pMnode, SDbObj *pDb, SVgObj *pVgroup, int32_t *pContLen, int64_t compactTs,
STimeWindow tw) {
STimeWindow tw, bool metaOnly) {
SCompactVnodeReq compactReq = {0};
compactReq.dbUid = pDb->uid;
compactReq.compactStartTime = compactTs;
compactReq.tw = tw;
compactReq.metaOnly = metaOnly;
tstrncpy(compactReq.db, pDb->name, TSDB_DB_FNAME_LEN);
mInfo("vgId:%d, build compact vnode config req", pVgroup->vgId);
@ -3667,13 +3668,13 @@ static void *mndBuildCompactVnodeReq(SMnode *pMnode, SDbObj *pDb, SVgObj *pVgrou
}
static int32_t mndAddCompactVnodeAction(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup, int64_t compactTs,
STimeWindow tw) {
STimeWindow tw, bool metaOnly) {
int32_t code = 0;
STransAction action = {0};
action.epSet = mndGetVgroupEpset(pMnode, pVgroup);
int32_t contLen = 0;
void *pReq = mndBuildCompactVnodeReq(pMnode, pDb, pVgroup, &contLen, compactTs, tw);
void *pReq = mndBuildCompactVnodeReq(pMnode, pDb, pVgroup, &contLen, compactTs, tw, metaOnly);
if (pReq == NULL) {
code = TSDB_CODE_MND_RETURN_VALUE_NULL;
if (terrno != 0) code = terrno;
@ -3693,7 +3694,7 @@ static int32_t mndAddCompactVnodeAction(SMnode *pMnode, STrans *pTrans, SDbObj *
}
int32_t mndBuildCompactVgroupAction(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup, int64_t compactTs,
STimeWindow tw) {
TAOS_CHECK_RETURN(mndAddCompactVnodeAction(pMnode, pTrans, pDb, pVgroup, compactTs, tw));
STimeWindow tw, bool metaOnly) {
TAOS_CHECK_RETURN(mndAddCompactVnodeAction(pMnode, pTrans, pDb, pVgroup, compactTs, tw, metaOnly));
return 0;
}

View File

@ -96,6 +96,7 @@ if(TD_VNODE_PLUGINS)
vnode
PRIVATE
${TD_ENTERPRISE_DIR}/src/plugins/vnode/src/tsdbCompact.c
${TD_ENTERPRISE_DIR}/src/plugins/vnode/src/metaCompact.c
${TD_ENTERPRISE_DIR}/src/plugins/vnode/src/tsdbCompactMonitor.c
${TD_ENTERPRISE_DIR}/src/plugins/vnode/src/vnodeCompact.c
)

View File

@ -24,7 +24,6 @@
extern "C" {
#endif
typedef struct SMetaIdx SMetaIdx;
typedef struct SMetaDB SMetaDB;
typedef struct SMetaCache SMetaCache;
@ -103,8 +102,6 @@ struct SMeta {
// stream
TTB* pStreamDb;
SMetaIdx* pIdx;
SMetaCache* pCache;
};

View File

@ -168,7 +168,7 @@ int metaTtlFindExpired(SMeta* pMeta, int64_t timePointMs, SArray* tb
int metaAlterTable(SMeta* pMeta, int64_t version, SVAlterTbReq* pReq, STableMetaRsp* pMetaRsp);
int metaUpdateChangeTimeWithLock(SMeta* pMeta, tb_uid_t uid, int64_t changeTimeMs);
SSchemaWrapper* metaGetTableSchema(SMeta* pMeta, tb_uid_t uid, int32_t sver, int lock);
int64_t metaGetTableCreateTime(SMeta *pMeta, tb_uid_t uid, int lock);
int64_t metaGetTableCreateTime(SMeta* pMeta, tb_uid_t uid, int lock);
int32_t metaGetTbTSchemaNotNull(SMeta* pMeta, tb_uid_t uid, int32_t sver, int lock, STSchema** ppTSchema);
int32_t metaGetTbTSchemaMaybeNull(SMeta* pMeta, tb_uid_t uid, int32_t sver, int lock, STSchema** ppTSchema);
STSchema* metaGetTbTSchema(SMeta* pMeta, tb_uid_t uid, int32_t sver, int lock);
@ -487,7 +487,13 @@ struct SVnode {
// commit variables
SVATaskID commitTask;
SMeta* pMeta;
struct {
TdThreadRwlock metaRWLock;
SMeta* pMeta;
SMeta* pNewMeta;
SVATaskID metaCompactTask;
};
SSma* pSma;
STsdb* pTsdb;
SWal* pWal;

View File

@ -135,12 +135,17 @@ int metaEncodeEntry(SEncoder *pCoder, const SMetaEntry *pME) {
return 0;
}
int metaDecodeEntry(SDecoder *pCoder, SMetaEntry *pME) {
int metaDecodeEntryImpl(SDecoder *pCoder, SMetaEntry *pME, bool headerOnly) {
TAOS_CHECK_RETURN(tStartDecode(pCoder));
TAOS_CHECK_RETURN(tDecodeI64(pCoder, &pME->version));
TAOS_CHECK_RETURN(tDecodeI8(pCoder, &pME->type));
TAOS_CHECK_RETURN(tDecodeI64(pCoder, &pME->uid));
if (headerOnly) {
tEndDecode(pCoder);
return 0;
}
if (pME->type > 0) {
TAOS_CHECK_RETURN(tDecodeCStr(pCoder, &pME->name));
@ -209,6 +214,8 @@ int metaDecodeEntry(SDecoder *pCoder, SMetaEntry *pME) {
return 0;
}
int metaDecodeEntry(SDecoder *pCoder, SMetaEntry *pME) { return metaDecodeEntryImpl(pCoder, pME, false); }
static int32_t metaCloneSchema(const SSchemaWrapper *pSrc, SSchemaWrapper *pDst) {
if (pSrc == NULL || pDst == NULL) {
return TSDB_CODE_INVALID_PARA;

View File

@ -133,7 +133,7 @@ static void doScan(SMeta *pMeta) {
}
}
static int32_t metaOpenImpl(SVnode *pVnode, SMeta **ppMeta, const char *metaDir, int8_t rollback) {
int32_t metaOpenImpl(SVnode *pVnode, SMeta **ppMeta, const char *metaDir, int8_t rollback) {
SMeta *pMeta = NULL;
int32_t code = 0;
int32_t lino;
@ -251,187 +251,35 @@ _exit:
return code;
}
bool generateNewMeta = false;
static int32_t metaGenerateNewMeta(SMeta **ppMeta) {
SMeta *pNewMeta = NULL;
SMeta *pMeta = *ppMeta;
SVnode *pVnode = pMeta->pVnode;
metaInfo("vgId:%d start to generate new meta", TD_VID(pMeta->pVnode));
// Open a new meta for reorganization
int32_t code = metaOpenImpl(pMeta->pVnode, &pNewMeta, VNODE_META_TMP_DIR, false);
if (code) {
return code;
}
code = metaBegin(pNewMeta, META_BEGIN_HEAP_NIL);
if (code) {
return code;
}
// i == 0, scan super table
// i == 1, scan normal table and child table
for (int i = 0; i < 2; i++) {
TBC *uidCursor = NULL;
int32_t counter = 0;
code = tdbTbcOpen(pMeta->pUidIdx, &uidCursor, NULL);
if (code) {
metaError("vgId:%d failed to open uid index cursor, reason:%s", TD_VID(pVnode), tstrerror(code));
return code;
}
code = tdbTbcMoveToFirst(uidCursor);
if (code) {
metaError("vgId:%d failed to move to first, reason:%s", TD_VID(pVnode), tstrerror(code));
tdbTbcClose(uidCursor);
return code;
}
for (;;) {
const void *pKey;
int kLen;
const void *pVal;
int vLen;
if (tdbTbcGet(uidCursor, &pKey, &kLen, &pVal, &vLen) < 0) {
break;
}
tb_uid_t uid = *(tb_uid_t *)pKey;
SUidIdxVal *pUidIdxVal = (SUidIdxVal *)pVal;
if ((i == 0 && (pUidIdxVal->suid && pUidIdxVal->suid == uid)) // super table
|| (i == 1 && (pUidIdxVal->suid == 0 || pUidIdxVal->suid != uid)) // normal table and child table
) {
counter++;
if (i == 0) {
metaInfo("vgId:%d counter:%d new meta handle %s table uid:%" PRId64, TD_VID(pVnode), counter, "super", uid);
} else {
metaInfo("vgId:%d counter:%d new meta handle %s table uid:%" PRId64, TD_VID(pVnode), counter,
pUidIdxVal->suid == 0 ? "normal" : "child", uid);
}
// fetch table entry
void *value = NULL;
int valueSize = 0;
if (tdbTbGet(pMeta->pTbDb,
&(STbDbKey){
.version = pUidIdxVal->version,
.uid = uid,
},
sizeof(uid), &value, &valueSize) == 0) {
SDecoder dc = {0};
SMetaEntry me = {0};
tDecoderInit(&dc, value, valueSize);
if (metaDecodeEntry(&dc, &me) == 0) {
if (me.type == TSDB_CHILD_TABLE &&
tdbTbGet(pMeta->pUidIdx, &me.ctbEntry.suid, sizeof(me.ctbEntry.suid), NULL, NULL) != 0) {
metaError("vgId:%d failed to get super table uid:%" PRId64 " for child table uid:%" PRId64,
TD_VID(pVnode), me.ctbEntry.suid, uid);
} else if (metaHandleEntry2(pNewMeta, &me) != 0) {
metaError("vgId:%d failed to handle entry, uid:%" PRId64, TD_VID(pVnode), uid);
}
}
tDecoderClear(&dc);
}
tdbFree(value);
}
code = tdbTbcMoveToNext(uidCursor);
if (code) {
metaError("vgId:%d failed to move to next, reason:%s", TD_VID(pVnode), tstrerror(code));
return code;
}
}
tdbTbcClose(uidCursor);
}
code = metaCommit(pNewMeta, pNewMeta->txn);
if (code) {
metaError("vgId:%d failed to commit, reason:%s", TD_VID(pVnode), tstrerror(code));
return code;
}
code = metaFinishCommit(pNewMeta, pNewMeta->txn);
if (code) {
metaError("vgId:%d failed to finish commit, reason:%s", TD_VID(pVnode), tstrerror(code));
return code;
}
if ((code = metaBegin(pNewMeta, META_BEGIN_HEAP_NIL)) != 0) {
metaError("vgId:%d failed to begin new meta, reason:%s", TD_VID(pVnode), tstrerror(code));
}
metaClose(&pNewMeta);
metaInfo("vgId:%d finish to generate new meta", TD_VID(pVnode));
return 0;
void vnodeGetMetaPath(SVnode *pVnode, const char *metaDir, char *fname) {
vnodeGetPrimaryDir(pVnode->path, pVnode->diskPrimary, pVnode->pTfs, fname, TSDB_FILENAME_LEN);
int32_t offset = strlen(fname);
snprintf(fname + offset, TSDB_FILENAME_LEN - offset - 1, "%s%s", TD_DIRSEP, metaDir);
}
int32_t metaOpen(SVnode *pVnode, SMeta **ppMeta, int8_t rollback) {
if (generateNewMeta) {
char path[TSDB_FILENAME_LEN] = {0};
char oldMetaPath[TSDB_FILENAME_LEN] = {0};
char newMetaPath[TSDB_FILENAME_LEN] = {0};
char backupMetaPath[TSDB_FILENAME_LEN] = {0};
int32_t code = TSDB_CODE_SUCCESS;
char metaDir[TSDB_FILENAME_LEN] = {0};
char metaTempDir[TSDB_FILENAME_LEN] = {0};
vnodeGetPrimaryDir(pVnode->path, pVnode->diskPrimary, pVnode->pTfs, path, TSDB_FILENAME_LEN);
snprintf(oldMetaPath, sizeof(oldMetaPath) - 1, "%s%s%s", path, TD_DIRSEP, VNODE_META_DIR);
snprintf(newMetaPath, sizeof(newMetaPath) - 1, "%s%s%s", path, TD_DIRSEP, VNODE_META_TMP_DIR);
snprintf(backupMetaPath, sizeof(backupMetaPath) - 1, "%s%s%s", path, TD_DIRSEP, VNODE_META_BACKUP_DIR);
vnodeGetMetaPath(pVnode, VNODE_META_DIR, metaDir);
vnodeGetMetaPath(pVnode, VNODE_META_TMP_DIR, metaTempDir);
bool oldMetaExist = taosCheckExistFile(oldMetaPath);
bool newMetaExist = taosCheckExistFile(newMetaPath);
bool backupMetaExist = taosCheckExistFile(backupMetaPath);
if ((!backupMetaExist && !oldMetaExist && newMetaExist) // case 2
|| (backupMetaExist && !oldMetaExist && !newMetaExist) // case 4
|| (backupMetaExist && oldMetaExist && newMetaExist) // case 8
) {
metaError("vgId:%d invalid meta state, please check", TD_VID(pVnode));
return TSDB_CODE_FAILED;
} else if ((backupMetaExist && oldMetaExist && !newMetaExist) // case 7
|| (!backupMetaExist && !oldMetaExist && !newMetaExist) // case 1
) {
return metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, rollback);
} else if (backupMetaExist && !oldMetaExist && newMetaExist) {
if (taosRenameFile(newMetaPath, oldMetaPath) != 0) {
metaError("vgId:%d failed to rename new meta to old meta, reason:%s", TD_VID(pVnode), tstrerror(terrno));
return terrno;
}
return metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, rollback);
} else {
int32_t code = metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, rollback);
if (code) {
return code;
}
code = metaGenerateNewMeta(ppMeta);
if (code) {
metaError("vgId:%d failed to generate new meta, reason:%s", TD_VID(pVnode), tstrerror(code));
}
metaClose(ppMeta);
if (taosRenameFile(oldMetaPath, backupMetaPath) != 0) {
metaError("vgId:%d failed to rename old meta to backup, reason:%s", TD_VID(pVnode), tstrerror(terrno));
return terrno;
}
// rename the new meta to old meta
if (taosRenameFile(newMetaPath, oldMetaPath) != 0) {
metaError("vgId:%d failed to rename new meta to old meta, reason:%s", TD_VID(pVnode), tstrerror(terrno));
return terrno;
}
code = metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, false);
if (code) {
metaError("vgId:%d failed to open new meta, reason:%s", TD_VID(pVnode), tstrerror(code));
return code;
}
// Check file states
if (!taosCheckExistFile(metaDir) && taosCheckExistFile(metaTempDir)) {
code = taosRenameFile(metaTempDir, metaDir);
if (code) {
metaError("vgId:%d, %s failed at %s:%d since %s: rename %s to %s failed", TD_VID(pVnode), __func__, __FILE__,
__LINE__, tstrerror(code), metaTempDir, metaDir);
return code;
}
}
} else {
return metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, rollback);
// Do open meta
code = metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, rollback);
if (code) {
metaError("vgId:%d, %s failed at %s:%d since %s", TD_VID(pVnode), __func__, __FILE__, __LINE__, tstrerror(code));
return code;
}
return TSDB_CODE_SUCCESS;

View File

@ -21,6 +21,7 @@ struct SMetaSnapReader {
int64_t sver;
int64_t ever;
TBC* pTbc;
int32_t iLoop;
};
int32_t metaSnapReaderOpen(SMeta* pMeta, int64_t sver, int64_t ever, SMetaSnapReader** ppReader) {
@ -65,6 +66,22 @@ void metaSnapReaderClose(SMetaSnapReader** ppReader) {
}
}
extern int metaDecodeEntryImpl(SDecoder* pCoder, SMetaEntry* pME, bool headerOnly);
static int32_t metaDecodeEntryHeader(void* data, int32_t size, SMetaEntry* entry) {
  SDecoder decoder = {0};
  tDecoderInit(&decoder, (uint8_t*)data, size);
  int32_t code = metaDecodeEntryImpl(&decoder, entry, true);
  tDecoderClear(&decoder);
  return code;
}
int32_t metaSnapRead(SMetaSnapReader* pReader, uint8_t** ppData) {
int32_t code = 0;
const void* pKey = NULL;
@ -72,19 +89,47 @@ int32_t metaSnapRead(SMetaSnapReader* pReader, uint8_t** ppData) {
int32_t nKey = 0;
int32_t nData = 0;
STbDbKey key;
int32_t c;
*ppData = NULL;
for (;;) {
if (tdbTbcGet(pReader->pTbc, &pKey, &nKey, &pData, &nData)) {
while (pReader->iLoop < 2) {
if (tdbTbcGet(pReader->pTbc, &pKey, &nKey, &pData, &nData) != 0 || ((STbDbKey*)pKey)->version > pReader->ever) {
pReader->iLoop++;
// Reopen the cursor to read from the beginning
tdbTbcClose(pReader->pTbc);
pReader->pTbc = NULL;
code = tdbTbcOpen(pReader->pMeta->pTbDb, &pReader->pTbc, NULL);
if (code) {
metaError("vgId:%d, %s failed at %s:%d since %s", TD_VID(pReader->pMeta->pVnode), __func__, __FILE__, __LINE__,
tstrerror(code));
goto _exit;
}
code = tdbTbcMoveTo(pReader->pTbc, &(STbDbKey){.version = pReader->sver, .uid = INT64_MIN}, sizeof(STbDbKey), &c);
if (code) {
metaError("vgId:%d, %s failed at %s:%d since %s", TD_VID(pReader->pMeta->pVnode), __func__, __FILE__, __LINE__,
tstrerror(code));
goto _exit;
}
continue;
}
// Decode meta entry
SMetaEntry entry = {0};
code = metaDecodeEntryHeader((void*)pData, nData, &entry);
if (code) {
metaError("vgId:%d, %s failed at %s:%d since %s", TD_VID(pReader->pMeta->pVnode), __func__, __FILE__, __LINE__,
tstrerror(code));
goto _exit;
}
key = ((STbDbKey*)pKey)[0];
if (key.version > pReader->ever) {
goto _exit;
}
if (key.version < pReader->sver) {
if (key.version < pReader->sver //
|| (pReader->iLoop == 0 && TABS(entry.type) != TSDB_SUPER_TABLE)  // first pass sends only super table entries
|| (pReader->iLoop == 1 && TABS(entry.type) == TSDB_SUPER_TABLE)  // second pass sends only non-super-table entries
) {
if (tdbTbcMoveToNext(pReader->pTbc) != 0) {
metaTrace("vgId:%d, vnode snapshot meta read data done", TD_VID(pReader->pMeta->pVnode));
}
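The iLoop counter turns the snapshot into two ordered passes over pTbDb: pass 0 streams only super-table entries (TABS strips the sign used to mark dropped entries), pass 1 streams everything else, so a receiver always sees a child table's super table before the child itself. In outline:

// for (iLoop = 0; iLoop < 2; iLoop++) {
//   rewind cursor to (sver, INT64_MIN);
//   for each (key, entry) with key.version <= ever:
//     skip if key.version < sver;
//     skip if iLoop == 0 && TABS(entry.type) != TSDB_SUPER_TABLE;
//     skip if iLoop == 1 && TABS(entry.type) == TSDB_SUPER_TABLE;
//     emit entry;
// }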

View File

@ -449,6 +449,7 @@ SVnode *vnodeOpen(const char *path, int32_t diskPrimary, STfs *pTfs, SMsgCb msgC
}
// open meta
(void)taosThreadRwlockInit(&pVnode->metaRWLock, NULL);
vInfo("vgId:%d, start to open vnode meta", TD_VID(pVnode));
if (metaOpen(pVnode, &pVnode->pMeta, rollback) < 0) {
vError("vgId:%d, failed to open vnode meta since %s", TD_VID(pVnode), tstrerror(terrno));
@ -548,6 +549,7 @@ _err:
if (pVnode->pMeta) metaClose(&pVnode->pMeta);
if (pVnode->freeList) vnodeCloseBufPool(pVnode);
(void)taosThreadRwlockDestroy(&pVnode->metaRWLock);
taosMemoryFree(pVnode);
return NULL;
}

View File

@ -49,7 +49,7 @@ int32_t fillTableColCmpr(SMetaReader *reader, SSchemaExt *pExt, int32_t numOfCol
return 0;
}
void vnodePrintTableMeta(STableMetaRsp* pMeta) {
void vnodePrintTableMeta(STableMetaRsp *pMeta) {
if (!(qDebugFlag & DEBUG_DEBUG)) {
return;
}
@ -70,14 +70,13 @@ void vnodePrintTableMeta(STableMetaRsp* pMeta) {
qDebug("sysInfo:%d", pMeta->sysInfo);
if (pMeta->pSchemas) {
for (int32_t i = 0; i < (pMeta->numOfColumns + pMeta->numOfTags); ++i) {
SSchema* pSchema = pMeta->pSchemas + i;
qDebug("%d col/tag: type:%d, flags:%d, colId:%d, bytes:%d, name:%s", i, pSchema->type, pSchema->flags, pSchema->colId, pSchema->bytes, pSchema->name);
SSchema *pSchema = pMeta->pSchemas + i;
qDebug("%d col/tag: type:%d, flags:%d, colId:%d, bytes:%d, name:%s", i, pSchema->type, pSchema->flags,
pSchema->colId, pSchema->bytes, pSchema->name);
}
}
}
int32_t vnodeGetTableMeta(SVnode *pVnode, SRpcMsg *pMsg, bool direct) {
STableInfoReq infoReq = {0};
STableMetaRsp metaRsp = {0};
@ -528,6 +527,13 @@ _exit:
return code;
}
#define VNODE_DO_META_QUERY(pVnode, cmd) \
do { \
(void)taosThreadRwlockRdlock(&(pVnode)->metaRWLock); \
cmd; \
(void)taosThreadRwlockUnlock(&(pVnode)->metaRWLock); \
} while (0)
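VNODE_DO_META_QUERY exists because, per the reworked SVnode layout earlier in this commit, pMeta can be swapped with pNewMeta by the background metaCompactTask; wrapping each read in metaRWLock keeps load queries from racing that swap (an inference from the struct change, not stated explicitly in the diff).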
int32_t vnodeGetLoad(SVnode *pVnode, SVnodeLoad *pLoad) {
SSyncState state = syncGetState(pVnode->sync);
pLoad->syncAppliedIndex = pVnode->state.applied;
@ -543,8 +549,8 @@ int32_t vnodeGetLoad(SVnode *pVnode, SVnodeLoad *pLoad) {
pLoad->learnerProgress = state.progress;
pLoad->cacheUsage = tsdbCacheGetUsage(pVnode);
pLoad->numOfCachedTables = tsdbCacheGetElems(pVnode);
pLoad->numOfTables = metaGetTbNum(pVnode->pMeta);
pLoad->numOfTimeSeries = metaGetTimeSeriesNum(pVnode->pMeta, 1);
VNODE_DO_META_QUERY(pVnode, pLoad->numOfTables = metaGetTbNum(pVnode->pMeta));
VNODE_DO_META_QUERY(pVnode, pLoad->numOfTimeSeries = metaGetTimeSeriesNum(pVnode->pMeta, 1));
pLoad->totalStorage = (int64_t)3 * 1073741824;
pLoad->compStorage = (int64_t)2 * 1073741824;
pLoad->pointsWritten = 100;

View File

@ -876,7 +876,7 @@ int32_t projectApplyFunctions(SExprInfo* pExpr, SSDataBlock* pResult, SSDataBloc
}
pResult->info.rows = 1;
TSDB_CHECK_CODE(code, lino, _exit);
goto _exit;
}
if (pResult != pSrcBlock) {

View File

@ -251,6 +251,11 @@ int32_t updateEventWindowInfo(SStreamAggSupporter* pAggSup, SEventWindowInfo* pW
pWin->ekey = pTsData[i];
pWinInfo->pWinFlag->endFlag = ends[i];
} else if (pWin->ekey == pTsData[i]) {
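// a row at the same end timestamp whose end flag drops back to false
// invalidates the already-closed window: report the rows consumed so far
// and ask the caller to rebuild it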
if (pWinInfo->pWinFlag->endFlag == true && ends[i] == false) {
(*pWinRow) = i + 1 - start;
*pRebuild = true;
goto _end;
}
pWinInfo->pWinFlag->endFlag |= ends[i];
} else if (ends[i] && !pWinInfo->pWinFlag->endFlag) {
*pRebuild = true;

View File

@ -474,14 +474,12 @@ void destroyFlusedppPos(void* ppRes) {
}
void clearGroupResInfo(SGroupResInfo* pGroupResInfo) {
if (pGroupResInfo->freeItem) {
int32_t size = taosArrayGetSize(pGroupResInfo->pRows);
for (int32_t i = pGroupResInfo->index; i < size; i++) {
void* pPos = taosArrayGetP(pGroupResInfo->pRows, i);
destroyFlusedPos(pPos);
}
pGroupResInfo->freeItem = false;
int32_t size = taosArrayGetSize(pGroupResInfo->pRows);
for (int32_t i = pGroupResInfo->index; i < size; i++) {
void* pPos = taosArrayGetP(pGroupResInfo->pRows, i);
destroyFlusedPos(pPos);
}
pGroupResInfo->freeItem = false;
taosArrayDestroy(pGroupResInfo->pRows);
pGroupResInfo->pRows = NULL;
pGroupResInfo->index = 0;

View File

@ -199,6 +199,8 @@ const char* nodesNodeName(ENodeType type) {
return "ResetStreamStmt";
case QUERY_NODE_BALANCE_VGROUP_STMT:
return "BalanceVgroupStmt";
case QUERY_NODE_ASSIGN_LEADER_STMT:
return "AssignLeaderStmt";
case QUERY_NODE_BALANCE_VGROUP_LEADER_STMT:
return "BalanceVgroupLeaderStmt";
case QUERY_NODE_BALANCE_VGROUP_LEADER_DATABASE_STMT:
@ -8195,6 +8197,8 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) {
return dropStreamStmtToJson(pObj, pJson);
case QUERY_NODE_BALANCE_VGROUP_STMT:
return TSDB_CODE_SUCCESS; // SBalanceVgroupStmt has no fields to serialize.
case QUERY_NODE_ASSIGN_LEADER_STMT:
return TSDB_CODE_SUCCESS;
case QUERY_NODE_BALANCE_VGROUP_LEADER_STMT:
return TSDB_CODE_SUCCESS; // SBalanceVgroupLeaderStmt has no fields to serialize.
case QUERY_NODE_BALANCE_VGROUP_LEADER_DATABASE_STMT:
@ -8564,6 +8568,8 @@ static int32_t jsonToSpecificNode(const SJson* pJson, void* pObj) {
return jsonToDropStreamStmt(pJson, pObj);
case QUERY_NODE_BALANCE_VGROUP_STMT:
return TSDB_CODE_SUCCESS; // SBalanceVgroupStmt has no fields to deserialize.
case QUERY_NODE_ASSIGN_LEADER_STMT:
return TSDB_CODE_SUCCESS;
case QUERY_NODE_BALANCE_VGROUP_LEADER_STMT:
return TSDB_CODE_SUCCESS;
case QUERY_NODE_BALANCE_VGROUP_LEADER_DATABASE_STMT:

View File

@ -625,6 +625,9 @@ int32_t nodesMakeNode(ENodeType type, SNode** ppNodeOut) {
case QUERY_NODE_BALANCE_VGROUP_STMT:
code = makeNode(type, sizeof(SBalanceVgroupStmt), &pNode);
break;
case QUERY_NODE_ASSIGN_LEADER_STMT:
code = makeNode(type, sizeof(SAssignLeaderStmt), &pNode);
break;
case QUERY_NODE_BALANCE_VGROUP_LEADER_STMT:
code = makeNode(type, sizeof(SBalanceVgroupLeaderStmt), &pNode);
break;
@ -1507,6 +1510,7 @@ void nodesDestroyNode(SNode* pNode) {
case QUERY_NODE_RESUME_STREAM_STMT: // no pointer field
case QUERY_NODE_RESET_STREAM_STMT: // no pointer field
case QUERY_NODE_BALANCE_VGROUP_STMT: // no pointer field
case QUERY_NODE_ASSIGN_LEADER_STMT:
case QUERY_NODE_BALANCE_VGROUP_LEADER_STMT: // no pointer field
case QUERY_NODE_BALANCE_VGROUP_LEADER_DATABASE_STMT: // no pointer field
case QUERY_NODE_MERGE_VGROUP_STMT: // no pointer field

View File

@ -202,9 +202,9 @@ SNode* createAlterDatabaseStmt(SAstCreateContext* pCxt, SToken* pDbName, SNode*
SNode* createFlushDatabaseStmt(SAstCreateContext* pCxt, SToken* pDbName);
SNode* createTrimDatabaseStmt(SAstCreateContext* pCxt, SToken* pDbName, int32_t maxSpeed);
SNode* createS3MigrateDatabaseStmt(SAstCreateContext* pCxt, SToken* pDbName);
SNode* createCompactStmt(SAstCreateContext* pCxt, SToken* pDbName, SNode* pStart, SNode* pEnd);
SNode* createCompactStmt(SAstCreateContext* pCxt, SToken* pDbName, SNode* pStart, SNode* pEnd, bool metaOnly);
SNode* createCompactVgroupsStmt(SAstCreateContext* pCxt, SNode* pDbName, SNodeList* vgidList, SNode* pStart,
SNode* pEnd);
SNode* pEnd, bool metaOnly);
SNode* createDefaultTableOptions(SAstCreateContext* pCxt);
SNode* createAlterTableOptions(SAstCreateContext* pCxt);
SNode* setTableOption(SAstCreateContext* pCxt, SNode* pOptions, ETableOptionType type, void* pVal);
@ -309,6 +309,7 @@ SNode* createResetStreamStmt(SAstCreateContext* pCxt, bool ignoreNotExists, STok
SNode* createKillStmt(SAstCreateContext* pCxt, ENodeType type, const SToken* pId);
SNode* createKillQueryStmt(SAstCreateContext* pCxt, const SToken* pQueryId);
SNode* createBalanceVgroupStmt(SAstCreateContext* pCxt);
SNode* createAssignLeaderStmt(SAstCreateContext* pCxt);
SNode* createBalanceVgroupLeaderStmt(SAstCreateContext* pCxt, const SToken* pVgId);
SNode* createBalanceVgroupLeaderDBNameStmt(SAstCreateContext* pCxt, const SToken* pDbName);
SNode* createMergeVgroupStmt(SAstCreateContext* pCxt, const SToken* pVgId1, const SToken* pVgId2);

View File

@ -242,8 +242,13 @@ cmd ::= ALTER DATABASE db_name(A) alter_db_options(B).
cmd ::= FLUSH DATABASE db_name(A). { pCxt->pRootNode = createFlushDatabaseStmt(pCxt, &A); }
cmd ::= TRIM DATABASE db_name(A) speed_opt(B). { pCxt->pRootNode = createTrimDatabaseStmt(pCxt, &A, B); }
cmd ::= S3MIGRATE DATABASE db_name(A). { pCxt->pRootNode = createS3MigrateDatabaseStmt(pCxt, &A); }
cmd ::= COMPACT DATABASE db_name(A) start_opt(B) end_opt(C). { pCxt->pRootNode = createCompactStmt(pCxt, &A, B, C); }
cmd ::= COMPACT db_name_cond_opt(A) VGROUPS IN NK_LP integer_list(B) NK_RP start_opt(C) end_opt(D). { pCxt->pRootNode = createCompactVgroupsStmt(pCxt, A, B, C, D); }
cmd ::= COMPACT DATABASE db_name(A) start_opt(B) end_opt(C) meta_only(D). { pCxt->pRootNode = createCompactStmt(pCxt, &A, B, C, D); }
cmd ::= COMPACT db_name_cond_opt(A) VGROUPS IN NK_LP integer_list(B) NK_RP start_opt(C) end_opt(D) meta_only(E). { pCxt->pRootNode = createCompactVgroupsStmt(pCxt, A, B, C, D, E); }
%type meta_only { bool }
%destructor meta_only { }
meta_only(A) ::= . { A = false; }
meta_only(A) ::= META_ONLY. { A = true; }
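These productions make META_ONLY an optional trailing keyword on both COMPACT forms, feeding SCompactDatabaseStmt.metaOnly and ultimately the metaOnly flag serialized earlier. Hedged usage examples, reusing the test helper from this commit (database name and time-range clause illustrative):

do_query(taos, "COMPACT DATABASE db1 META_ONLY");
do_query(taos, "COMPACT DATABASE db1 START WITH '2025-01-01 00:00:00' META_ONLY");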
%type not_exists_opt { bool }
%destructor not_exists_opt { }
@ -866,6 +871,9 @@ cmd ::= KILL COMPACT NK_INTEGER(A).
/************************************************ merge/redistribute/ vgroup ******************************************/
cmd ::= BALANCE VGROUP. { pCxt->pRootNode = createBalanceVgroupStmt(pCxt); }
cmd ::= ASSIGN LEADER FORCE. { pCxt->pRootNode = createAssignLeaderStmt(pCxt); }
cmd ::= BALANCE VGROUP LEADER on_vgroup_id(A). { pCxt->pRootNode = createBalanceVgroupLeaderStmt(pCxt, &A); }
cmd ::= BALANCE VGROUP LEADER DATABASE db_name(A). { pCxt->pRootNode = createBalanceVgroupLeaderDBNameStmt(pCxt, &A); }
cmd ::= MERGE VGROUP NK_INTEGER(A) NK_INTEGER(B). { pCxt->pRootNode = createMergeVgroupStmt(pCxt, &A, &B); }
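The new ASSIGN LEADER FORCE command parses to QUERY_NODE_ASSIGN_LEADER_STMT and is handled by mndProcessAssignLeaderMsg above, which force-assigns the first online member of every arbitration group as leader. A hedged one-liner with the same test helper:

do_query(taos, "ASSIGN LEADER FORCE");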

View File

@ -2212,7 +2212,7 @@ _err:
return NULL;
}
SNode* createCompactStmt(SAstCreateContext* pCxt, SToken* pDbName, SNode* pStart, SNode* pEnd) {
SNode* createCompactStmt(SAstCreateContext* pCxt, SToken* pDbName, SNode* pStart, SNode* pEnd, bool metaOnly) {
CHECK_PARSER_STATUS(pCxt);
CHECK_NAME(checkDbName(pCxt, pDbName, false));
SCompactDatabaseStmt* pStmt = NULL;
@ -2221,6 +2221,7 @@ SNode* createCompactStmt(SAstCreateContext* pCxt, SToken* pDbName, SNode* pStart
COPY_STRING_FORM_ID_TOKEN(pStmt->dbName, pDbName);
pStmt->pStart = pStart;
pStmt->pEnd = pEnd;
pStmt->metaOnly = metaOnly;
return (SNode*)pStmt;
_err:
nodesDestroyNode(pStart);
@ -2229,7 +2230,7 @@ _err:
}
SNode* createCompactVgroupsStmt(SAstCreateContext* pCxt, SNode* pDbName, SNodeList* vgidList, SNode* pStart,
SNode* pEnd) {
SNode* pEnd, bool metaOnly) {
CHECK_PARSER_STATUS(pCxt);
if (NULL == pDbName) {
snprintf(pCxt->pQueryCxt->pMsg, pCxt->pQueryCxt->msgLen, "database not specified");
@ -2243,6 +2244,7 @@ SNode* createCompactVgroupsStmt(SAstCreateContext* pCxt, SNode* pDbName, SNodeLi
pStmt->vgidList = vgidList;
pStmt->pStart = pStart;
pStmt->pEnd = pEnd;
pStmt->metaOnly = metaOnly;
return (SNode*)pStmt;
_err:
nodesDestroyNode(pDbName);
@ -3927,6 +3929,16 @@ _err:
return NULL;
}
SNode* createAssignLeaderStmt(SAstCreateContext* pCxt) {
CHECK_PARSER_STATUS(pCxt);
SAssignLeaderStmt* pStmt = NULL;
pCxt->errCode = nodesMakeNode(QUERY_NODE_ASSIGN_LEADER_STMT, (SNode**)&pStmt);
CHECK_MAKE_NODE(pStmt);
return (SNode*)pStmt;
_err:
return NULL;
}
SNode* createBalanceVgroupLeaderStmt(SAstCreateContext* pCxt, const SToken* pVgId) {
CHECK_PARSER_STATUS(pCxt);
SBalanceVgroupLeaderStmt* pStmt = NULL;

View File

@ -54,8 +54,12 @@ int32_t qCloneCurrentTbData(STableDataCxt* pDataBlock, SSubmitTbData** pData) {
int32_t colNum = taosArrayGetSize(pNew->aCol);
for (int32_t i = 0; i < colNum; ++i) {
SColData* pCol = (SColData*)taosArrayGet(pNew->aCol, i);
tColDataDeepClear(pCol);
if (pDataBlock->pData->flags & SUBMIT_REQ_COLUMN_DATA_FORMAT) {
SColData* pCol = (SColData*)taosArrayGet(pNew->aCol, i);
tColDataDeepClear(pCol);
} else {
pNew->aCol = taosArrayInit(20, POINTER_BYTES);
}
}
return TSDB_CODE_SUCCESS;
@ -324,7 +328,7 @@ int32_t qBindStmtStbColsValue(void* pBlock, SArray* pCols, TAOS_MULTI_BIND* bind
int16_t lastColId = -1;
bool colInOrder = true;
if (NULL == *pTSchema) {
if (NULL == pTSchema || NULL == *pTSchema) {
*pTSchema = tBuildTSchema(pSchema, pDataBlock->pMeta->tableInfo.numOfColumns, pDataBlock->pMeta->sversion);
}
@ -693,7 +697,7 @@ int32_t qBindStmtStbColsValue2(void* pBlock, SArray* pCols, TAOS_STMT2_BIND* bin
bool colInOrder = true;
int ncharColNums = 0;
if (NULL == *pTSchema) {
if (NULL == pTSchema || NULL == *pTSchema) {
*pTSchema = tBuildTSchema(pSchema, pDataBlock->pMeta->tableInfo.numOfColumns, pDataBlock->pMeta->sversion);
}
@ -739,6 +743,22 @@ int32_t qBindStmtStbColsValue2(void* pBlock, SArray* pCols, TAOS_STMT2_BIND* bin
goto _return;
}
pBindInfos[c].bind = taosArrayGetLast(ncharBinds);
} else if (TSDB_DATA_TYPE_GEOMETRY == pColSchema->type) {
code = initCtxAsText();
if (code) {
qError("geometry init failed:%s", tstrerror(code));
goto _return;
}
uint8_t* buf = bind[c].buffer;
for (int j = 0; j < bind[c].num; j++) {
code = checkWKB(buf, bind[c].length[j]);
if (code) {
qError("geometry data must be in WKB format");
goto _return;
}
buf += bind[c].length[j];
}
pBindInfos[c].bind = bind + c;
} else {
pBindInfos[c].bind = bind + c;
}
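The new GEOMETRY branch rejects any bound value that checkWKB() does not accept as well-known binary. As a rough illustration of the format being enforced, not of the actual checkWKB() implementation, a WKB value begins with a one-byte byte-order marker followed by a four-byte geometry type:

import struct

def looks_like_wkb(buf: bytes) -> bool:
    # Byte 0: byte order (0 = big endian, 1 = little endian).
    # Bytes 1..4: geometry type code (1 = Point, 2 = LineString, ...).
    if len(buf) < 5 or buf[0] not in (0, 1):
        return False
    geom_type = struct.unpack_from(">I" if buf[0] == 0 else "<I", buf, 1)[0]
    return 1 <= geom_type % 1000 <= 7  # tolerate Z/M/ZM variants

# Little-endian WKB for POINT(10.3, 219): order byte, type 1, then x and y.
point = struct.pack("<BIdd", 1, 1, 10.3, 219.0)
assert looks_like_wkb(point)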
@ -816,7 +836,8 @@ static int32_t convertStmtNcharCol2(SMsgBuf* pMsgBuf, SSchema* pSchema, TAOS_STM
return TSDB_CODE_SUCCESS;
}
int32_t qBindStmtColsValue2(void* pBlock, SArray* pCols, TAOS_STMT2_BIND* bind, char* msgBuf, int32_t msgBufLen, void *charsetCxt) {
int32_t qBindStmtColsValue2(void* pBlock, SArray* pCols, TAOS_STMT2_BIND* bind, char* msgBuf, int32_t msgBufLen,
void* charsetCxt) {
STableDataCxt* pDataBlock = (STableDataCxt*)pBlock;
SSchema* pSchema = getTableColumnSchema(pDataBlock->pMeta);
SBoundColInfo* boundInfo = &pDataBlock->boundColsInfo;
@ -834,7 +855,7 @@ int32_t qBindStmtColsValue2(void* pBlock, SArray* pCols, TAOS_STMT2_BIND* bind,
goto _return;
}
if(boundInfo->pColIndex[c]==0){
if (boundInfo->pColIndex[c] == 0) {
pCol->cflag |= COL_IS_KEY;
}
@ -926,6 +947,94 @@ _return:
return code;
}
int32_t qBindStmt2RowValue(void* pBlock, SArray* pCols, TAOS_STMT2_BIND* bind, char* msgBuf, int32_t msgBufLen,
STSchema** pTSchema, SBindInfo2* pBindInfos, void* charsetCxt) {
STableDataCxt* pDataBlock = (STableDataCxt*)pBlock;
SSchema* pSchema = getTableColumnSchema(pDataBlock->pMeta);
SBoundColInfo* boundInfo = &pDataBlock->boundColsInfo;
SMsgBuf pBuf = {.buf = msgBuf, .len = msgBufLen};
int32_t rowNum = bind->num;
TAOS_STMT2_BIND ncharBind = {0};
TAOS_STMT2_BIND* pBind = NULL;
int32_t code = 0;
int16_t lastColId = -1;
bool colInOrder = true;
if (NULL == pTSchema || NULL == *pTSchema) {
*pTSchema = tBuildTSchema(pSchema, pDataBlock->pMeta->tableInfo.numOfColumns, pDataBlock->pMeta->sversion);
}
for (int c = 0; c < boundInfo->numOfBound; ++c) {
SSchema* pColSchema = &pSchema[boundInfo->pColIndex[c]];
if (pColSchema->colId <= lastColId) {
colInOrder = false;
} else {
lastColId = pColSchema->colId;
}
if (bind[c].num != rowNum) {
code = buildInvalidOperationMsg(&pBuf, "row number in each bind param should be the same");
goto _return;
}
if ((!(rowNum == 1 && bind[c].is_null && *bind[c].is_null)) &&
bind[c].buffer_type != pColSchema->type) {  // for rowNum == 1, the connector may not set buffer_type
code = buildInvalidOperationMsg(&pBuf, "column type mismatch with buffer type");
goto _return;
}
if (TSDB_DATA_TYPE_NCHAR == pColSchema->type) {
code = convertStmtNcharCol2(&pBuf, pColSchema, bind + c, &ncharBind, charsetCxt);
if (code) {
goto _return;
}
pBindInfos[c].bind = &ncharBind;
} else if (TSDB_DATA_TYPE_GEOMETRY == pColSchema->type) {
code = initCtxAsText();
if (code) {
qError("geometry init failed:%s", tstrerror(code));
goto _return;
}
uint8_t *buf = bind[c].buffer;
for (int j = 0; j < bind[c].num; j++) {
code = checkWKB(buf, bind[c].length[j]);
if (code) {
qError("geometry data must be in WKB format");
goto _return;
}
buf += bind[c].length[j];
}
pBindInfos[c].bind = bind + c;
} else {
pBindInfos[c].bind = bind + c;
}
pBindInfos[c].columnId = pColSchema->colId;
pBindInfos[c].type = pColSchema->type;
pBindInfos[c].bytes = pColSchema->bytes;
if (code) {
goto _return;
}
}
pDataBlock->pData->flags &= ~SUBMIT_REQ_COLUMN_DATA_FORMAT;
if (pDataBlock->pData->pCreateTbReq != NULL) {
pDataBlock->pData->flags |= SUBMIT_REQ_AUTO_CREATE_TABLE;
}
code = tRowBuildFromBind2(pBindInfos, boundInfo->numOfBound, colInOrder, *pTSchema, pCols, &pDataBlock->ordered,
&pDataBlock->duplicateTs);
qDebug("stmt2 all %d columns bind %d rows data as row format", boundInfo->numOfBound, rowNum);
_return:
taosMemoryFree(ncharBind.buffer);
taosMemoryFree(ncharBind.length);
return code;
}
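qBindStmt2RowValue() enforces two invariants before building rows: every bound column must carry the same number of rows, and each column's buffer_type must match the schema type unless it is the single-row all-NULL shortcut. A compact restatement of those guards in Python (the dict-based bind description is a simplification, not the connector's TAOS_STMT2_BIND layout):

def validate_binds(binds, schema_types):
    # Mirrors the per-column guards in qBindStmt2RowValue().
    row_num = binds[0]["num"]
    for b, expected in zip(binds, schema_types):
        if b["num"] != row_num:
            raise ValueError("row number in each bind param should be the same")
        single_row_null = row_num == 1 and b.get("is_null")
        if not single_row_null and b["buffer_type"] != expected:
            raise ValueError("column type mismatch with buffer type")

# Two columns, two rows each: passes both checks.
validate_binds(
    [{"buffer_type": "TIMESTAMP", "num": 2}, {"buffer_type": "FLOAT", "num": 2}],
    ["TIMESTAMP", "FLOAT"],
)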
int32_t buildBoundFields(int32_t numOfBound, int16_t* boundColumns, SSchema* pSchema, int32_t* fieldNum,
TAOS_FIELD_E** fields, uint8_t timePrec) {
if (fields != NULL) {
@ -1114,15 +1223,19 @@ int32_t qResetStmtDataBlock(STableDataCxt* block, bool deepClear) {
int32_t colNum = taosArrayGetSize(pBlock->pData->aCol);
for (int32_t i = 0; i < colNum; ++i) {
SColData* pCol = (SColData*)taosArrayGet(pBlock->pData->aCol, i);
if (pCol == NULL) {
parserError("qResetStmtDataBlock column:%d is NULL", i);
return terrno;
}
if (deepClear) {
tColDataDeepClear(pCol);
if (pBlock->pData->flags & SUBMIT_REQ_COLUMN_DATA_FORMAT) {
SColData* pCol = (SColData*)taosArrayGet(pBlock->pData->aCol, i);
if (pCol == NULL) {
parserError("qResetStmtDataBlock column:%d is NULL", i);
return terrno;
}
if (deepClear) {
tColDataDeepClear(pCol);
} else {
tColDataClear(pCol);
}
} else {
tColDataClear(pCol);
pBlock->pData->aRowP = taosArrayInit(20, POINTER_BYTES);
}
}
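Several hunks in this file branch on SUBMIT_REQ_COLUMN_DATA_FORMAT: when the flag is set, the submit payload lives in per-column arrays (aCol) that are cleared in place, and when it is cleared (as qBindStmt2RowValue() does for row binding), the payload lives in a row array (aRowP) that is re-created. A schematic of that reset decision; field names follow the C code, while the Python shape and the bit value are illustrative assumptions:

SUBMIT_REQ_COLUMN_DATA_FORMAT = 0x1  # assumed bit value, for illustration only

def reset_submit_data(data):
    # data stands in for SSubmitTbData; flags selects the payload layout.
    if data["flags"] & SUBMIT_REQ_COLUMN_DATA_FORMAT:
        for col in data["aCol"]:   # column format: clear columns in place
            col.clear()            # tColDataClear()/tColDataDeepClear()
    else:
        data["aRowP"] = []         # row format: start a fresh row array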

View File

@ -360,7 +360,9 @@ static SKeyword keywordTable[] = {
{"ON_FAILURE", TK_ON_FAILURE},
{"NOTIFY_HISTORY", TK_NOTIFY_HISTORY},
{"REGEXP", TK_REGEXP},
{"TRUE_FOR", TK_TRUE_FOR}
{"ASSIGN", TK_ASSIGN},
{"TRUE_FOR", TK_TRUE_FOR},
{"META_ONLY", TK_META_ONLY}
};
// clang-format on

View File

@ -8934,6 +8934,10 @@ static int32_t fillCmdSql(STranslateContext* pCxt, int16_t msgType, void* pReq)
FILL_CMD_SQL(sql, sqlLen, pCmdReq, SBalanceVgroupReq, pReq);
break;
}
case TDMT_MND_ARB_ASSIGN_LEADER: {
FILL_CMD_SQL(sql, sqlLen, pCmdReq, SAssignLeaderReq, pReq);
break;
}
case TDMT_MND_REDISTRIBUTE_VGROUP: {
FILL_CMD_SQL(sql, sqlLen, pCmdReq, SRedistributeVgroupReq, pReq);
break;
@ -11315,9 +11319,11 @@ static int32_t translateCompactRange(STranslateContext* pCxt, const char* dbName
}
static int32_t translateCompactDb(STranslateContext* pCxt, SCompactDatabaseStmt* pStmt) {
SCompactDbReq compactReq = {0};
SName name;
int32_t code = TSDB_CODE_SUCCESS;
SCompactDbReq compactReq = {
.metaOnly = pStmt->metaOnly,
};
SName name;
int32_t code = TSDB_CODE_SUCCESS;
code = tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->dbName, strlen(pStmt->dbName));
if (TSDB_CODE_SUCCESS != code) return code;
@ -11384,7 +11390,9 @@ static int32_t translateVgroupList(STranslateContext* pCxt, SNodeList* vgroupLis
static int32_t translateCompactVgroups(STranslateContext* pCxt, SCompactVgroupsStmt* pStmt) {
int32_t code = TSDB_CODE_SUCCESS;
SName name;
SCompactDbReq req = {0};
SCompactDbReq req = {
.metaOnly = pStmt->metaOnly,
};
code = tNameSetDbName(&name, pCxt->pParseCxt->acctId, ((SValueNode*)pStmt->pDbName)->literal,
strlen(((SValueNode*)pStmt->pDbName)->literal));
@ -13182,6 +13190,13 @@ static int32_t translateBalanceVgroup(STranslateContext* pCxt, SBalanceVgroupStm
return code;
}
static int32_t translateAssignLeader(STranslateContext* pCxt, SAssignLeaderStmt* pStmt) {
SAssignLeaderReq req = {0};
int32_t code = buildCmdMsg(pCxt, TDMT_MND_ARB_ASSIGN_LEADER, (FSerializeFunc)tSerializeSAssignLeaderReq, &req);
tFreeSAssignLeaderReq(&req);
return code;
}
static int32_t translateBalanceVgroupLeader(STranslateContext* pCxt, SBalanceVgroupLeaderStmt* pStmt) {
SBalanceVgroupLeaderReq req = {0};
req.vgId = pStmt->vgId;
@ -13939,6 +13954,9 @@ static int32_t translateQuery(STranslateContext* pCxt, SNode* pNode) {
case QUERY_NODE_BALANCE_VGROUP_STMT:
code = translateBalanceVgroup(pCxt, (SBalanceVgroupStmt*)pNode);
break;
case QUERY_NODE_ASSIGN_LEADER_STMT:
code = translateAssignLeader(pCxt, (SAssignLeaderStmt*)pNode);
break;
case QUERY_NODE_BALANCE_VGROUP_LEADER_STMT:
code = translateBalanceVgroupLeader(pCxt, (SBalanceVgroupLeaderStmt*)pNode);
break;

View File

@ -777,7 +777,8 @@ static int32_t doStreamExecTask(SStreamTask* pTask) {
int32_t code = 0;
// merge multiple input data if possible in the input queue.
stDebug("s-task:%s start to extract data block from inputQ", id);
int64_t st = taosGetTimestampMs();
stDebug("s-task:%s start to extract data block from inputQ, ts:%" PRId64, id, st);
while (1) {
int32_t blockSize = 0;
@ -807,8 +808,6 @@ static int32_t doStreamExecTask(SStreamTask* pTask) {
return 0;
}
int64_t st = taosGetTimestampMs();
EExtractDataCode ret = streamTaskGetDataFromInputQ(pTask, &pInput, &numOfBlocks, &blockSize);
if (ret == EXEC_AFTER_IDLE) {
streamTaskSetIdleInfo(pTask, MIN_INVOKE_INTERVAL);
@ -825,6 +824,10 @@ static int32_t doStreamExecTask(SStreamTask* pTask) {
// dispatch checkpoint msg to all downstream tasks
int32_t type = pInput->type;
if (type == STREAM_INPUT__CHECKPOINT_TRIGGER) {
#if 0
// Error injection: for the automatic kill-long-transaction test
taosMsleep(50*1000);
#endif
code = streamProcessCheckpointTriggerBlock(pTask, (SStreamDataBlock*)pInput);
if (code != 0) {
stError("s-task:%s failed to process checkpoint-trigger block, code:%s", pTask->id.idStr, tstrerror(code));

View File

@ -334,10 +334,12 @@ int32_t syncBecomeAssignedLeader(SSyncNode* ths, SRpcMsg* pRpcMsg) {
ths->arbTerm = TMAX(req.arbTerm, ths->arbTerm);
if (strncmp(req.memberToken, ths->arbToken, TSDB_ARB_TOKEN_SIZE) != 0) {
sInfo("vgId:%d, skip to set assigned leader, token mismatch, local:%s, msg:%s", ths->vgId, ths->arbToken,
req.memberToken);
goto _OVER;
if (!req.force) {
if (strncmp(req.memberToken, ths->arbToken, TSDB_ARB_TOKEN_SIZE) != 0) {
sInfo("vgId:%d, skip to set assigned leader, token mismatch, local:%s, msg:%s", ths->vgId, ths->arbToken,
req.memberToken);
goto _OVER;
}
}
if (ths->state != TAOS_SYNC_STATE_ASSIGNED_LEADER) {
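The rewritten guard above changes the token-checking semantics: a forced request (req.force) now bypasses the arbitration-token comparison entirely, while a non-forced request is still rejected on mismatch. Reduced to a predicate, the decision is (a sketch, not the engine code):

def reject_assign(force: bool, member_token: str, local_token: str) -> bool:
    # A forced ASSIGN LEADER skips the token comparison entirely;
    # a non-forced request with a mismatched token is still skipped.
    return (not force) and member_token != local_token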

View File

@ -0,0 +1,123 @@
import taos
import sys
import os
import subprocess
import glob
import shutil
import time
from frame.log import *
from frame.cases import *
from frame.sql import *
from frame.srvCtl import *
from frame.caseBase import *
from frame import *
from frame.autogen import *
from frame import epath
# from frame.server.dnodes import *
# from frame.server.cluster import *
class TDTestCase(TBase):
def init(self, conn, logSql, replicaVar=1):
updatecfgDict = {'dDebugFlag':131}
super(TDTestCase, self).init(conn, logSql, replicaVar=1, checkColName="c1")
self.valgrind = 0
self.db = "test"
self.stb = "meters"
self.childtable_count = 10
tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.execute('CREATE DATABASE db vgroups 1 replica 2;')
time.sleep(1)
tdSql.execute("use db;")
tdSql.execute("CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);")
tdSql.execute("CREATE TABLE d0 USING meters TAGS (\"California.SanFrancisco\", 2);");
count = 0
while count < 100:
tdSql.query("show arbgroups;")
if tdSql.getData(0, 4) == True:
break
tdLog.info("wait 1 seconds for is sync")
time.sleep(1)
count += 1
if count == 100:
tdLog.exit("arbgroup sync failed")
return
sc.dnodeStop(2)
sc.dnodeStop(3)
sc.dnodeStart(2)
count = 0
while count < 100:
tdSql.query("show db.vgroups;")
if (tdSql.getData(0, 4) == "candidate") or (tdSql.getData(0, 6) == "candidate"):
break
tdLog.info("wait 1 second for candidate")
time.sleep(1)
count += 1
if count == 100:
tdLog.exit("wait candidate failed")
return
tdSql.execute("ASSIGN LEADER FORCE;")
count = 0
while count < 100:
tdSql.query("show db.vgroups;")
if (tdSql.getData(0, 4) == "assigned ") or (tdSql.getData(0, 6) == "assigned "):
break
tdLog.info("wait 1 second for assigned leader")
time.sleep(1)
count += 1
if count == 100:
tdLog.exit("assign leader failed")
return
tdSql.execute("INSERT INTO d0 VALUES (NOW, 10.3, 219, 0.31);")
sc.dnodeStart(3)
count = 0
while count < 100:
tdSql.query("show arbgroups;")
if tdSql.getData(0, 4) == 1:
break
tdLog.info("wait 1 seconds for is sync")
time.sleep(1)
count += 1
if count == 100:
tdLog.exit("arbgroup sync failed")
return
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
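This case is wired into CI by the cases.task entry added later in this commit, which runs it on a three-node cluster. An equivalent local invocation, assuming the army framework's working directory, could be scripted as:

import subprocess

# Mirrors the cases.task line:
#   ,,y,army,./pytest.sh python3 ./test.py -f cluster/arbitrator_restart.py -N 3
subprocess.run(
    ["./pytest.sh", "python3", "./test.py",
     "-f", "cluster/arbitrator_restart.py", "-N", "3"],
    check=True,
)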

View File

@ -24,7 +24,7 @@ import platform
import socket
import threading
import importlib
import ast
import toml
from frame.log import *
@ -56,6 +56,17 @@ def checkRunTimeError():
if hwnd:
os.system("TASKKILL /F /IM taosd.exe")
def get_local_classes_in_order(file_path):
with open(file_path, "r", encoding="utf-8") as file:
tree = ast.parse(file.read(), filename=file_path)
classes = [node.name for node in ast.walk(tree) if isinstance(node, ast.ClassDef)]
return classes
def dynamicLoadModule(fileName):
moduleName = fileName.replace(".py", "").replace(os.sep, ".")
return importlib.import_module(moduleName, package='..')
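The two helpers above replace the hard-coded TDTestCase lookup used before this commit: the runner imports the case module and instantiates the last class defined in the file, so a case class may now carry any name (the new TestCompactMeta case below relies on this). A hypothetical end-to-end use, with the file path assumed:

file_name = "cluster/arbitrator_restart.py"         # assumed case file
module = dynamicLoadModule(file_name)
class_names = get_local_classes_in_order(file_name)
case_class = getattr(module, class_names[-1])       # last class defined wins
case = case_class()                                 # then init()/run()/stop()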
#
# run case on previous cluster
#
@ -66,9 +77,11 @@ def runOnPreviousCluster(host, config, fileName):
sep = "/"
if platform.system().lower() == 'windows':
sep = os.sep
moduleName = fileName.replace(".py", "").replace(sep, ".")
uModule = importlib.import_module(moduleName)
case = uModule.TDTestCase()
uModule = dynamicLoadModule(fileName)
class_names = get_local_classes_in_order(fileName)
case_class = getattr(uModule, class_names[-1])
case = case_class()
# create conn
conn = taos.connect(host, config)
@ -358,10 +371,11 @@ if __name__ == "__main__":
updateCfgDictStr = ''
# adapter_cfg_dict_str = ''
if is_test_framework:
moduleName = fileName.replace(".py", "").replace(os.sep, ".")
uModule = importlib.import_module(moduleName)
uModule = dynamicLoadModule(fileName)
try:
ucase = uModule.TDTestCase()
class_names = get_local_classes_in_order(fileName)
case_class = getattr(uModule, class_names[-1])
ucase = case_class()
if ((json.dumps(updateCfgDict) == '{}') and hasattr(ucase, 'updatecfgDict')):
updateCfgDict = ucase.updatecfgDict
updateCfgDictStr = "-d %s"%base64.b64encode(json.dumps(updateCfgDict).encode()).decode()
@ -530,10 +544,11 @@ if __name__ == "__main__":
except:
pass
if is_test_framework:
moduleName = fileName.replace(".py", "").replace("/", ".")
uModule = importlib.import_module(moduleName)
uModule = dynamicLoadModule(fileName)
try:
ucase = uModule.TDTestCase()
class_names = get_local_classes_in_order(fileName)
case_class = getattr(uModule, class_names[-1])
ucase = case_class()
if (json.dumps(updateCfgDict) == '{}'):
updateCfgDict = ucase.updatecfgDict
if (json.dumps(adapter_cfg_dict) == '{}'):

View File

@ -22,6 +22,9 @@ import json
import platform
import socket
import threading
import ast
import importlib
import os
import toml
@ -56,6 +59,17 @@ def checkRunTimeError():
if hwnd:
os.system("TASKKILL /F /IM taosd.exe")
def get_local_classes_in_order(file_path):
with open(file_path, "r", encoding="utf-8") as file:
tree = ast.parse(file.read(), filename=file_path)
classes = [node.name for node in ast.walk(tree) if isinstance(node, ast.ClassDef)]
return classes
def dynamicLoadModule(fileName):
moduleName = fileName.replace(".py", "").replace(os.sep, ".")
return importlib.import_module(moduleName, package='..')
if __name__ == "__main__":
@ -295,10 +309,11 @@ if __name__ == "__main__":
updateCfgDictStr = ""
# adapter_cfg_dict_str = ''
if is_test_framework:
moduleName = fileName.replace(".py", "").replace(os.sep, ".")
uModule = importlib.import_module(moduleName)
uModule = dynamicLoadModule(fileName)
try:
ucase = uModule.TDTestCase()
class_names = get_local_classes_in_order(fileName)
case_class = getattr(uModule, class_names[-1])
ucase = case_class()
if (json.dumps(updateCfgDict) == "{}") and hasattr(
ucase, "updatecfgDict"
):
@ -434,10 +449,11 @@ if __name__ == "__main__":
except:
pass
if is_test_framework:
moduleName = fileName.replace(".py", "").replace("/", ".")
uModule = importlib.import_module(moduleName)
uModule = dynamicLoadModule(fileName)
try:
ucase = uModule.TDTestCase()
class_names = get_local_classes_in_order(fileName)
case_class = getattr(uModule, class_names[-1])
ucase = case_class()
if json.dumps(updateCfgDict) == "{}":
updateCfgDict = ucase.updatecfgDict
if json.dumps(adapter_cfg_dict) == "{}":

View File

@ -22,6 +22,7 @@
,,y,army,./pytest.sh python3 ./test.py -f multi-level/mlevel_basic.py -N 3 -L 3 -D 2
,,y,army,./pytest.sh python3 ./test.py -f db-encrypt/basic.py -N 3 -M 3
,,y,army,./pytest.sh python3 ./test.py -f cluster/arbitrator.py -N 3
,,y,army,./pytest.sh python3 ./test.py -f cluster/arbitrator_restart.py -N 3
,,n,army,python3 ./test.py -f storage/s3/s3Basic.py -N 3
,,y,army,./pytest.sh python3 ./test.py -f cluster/snapshot.py -N 3 -L 3 -D 2
,,y,army,./pytest.sh python3 ./test.py -f query/function/test_func_elapsed.py
@ -1812,3 +1813,6 @@
,,n,develop-test,python3 ./test.py -f 2-query/ts-range.py
,,n,develop-test,python3 ./test.py -f 2-query/tag_scan.py
,,n,develop-test,python3 ./test.py -f 2-query/show_create_db.py
# new test
,,y,test_new,./pytest.sh python3 ./test.py -f storage/compact/test_compact_meta.py

View File

@ -19,6 +19,7 @@ import subprocess
import time
from distutils.log import warn as printf
import platform
import ast
from util.log import *
from util.dnodes import *
@ -26,6 +27,17 @@ from util.cases import *
import taos
def get_local_classes_in_order(file_path):
with open(file_path, "r", encoding="utf-8") as file:
tree = ast.parse(file.read(), filename=file_path)
classes = [node.name for node in ast.walk(tree) if isinstance(node, ast.ClassDef)]
return classes
def dynamicLoadModule(fileName):
moduleName = fileName.replace(".py", "").replace(os.sep, ".")
return importlib.import_module(moduleName, package='..')
if __name__ == "__main__":
fileName = "all"
@ -136,10 +148,11 @@ if __name__ == "__main__":
except:
pass
if is_test_framework:
moduleName = fileName.replace(".py", "").replace(os.sep, ".")
uModule = importlib.import_module(moduleName)
uModule = dynamicLoadModule(fileName)
try:
ucase = uModule.TDTestCase()
class_names = get_local_classes_in_order(fileName)
case_class = getattr(uModule, class_names[-1])
ucase = case_class()
tdDnodes.deploy(1,ucase.updatecfgDict)
except :
tdDnodes.deploy(1,{})
@ -170,10 +183,11 @@ if __name__ == "__main__":
except:
pass
if is_test_framework:
moduleName = fileName.replace(".py", "").replace("/", ".")
uModule = importlib.import_module(moduleName)
uModule = dynamicLoadModule(fileName)
try:
ucase = uModule.TDTestCase()
class_names = get_local_classes_in_order(fileName)
case_class = getattr(uModule, class_names[-1])
ucase = case_class()
tdDnodes.deploy(1,ucase.updatecfgDict)
except :
tdDnodes.deploy(1,{})

View File

@ -20,6 +20,7 @@ import importlib
import traceback
from util.log import *
import platform
import ast
class TDCase:
@ -51,12 +52,23 @@ class TDCases:
def addCluster(self, name, case):
self.clusterCases.append(TDCase(name, case))
def get_local_classes_in_order(self, file_path):
with open(file_path, "r", encoding="utf-8") as file:
tree = ast.parse(file.read(), filename=file_path)
classes = [node.name for node in ast.walk(
tree) if isinstance(node, ast.ClassDef)]
return classes
def runAllLinux(self, conn):
# TODO: load all Linux cases here
runNum = 0
for tmp in self.linuxCases:
if tmp.name.find(fileName) != -1:
case = testModule.TDTestCase()
# get the last class name as the test case class name
class_names = self.get_local_classes_in_order(fileName)
case_class = getattr(testModule, class_names[-1])
case = case_class()
case.init(conn)
case.run()
case.stop()
@ -71,7 +83,10 @@ class TDCases:
runNum = 0
for tmp in self.linuxCases:
if tmp.name.find(fileName) != -1:
case = testModule.TDTestCase()
# get the last class name as the test case class name
class_names = self.get_local_classes_in_order(fileName)
case_class = getattr(testModule, class_names[-1])
case = case_class()
case.init(conn, self._logSql, replicaVar)
try:
case.run()
@ -88,7 +103,10 @@ class TDCases:
runNum = 0
for tmp in self.windowsCases:
if tmp.name.find(fileName) != -1:
case = testModule.TDTestCase()
# get the last class name as the test case class name
class_names = self.get_local_classes_in_order(fileName)
case_class = getattr(testModule, class_names[-1])
case = case_class()
case.init(conn)
case.run()
case.stop()
@ -103,8 +121,11 @@ class TDCases:
runNum = 0
for tmp in self.windowsCases:
if tmp.name.find(fileName) != -1:
case = testModule.TDTestCase()
case.init(conn, self._logSql,replicaVar)
# get the last class name as the test case class name
class_names = self.get_local_classes_in_order(fileName)
case_class = getattr(testModule, class_names[-1])
case = case_class()
case.init(conn, self._logSql, replicaVar)
try:
case.run()
except Exception as e:
@ -117,12 +138,16 @@ class TDCases:
def runAllCluster(self):
# TODO: load all cluster case module here
testModule = self.__dynamicLoadModule(fileName)
runNum = 0
for tmp in self.clusterCases:
if tmp.name.find(fileName) != -1:
tdLog.notice("run cases like %s" % (fileName))
case = testModule.TDTestCase()
# get the last class name as the test case class name
class_names = self.get_local_classes_in_order(fileName)
case_class = getattr(testModule, class_names[-1])
case = case_class()
case.init()
case.run()
case.stop()
@ -138,7 +163,10 @@ class TDCases:
for tmp in self.clusterCases:
if tmp.name.find(fileName) != -1:
tdLog.notice("run cases like %s" % (fileName))
case = testModule.TDTestCase()
# get the last class name as the test case class name
class_names = self.get_local_classes_in_order(fileName)
case_class = getattr(testModule, class_names[-1])
case = case_class()
case.init()
case.run()
case.stop()
@ -170,19 +198,20 @@ class TDCases:
else:
tdLog.info("taosBenchmark found in %s" % paths[0])
return paths[0]
def taosBenchmarkExec(self, param):
buildPath = tdCases.getTaosBenchmarkPath()
if (platform.system().lower() == 'windows'):
cmdStr1 = ' mintty -h never %s %s '%(buildPath, param)
cmdStr1 = ' mintty -h never %s %s ' % (buildPath, param)
tdLog.info(cmdStr1)
os.system(cmdStr1)
else:
cmdStr1 = '%s %s &'%(buildPath, param)
cmdStr1 = '%s %s &' % (buildPath, param)
tdLog.info(cmdStr1)
os.system(cmdStr1)
time.sleep(5)
tdCases = TDCases()

View File

@ -24,6 +24,7 @@ import platform
import socket
import threading
import importlib
import ast
print(f"Python version: {sys.version}")
print(f"Version info: {sys.version_info}")
@ -58,6 +59,18 @@ def checkRunTimeError():
if hwnd:
os.system("TASKKILL /F /IM taosd.exe")
def get_local_classes_in_order(file_path):
with open(file_path, "r", encoding="utf-8") as file:
tree = ast.parse(file.read(), filename=file_path)
classes = [node.name for node in ast.walk(tree) if isinstance(node, ast.ClassDef)]
return classes
def dynamicLoadModule(fileName):
moduleName = fileName.replace(".py", "").replace(os.sep, ".")
return importlib.import_module(moduleName, package='..')
#
# run case on previous cluster
#
@ -68,9 +81,11 @@ def runOnPreviousCluster(host, config, fileName):
sep = "/"
if platform.system().lower() == 'windows':
sep = os.sep
moduleName = fileName.replace(".py", "").replace(sep, ".")
uModule = importlib.import_module(moduleName)
case = uModule.TDTestCase()
uModule = dynamicLoadModule(fileName)
class_names = get_local_classes_in_order(fileName)
case_class = getattr(uModule, class_names[-1])
case = case_class()
# create conn
conn = taos.connect(host, config)
@ -350,10 +365,11 @@ if __name__ == "__main__":
updateCfgDictStr = ''
# adapter_cfg_dict_str = ''
if is_test_framework:
moduleName = fileName.replace(".py", "").replace(os.sep, ".")
uModule = importlib.import_module(moduleName)
uModule = dynamicLoadModule(fileName)
try:
ucase = uModule.TDTestCase()
class_names = get_local_classes_in_order(fileName)
case_class = getattr(uModule, class_names[-1])
ucase = case_class()
if ((json.dumps(updateCfgDict) == '{}') and hasattr(ucase, 'updatecfgDict')):
updateCfgDict = ucase.updatecfgDict
updateCfgDictStr = "-d %s"%base64.b64encode(json.dumps(updateCfgDict).encode()).decode()
@ -522,10 +538,11 @@ if __name__ == "__main__":
except:
pass
if is_test_framework:
moduleName = fileName.replace(".py", "").replace("/", ".")
uModule = importlib.import_module(moduleName)
uModule = dynamicLoadModule(fileName)
try:
ucase = uModule.TDTestCase()
class_names = get_local_classes_in_order(fileName)
case_class = getattr(uModule, class_names[-1])
ucase = case_class()
if (json.dumps(updateCfgDict) == '{}'):
updateCfgDict = ucase.updatecfgDict
if (json.dumps(adapter_cfg_dict) == '{}'):

View File

@ -0,0 +1,248 @@
# tests/test_new/xxx/xxx/test_xxx.py
# import ...
'''
./pytest.sh python3 ./test.py -f storage/compact/test_compact_meta.py
'''
import taos
import sys
from math import inf
from util.dnodes import tdDnodes
from util.sql import *
from util.cases import *
from util.log import *
import inspect
import random
import time
sys.path.append("../tests/pytest")
class TestCompactMeta:
def caseDescription(self):
'''
case1<Hongze Cheng>: [TS-5445] Compact Meta Data
'''
return
def init(self, conn, logSql, replicaVer=1):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), True)
self.conn = conn
def run(self):
self.test_case1()
self.test_case2()
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
def test_case1(self):
"""
Description:
1. Alter child table tags
2. Make sure compact meta works
"""
tdLog.info(f'case {inspect.currentframe().f_code.co_name} start')
db_name = 'db1'
stb_name = 'stb1'
ctb_name_prefix = 'ctb'
num_child_tables = 10000
# Drop database
sql = f'drop database if exists {db_name}'
tdSql.execute(sql)
# Create database
sql = f'create database {db_name} vgroups 1'
tdSql.execute(sql)
# Create super table
sql = f'create table {db_name}.{stb_name} (ts timestamp, c1 int, c2 int) tags(t1 int)'
tdSql.execute(sql)
# Create child tables
tdLog.info(
f'case {inspect.currentframe().f_code.co_name}: create {num_child_tables} child tables')
for i in range(1, num_child_tables+1):
if i % 100 == 0:
tdLog.info(
f'case {inspect.currentframe().f_code.co_name}: create {i} child tables')
sql = f'create table {db_name}.{ctb_name_prefix}{i} using {db_name}.{stb_name} tags({i})'
tdSql.execute(sql)
# Insert some data
tdLog.info(
f'case {inspect.currentframe().f_code.co_name}: insert data to child tables')
for i in range(1, num_child_tables+1):
if i % 100 == 0:
tdLog.info(
f'case {inspect.currentframe().f_code.co_name}: insert data to {i} child tables')
sql = f'insert into {db_name}.{ctb_name_prefix}{i} values(now, 1, 2)'
tdSql.execute(sql)
# Alter child table tags
tdLog.info(
f'case {inspect.currentframe().f_code.co_name}: alter child table tags')
for i in range(1, num_child_tables+1):
if i % 100 == 0:
tdLog.info(
f'case {inspect.currentframe().f_code.co_name}: altered {i} child tables')
sql = f'alter table {db_name}.{ctb_name_prefix}{i} set tag t1 = {i+1}'
tdSql.execute(sql)
# Randomly select 100 child tables to do query
tdLog.info(
f'case {inspect.currentframe().f_code.co_name}: randomly select 100 child tables to query')
selected_tables = random.sample(range(1, num_child_tables + 1), 100)
for i, table_idx in enumerate(selected_tables):
# Query data from the child table
sql = f'select count(*) from {db_name}.{stb_name} where t1 = {table_idx + 1}'
tdSql.query(sql)
tdSql.checkData(0, 0, 1)  # expect exactly one row per child table
# Compact meta
tdLog.info(
f'case {inspect.currentframe().f_code.co_name}: start to compact meta')
sql = f'compact database {db_name} meta_only'
tdSql.execute(sql)
# Wait for the compact is done
tdLog.info(
f'case {inspect.currentframe().f_code.co_name}: wait compact is done')
while True:
sql = 'show compacts'
rows = tdSql.query(sql)
if rows == 0:
break
time.sleep(1)
# Write more data
tdLog.info(
f'case {inspect.currentframe().f_code.co_name}: insert more data to child tables')
for i in range(1, num_child_tables+1):
if i % 100 == 0:
tdLog.info(
f'case {inspect.currentframe().f_code.co_name}: insert data to {i} child tables')
sql = f'insert into {db_name}.{ctb_name_prefix}{i} values(now, 1, 2)'
tdSql.execute(sql)
# Randomly select 100 child tables to do query
tdLog.info(
f'case {inspect.currentframe().f_code.co_name}: query data again to verify')
for i, table_idx in enumerate(selected_tables):
# Query data from the child table
sql = f'select count(*) from {db_name}.{stb_name} where t1 = {table_idx + 1}'
tdSql.query(sql)
tdSql.checkData(0, 0, 2)  # expect two rows after the second insert
def test_case2(self):
"""
Description:
1. Alter super table schema
2. Make sure compact meta works
"""
tdLog.info(f'case {inspect.currentframe().f_code.co_name} start')
db_name = 'db2'
stb_name = 'stb2'
ctb_name_prefix = 'ctb'
num_child_tables = 1000
# Drop database
sql = f'drop database if exists {db_name}'
tdSql.execute(sql)
# Create database
sql = f'create database {db_name} vgroups 1'
tdSql.execute(sql)
# Create super table
sql = f'create table {db_name}.{stb_name} (ts timestamp, c1 int, c2 int) tags(t1 int)'
tdSql.execute(sql)
# Create child tables
tdLog.info(
f'case {inspect.currentframe().f_code.co_name}: create {num_child_tables} child tables')
for i in range(1, num_child_tables+1):
if i % 100 == 0:
tdLog.info(
f'case {inspect.currentframe().f_code.co_name}: create {i} child tables')
sql = f'create table {db_name}.{ctb_name_prefix}{i} using {db_name}.{stb_name} tags({i})'
tdSql.execute(sql)
# Insert some data
tdLog.info(
f'case {inspect.currentframe().f_code.co_name}: insert data to child tables')
for i in range(1, num_child_tables+1):
if i % 100 == 0:
tdLog.info(
f'case {inspect.currentframe().f_code.co_name}: insert data to {i} child tables')
sql = f'insert into {db_name}.{ctb_name_prefix}{i} (ts, c1) values (now, 1)'
tdSql.execute(sql)
# Alter super table schema
tdLog.info(
f'case {inspect.currentframe().f_code.co_name}: alter super table schema')
for i in range(3, 2000):
if i % 100 == 0:
tdLog.info(
f'case {inspect.currentframe().f_code.co_name}: altered {i} times of super table schema')
# Add a column
sql = f'alter table {db_name}.{stb_name} add column c{i} int'
tdSql.execute(sql)
# Drop a column
sql = f'alter table {db_name}.{stb_name} drop column c{i}'
tdSql.execute(sql)
# Randomly select 100 child tables to do query
tdLog.info(
f'case {inspect.currentframe().f_code.co_name}: randomly select 100 child tables to query')
selected_tables = random.sample(range(1, num_child_tables + 1), 100)
for i, table_idx in enumerate(selected_tables):
# Query data from the child table
sql = f'select count(*) from {db_name}.{stb_name} where t1 = {table_idx}'
tdSql.query(sql)
tdSql.checkData(0, 0, 1)  # expect exactly one row per child table
# Compact meta
tdLog.info(
f'case {inspect.currentframe().f_code.co_name}: start to compact meta')
sql = f'compact database {db_name} meta_only'
tdSql.execute(sql)
# Wait for the compact is done
tdLog.info(
f'case {inspect.currentframe().f_code.co_name}: wait compact is done')
while True:
sql = 'show compacts'
rows = tdSql.query(sql)
if rows == 0:
break
time.sleep(1)
# Write more data
tdLog.info(
f'case {inspect.currentframe().f_code.co_name}: insert more data to child tables')
for i in range(1, num_child_tables+1):
if i % 100 == 0:
tdLog.info(
f'case {inspect.currentframe().f_code.co_name}: insert data to {i} child tables')
sql = f'insert into {db_name}.{ctb_name_prefix}{i} values(now, 1, 2)'
tdSql.execute(sql)
# Randomly select 100 child tables to do query
tdLog.info(
f'case {inspect.currentframe().f_code.co_name}: query data again to verify')
for i, table_idx in enumerate(selected_tables):
# Query data from the child table
sql = f'select count(*) from {db_name}.{stb_name} where t1 = {table_idx}'
tdSql.query(sql)
tdSql.checkData(0, 0, 2)  # expect two rows after the second insert
tdCases.addWindows(__file__, TestCompactMeta())
tdCases.addLinux(__file__, TestCompactMeta())

View File

@ -39,14 +39,15 @@ import taos
import taosrest
import taosws
def checkRunTimeError():
import win32gui
timeCount = 0
while 1:
time.sleep(1)
timeCount = timeCount + 1
print("checkRunTimeError",timeCount)
if (timeCount>1200):
print("checkRunTimeError", timeCount)
if (timeCount > 1200):
print("stop the test.")
os.system("TASKKILL /F /IM taosd.exe")
os.system("TASKKILL /F /IM taos.exe")
@ -54,13 +55,30 @@ def checkRunTimeError():
os.system("TASKKILL /F /IM mintty.exe")
os.system("TASKKILL /F /IM python.exe")
quit(0)
hwnd = win32gui.FindWindow(None, "Microsoft Visual C++ Runtime Library")
hwnd = win32gui.FindWindow(
None, "Microsoft Visual C++ Runtime Library")
if hwnd:
os.system("TASKKILL /F /IM taosd.exe")
def get_local_classes_in_order(file_path):
with open(file_path, "r", encoding="utf-8") as file:
tree = ast.parse(file.read(), filename=file_path)
classes = [node.name for node in ast.walk(
tree) if isinstance(node, ast.ClassDef)]
return classes
def dynamicLoadModule(fileName):
moduleName = fileName.replace(".py", "").replace(os.sep, ".")
return importlib.import_module(moduleName, package='..')
#
# run case on previous cluster
#
def runOnPreviousCluster(host, config, fileName):
print("enter run on previeous")
@ -68,9 +86,11 @@ def runOnPreviousCluster(host, config, fileName):
sep = "/"
if platform.system().lower() == 'windows':
sep = os.sep
moduleName = fileName.replace(".py", "").replace(sep, ".")
uModule = importlib.import_module(moduleName)
case = uModule.TDTestCase()
uModule = dynamicLoadModule(fileName)
class_names = get_local_classes_in_order(fileName)
case_class = getattr(uModule, class_names[-1])
case = case_class()
# create conn
conn = taos.connect(host, config)
@ -115,7 +135,7 @@ if __name__ == "__main__":
previousCluster = False
crashGen = False
opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:l:scghrd:k:e:N:M:Q:C:RWD:n:i:aP:G', [
'file=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help', 'restart', 'updateCfgDict', 'killv', 'execCmd','dnodeNums','mnodeNums','queryPolicy','createDnodeNums','restful','websocket','adaptercfgupdate','replicaVar','independentMnode','previous',"crashGen"])
'file=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help', 'restart', 'updateCfgDict', 'killv', 'execCmd', 'dnodeNums', 'mnodeNums', 'queryPolicy', 'createDnodeNums', 'restful', 'websocket', 'adaptercfgupdate', 'replicaVar', 'independentMnode', 'previous', "crashGen"])
for key, value in opts:
if key in ['-h', '--help']:
tdLog.printNoPrefix(
@ -141,7 +161,8 @@ if __name__ == "__main__":
tdLog.printNoPrefix('-n the number of replicas')
tdLog.printNoPrefix('-i independentMnode Mnode')
tdLog.printNoPrefix('-a address sanitizer mode')
tdLog.printNoPrefix('-P run case with [P]revious cluster, do not create new cluster to run case.')
tdLog.printNoPrefix(
'-P run case with [P]revious cluster, do not create new cluster to run case.')
tdLog.printNoPrefix('-G crashGen mode')
sys.exit(0)
@ -219,7 +240,8 @@ if __name__ == "__main__":
if key in ['-D', '--adaptercfgupdate']:
try:
adaptercfgupdate = eval(base64.b64decode(value.encode()).decode())
adaptercfgupdate = eval(
base64.b64decode(value.encode()).decode())
except:
print('adapter cfg update convert fail.')
sys.exit(0)
@ -233,7 +255,6 @@ if __name__ == "__main__":
if key in ['-G', '--crashGen']:
crashGen = True
#
# do exeCmd command
#
@ -260,7 +281,7 @@ if __name__ == "__main__":
psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled
processID = subprocess.check_output(psCmd, shell=True)
while(processID):
while (processID):
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(psCmd, shell=True)
@ -287,7 +308,7 @@ if __name__ == "__main__":
# psCmd = f"pgrep {toBeKilled}"
processID = subprocess.check_output(psCmd, shell=True)
while(processID):
while (processID):
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(psCmd, shell=True)
@ -334,7 +355,7 @@ if __name__ == "__main__":
if platform.system().lower() == 'windows':
fileName = fileName.replace("/", os.sep)
if (masterIp == "" and not fileName == "0-others\\udf_create.py"):
threading.Thread(target=checkRunTimeError,daemon=True).start()
threading.Thread(target=checkRunTimeError, daemon=True).start()
tdLog.info("Procedures for testing self-deployment")
tdDnodes.init(deployPath, masterIp)
tdDnodes.setTestCluster(testCluster)
@ -350,13 +371,15 @@ if __name__ == "__main__":
updateCfgDictStr = ''
# adapter_cfg_dict_str = ''
if is_test_framework:
moduleName = fileName.replace(".py", "").replace(os.sep, ".")
uModule = importlib.import_module(moduleName)
uModule = dynamicLoadModule(fileName)
try:
ucase = uModule.TDTestCase()
class_names = get_local_classes_in_order(fileName)
case_class = getattr(uModule, class_names[-1])
ucase = case_class()
if ((json.dumps(updateCfgDict) == '{}') and hasattr(ucase, 'updatecfgDict')):
updateCfgDict = ucase.updatecfgDict
updateCfgDictStr = "-d %s"%base64.b64encode(json.dumps(updateCfgDict).encode()).decode()
updateCfgDictStr = "-d %s" % base64.b64encode(
json.dumps(updateCfgDict).encode()).decode()
if ((json.dumps(adapter_cfg_dict) == '{}') and hasattr(ucase, 'taosadapter_cfg_dict')):
adapter_cfg_dict = ucase.taosadapter_cfg_dict
# adapter_cfg_dict_str = f"-D {base64.b64encode(toml.dumps(adapter_cfg_dict).encode()).decode()}"
@ -368,8 +391,8 @@ if __name__ == "__main__":
tAdapter.init(deployPath, masterIp)
tAdapter.stop(force_kill=True)
if dnodeNums == 1 :
tdDnodes.deploy(1,updateCfgDict)
if dnodeNums == 1:
tdDnodes.deploy(1, updateCfgDict)
tdDnodes.start(1)
tdCases.logSql(logSql)
if restful or websocket:
@ -377,13 +400,15 @@ if __name__ == "__main__":
tAdapter.start()
if queryPolicy != 1:
queryPolicy=int(queryPolicy)
queryPolicy = int(queryPolicy)
if restful:
conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc")
conn = taosrest.connect(
url=f"http://{host}:6041", timezone="utc")
elif websocket:
conn = taosws.connect(f"taosws://root:taosdata@{host}:6041")
conn = taosws.connect(
f"taosws://root:taosdata@{host}:6041")
else:
conn = taos.connect(host,config=tdDnodes.getSimCfgPath())
conn = taos.connect(host, config=tdDnodes.getSimCfgPath())
cursor = conn.cursor()
cursor.execute("create qnode on dnode 1")
@ -391,16 +416,20 @@ if __name__ == "__main__":
cursor.execute("show local variables")
res = cursor.fetchall()
for i in range(cursor.rowcount):
if res[i][0] == "queryPolicy" :
if res[i][0] == "queryPolicy":
if int(res[i][1]) == int(queryPolicy):
tdLog.info(f'alter queryPolicy to {queryPolicy} successfully')
tdLog.info(
f'alter queryPolicy to {queryPolicy} successfully')
cursor.close()
else:
tdLog.debug(res)
tdLog.exit(f"alter queryPolicy to {queryPolicy} failed")
else :
tdLog.debug("create an cluster with %s nodes and make %s dnode as independent mnode"%(dnodeNums,mnodeNums))
dnodeslist = cluster.configure_cluster(dnodeNums=dnodeNums, mnodeNums=mnodeNums, independentMnode=independentMnode)
tdLog.exit(
f"alter queryPolicy to {queryPolicy} failed")
else:
tdLog.debug("create an cluster with %s nodes and make %s dnode as independent mnode" % (
dnodeNums, mnodeNums))
dnodeslist = cluster.configure_cluster(
dnodeNums=dnodeNums, mnodeNums=mnodeNums, independentMnode=independentMnode)
tdDnodes = ClusterDnodes(dnodeslist)
tdDnodes.init(deployPath, masterIp)
tdDnodes.setTestCluster(testCluster)
@ -417,31 +446,34 @@ if __name__ == "__main__":
tAdapter.start()
if restful:
conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc")
conn = taosrest.connect(
url=f"http://{host}:6041", timezone="utc")
elif websocket:
conn = taosws.connect(f"taosws://root:taosdata@{host}:6041")
else:
conn = taos.connect(host,config=tdDnodes.getSimCfgPath())
conn = taos.connect(host, config=tdDnodes.getSimCfgPath())
# tdLog.info(tdDnodes.getSimCfgPath(),host)
if createDnodeNums == 1:
createDnodeNums=dnodeNums
createDnodeNums = dnodeNums
else:
createDnodeNums=createDnodeNums
cluster.create_dnode(conn,createDnodeNums)
cluster.create_mnode(conn,mnodeNums)
createDnodeNums = createDnodeNums
cluster.create_dnode(conn, createDnodeNums)
cluster.create_mnode(conn, mnodeNums)
try:
if cluster.check_dnode(conn) :
if cluster.check_dnode(conn):
print("check dnode ready")
except Exception as r:
print(r)
if queryPolicy != 1:
queryPolicy=int(queryPolicy)
queryPolicy = int(queryPolicy)
if restful:
conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc")
conn = taosrest.connect(
url=f"http://{host}:6041", timezone="utc")
elif websocket:
conn = taosws.connect(f"taosws://root:taosdata@{host}:6041")
conn = taosws.connect(
f"taosws://root:taosdata@{host}:6041")
else:
conn = taos.connect(host,config=tdDnodes.getSimCfgPath())
conn = taos.connect(host, config=tdDnodes.getSimCfgPath())
cursor = conn.cursor()
cursor.execute("create qnode on dnode 1")
@ -449,23 +481,27 @@ if __name__ == "__main__":
cursor.execute("show local variables")
res = cursor.fetchall()
for i in range(cursor.rowcount):
if res[i][0] == "queryPolicy" :
if res[i][0] == "queryPolicy":
if int(res[i][1]) == int(queryPolicy):
tdLog.info(f'alter queryPolicy to {queryPolicy} successfully')
tdLog.info(
f'alter queryPolicy to {queryPolicy} successfully')
cursor.close()
else:
tdLog.debug(res)
tdLog.exit(f"alter queryPolicy to {queryPolicy} failed")
tdLog.exit(
f"alter queryPolicy to {queryPolicy} failed")
if ucase is not None and hasattr(ucase, 'noConn') and ucase.noConn == True:
conn = None
else:
if restful:
conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc")
conn = taosrest.connect(
url=f"http://{host}:6041", timezone="utc")
elif websocket:
conn = taosws.connect(f"taosws://root:taosdata@{host}:6041")
else:
conn = taos.connect(host=f"{host}", config=tdDnodes.getSimCfgPath())
conn = taos.connect(
host=f"{host}", config=tdDnodes.getSimCfgPath())
if testCluster:
tdLog.info("Procedures for testing cluster")
@ -476,11 +512,13 @@ if __name__ == "__main__":
else:
tdLog.info("Procedures for testing self-deployment")
if restful:
conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc")
conn = taosrest.connect(
url=f"http://{host}:6041", timezone="utc")
elif websocket:
conn = taosws.connect(f"taosws://root:taosdata@{host}:6041")
else:
conn = taos.connect(host=f"{host}", config=tdDnodes.getSimCfgPath())
conn = taos.connect(
host=f"{host}", config=tdDnodes.getSimCfgPath())
if fileName == "all":
tdCases.runAllWindows(conn)
@ -497,14 +535,19 @@ if __name__ == "__main__":
tdDnodes.start(1)
time.sleep(1)
if restful:
conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc")
conn = taosrest.connect(
url=f"http://{host}:6041", timezone="utc")
elif websocket:
conn = taosws.connect(f"taosws://root:taosdata@{host}:6041")
conn = taosws.connect(
f"taosws://root:taosdata@{host}:6041")
else:
conn = taos.connect(host=f"{host}", config=tdDnodes.getSimCfgPath())
tdLog.info("Procedures for tdengine deployed in %s" % (host))
conn = taos.connect(
host=f"{host}", config=tdDnodes.getSimCfgPath())
tdLog.info(
"Procedures for tdengine deployed in %s" % (host))
tdLog.info("query test after taosd restart")
tdCases.runOneWindows(conn, sp[0] + "_" + "restart.py", replicaVar)
tdCases.runOneWindows(
conn, sp[0] + "_" + "restart.py", replicaVar)
else:
tdLog.info("not need to query")
else:
@ -522,10 +565,11 @@ if __name__ == "__main__":
except:
pass
if is_test_framework:
moduleName = fileName.replace(".py", "").replace("/", ".")
uModule = importlib.import_module(moduleName)
uModule = dynamicLoadModule(fileName)
try:
ucase = uModule.TDTestCase()
class_names = get_local_classes_in_order(fileName)
case_class = getattr(uModule, class_names[-1])
ucase = case_class()
if (json.dumps(updateCfgDict) == '{}'):
updateCfgDict = ucase.updatecfgDict
if (json.dumps(adapter_cfg_dict) == '{}'):
@ -537,9 +581,9 @@ if __name__ == "__main__":
tAdapter.init(deployPath, masterIp)
tAdapter.stop(force_kill=True)
if dnodeNums == 1 :
if dnodeNums == 1:
# dnode is one
tdDnodes.deploy(1,updateCfgDict)
tdDnodes.deploy(1, updateCfgDict)
tdDnodes.start(1)
tdCases.logSql(logSql)
@ -548,13 +592,16 @@ if __name__ == "__main__":
tAdapter.start()
if queryPolicy != 1:
queryPolicy=int(queryPolicy)
queryPolicy = int(queryPolicy)
if restful:
conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc")
conn = taosrest.connect(
url=f"http://{host}:6041", timezone="utc")
elif websocket:
conn = taosws.connect(f"taosws://root:taosdata@{host}:6041")
conn = taosws.connect(
f"taosws://root:taosdata@{host}:6041")
else:
conn = taos.connect(host=f"{host}", config=tdDnodes.getSimCfgPath())
conn = taos.connect(
host=f"{host}", config=tdDnodes.getSimCfgPath())
# tdSql.init(conn.cursor())
# tdSql.execute("create qnode on dnode 1")
# tdSql.execute('alter local "queryPolicy" "%d"'%queryPolicy)
@ -573,19 +620,23 @@ if __name__ == "__main__":
cursor.execute("show local variables")
res = cursor.fetchall()
for i in range(cursor.rowcount):
if res[i][0] == "queryPolicy" :
if res[i][0] == "queryPolicy":
if int(res[i][1]) == int(queryPolicy):
tdLog.info(f'alter queryPolicy to {queryPolicy} successfully')
tdLog.info(
f'alter queryPolicy to {queryPolicy} successfully')
cursor.close()
else:
tdLog.debug(res)
tdLog.exit(f"alter queryPolicy to {queryPolicy} failed")
tdLog.exit(
f"alter queryPolicy to {queryPolicy} failed")
else :
else:
# dnode > 1 cluster
tdLog.debug("create an cluster with %s nodes and make %s dnode as independent mnode"%(dnodeNums,mnodeNums))
print(independentMnode,"independentMnode valuse")
dnodeslist = cluster.configure_cluster(dnodeNums=dnodeNums, mnodeNums=mnodeNums, independentMnode=independentMnode)
tdLog.debug("create an cluster with %s nodes and make %s dnode as independent mnode" % (
dnodeNums, mnodeNums))
print(independentMnode, "independentMnode valuse")
dnodeslist = cluster.configure_cluster(
dnodeNums=dnodeNums, mnodeNums=mnodeNums, independentMnode=independentMnode)
tdDnodes = ClusterDnodes(dnodeslist)
tdDnodes.init(deployPath, masterIp)
tdDnodes.setTestCluster(testCluster)
@ -593,7 +644,7 @@ if __name__ == "__main__":
tdDnodes.setAsan(asan)
tdDnodes.stopAll()
for dnode in tdDnodes.dnodes:
tdDnodes.deploy(dnode.index,updateCfgDict)
tdDnodes.deploy(dnode.index, updateCfgDict)
for dnode in tdDnodes.dnodes:
tdDnodes.starttaosd(dnode.index)
tdCases.logSql(logSql)
@ -604,34 +655,39 @@ if __name__ == "__main__":
# create taos connect
if restful:
conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc")
conn = taosrest.connect(
url=f"http://{host}:6041", timezone="utc")
elif websocket:
conn = taosws.connect(f"taosws://root:taosdata@{host}:6041")
else:
conn = taos.connect(host=f"{host}", config=tdDnodes.getSimCfgPath())
print(tdDnodes.getSimCfgPath(),host)
conn = taos.connect(
host=f"{host}", config=tdDnodes.getSimCfgPath())
print(tdDnodes.getSimCfgPath(), host)
if createDnodeNums == 1:
createDnodeNums=dnodeNums
createDnodeNums = dnodeNums
else:
createDnodeNums=createDnodeNums
cluster.create_dnode(conn,createDnodeNums)
cluster.create_mnode(conn,mnodeNums)
createDnodeNums = createDnodeNums
cluster.create_dnode(conn, createDnodeNums)
cluster.create_mnode(conn, mnodeNums)
try:
if cluster.check_dnode(conn) :
if cluster.check_dnode(conn):
print("check dnode ready")
except Exception as r:
print(r)
# do queryPolicy option
if queryPolicy != 1:
queryPolicy=int(queryPolicy)
queryPolicy = int(queryPolicy)
if restful:
conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc")
conn = taosrest.connect(
url=f"http://{host}:6041", timezone="utc")
elif websocket:
conn = taosws.connect(f"taosws://root:taosdata@{host}:6041")
conn = taosws.connect(
f"taosws://root:taosdata@{host}:6041")
else:
conn = taos.connect(host=f"{host}", config=tdDnodes.getSimCfgPath())
conn = taos.connect(
host=f"{host}", config=tdDnodes.getSimCfgPath())
cursor = conn.cursor()
cursor.execute("create qnode on dnode 1")
@ -639,14 +695,15 @@ if __name__ == "__main__":
cursor.execute("show local variables")
res = cursor.fetchall()
for i in range(cursor.rowcount):
if res[i][0] == "queryPolicy" :
if res[i][0] == "queryPolicy":
if int(res[i][1]) == int(queryPolicy):
tdLog.info(f'alter queryPolicy to {queryPolicy} successfully')
tdLog.info(
f'alter queryPolicy to {queryPolicy} successfully')
cursor.close()
else:
tdLog.debug(res)
tdLog.exit(f"alter queryPolicy to {queryPolicy} failed")
tdLog.exit(
f"alter queryPolicy to {queryPolicy} failed")
# run case
if testCluster:
@ -658,11 +715,13 @@ if __name__ == "__main__":
else:
tdLog.info("Procedures for testing self-deployment")
if restful:
conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc")
conn = taosrest.connect(
url=f"http://{host}:6041", timezone="utc")
elif websocket:
conn = taosws.connect(f"taosws://root:taosdata@{host}:6041")
else:
conn = taos.connect(host=f"{host}", config=tdDnodes.getSimCfgPath())
conn = taos.connect(
host=f"{host}", config=tdDnodes.getSimCfgPath())
if fileName == "all":
tdCases.runAllLinux(conn)
@ -680,14 +739,19 @@ if __name__ == "__main__":
tdDnodes.start(1)
time.sleep(1)
if restful:
conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc")
conn = taosrest.connect(
url=f"http://{host}:6041", timezone="utc")
elif websocket:
conn = taosws.connect(f"taosws://root:taosdata@{host}:6041")
conn = taosws.connect(
f"taosws://root:taosdata@{host}:6041")
else:
conn = taos.connect(host=f"{host}", config=tdDnodes.getSimCfgPath())
tdLog.info("Procedures for tdengine deployed in %s" % (host))
conn = taos.connect(
host=f"{host}", config=tdDnodes.getSimCfgPath())
tdLog.info(
"Procedures for tdengine deployed in %s" % (host))
tdLog.info("query test after taosd restart")
tdCases.runOneLinux(conn, sp[0] + "_" + "restart.py", replicaVar)
tdCases.runOneLinux(
conn, sp[0] + "_" + "restart.py", replicaVar)
else:
tdLog.info("not need to query")