Merge branch '3.0' of https://github.com/taosdata/TDengine into enh/TS-5445-3.0
commit 0174d7404d
@@ -6,6 +6,8 @@ on:
- 'main'
- '3.0'
- '3.1'
- 'enh/cmake-TD-33848'

paths-ignore:
- 'docs/**'
- 'packaging/**'
@@ -0,0 +1,101 @@
name: TDengine Doc Build

on:
  workflow_call:
    inputs:
      target_branch:
        description: "Target branch name for building the document"
        required: true
        type: string

      target_pr_number:
        description: "PR number of target branch to merge for building the document"
        required: true
        type: string

env:
  DOC_WKC: "/root/doc_ci_work"
  ZH_DOC_REPO: "docs.taosdata.com"
  EN_DOC_REPO: "docs.tdengine.com"
  TD_REPO: "TDengine"
  TOOLS_REPO: "taos-tools"

jobs:
  check:
    runs-on:
      group: CI
      labels: [self-hosted, doc-build]
    outputs:
      changed_files_zh: ${{ steps.set_output.outputs.changed_files_zh }}
      changed_files_en: ${{ steps.set_output.outputs.changed_files_en }}
      changed_files_non_doc: ${{ steps.set_output.outputs.changed_files_non_doc }}
      changed_files_non_tdgpt: ${{ steps.set_output.outputs.changed_files_non_tdgpt }}
    steps:
      - name: Get the latest document contents from the repository
        run: |
          set -e
          # ./.github/scripts/update_repo.sh ${{ env.DOC_WKC }}/${{ env.TD_REPO }} ${{ inputs.target_branch }} ${{ inputs.target_pr_number }}
          cd ${{ env.DOC_WKC }}/${{ env.TD_REPO }}
          git reset --hard
          git clean -f
          git remote prune origin
          git fetch
          git checkout ${{ inputs.target_branch }}
          git pull >/dev/null
          git fetch origin +refs/pull/${{ inputs.target_pr_number }}/merge
          git checkout -qf FETCH_HEAD
      - name: Check whether the document is changed and set output variables
        id: set_output
        run: |
          set -e
          cd ${{ env.DOC_WKC }}/${{ env.TD_REPO }}
          changed_files_zh=$(git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${{ inputs.target_branch }}` | grep "^docs/zh/" | tr '\n' ' ' || :)
          changed_files_en=$(git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${{ inputs.target_branch }}` | grep "^docs/en/" | tr '\n' ' ' || :)
          changed_files_non_doc=$(git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${{ inputs.target_branch }}` | grep -v "^docs/en/" | grep -v "^docs/zh/" | grep -v ".md$" | tr '\n' ' ' || :)
          changed_files_non_tdgpt=$(git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${{ inputs.target_branch }}` | grep -v "^docs/en/" | grep -v "^docs/zh/" | grep -v ".md$" | grep -Ev "forecastoperator.c|anomalywindowoperator.c|tanalytics.h|tanalytics.c|tdgpt_cases.task|analytics" | tr '\n' ' ' || :)
          echo "changed_files_zh=${changed_files_zh}" >> $GITHUB_OUTPUT
          echo "changed_files_en=${changed_files_en}" >> $GITHUB_OUTPUT
          echo "changed_files_non_doc=${changed_files_non_doc}" >> $GITHUB_OUTPUT
          echo "changed_files_non_tdgpt=${changed_files_non_tdgpt}" >> $GITHUB_OUTPUT

  build:
    needs: check
    runs-on:
      group: CI
      labels: [self-hosted, doc-build]
    if: ${{ needs.check.outputs.changed_files_zh != '' || needs.check.outputs.changed_files_en != '' }}
    steps:
      - name: Get the latest document contents
        run: |
          set -e
          # ./.github/scripts/update_repo.sh ${{ env.DOC_WKC }}/${{ env.TD_REPO }} ${{ inputs.target_branch }} ${{ inputs.target_pr_number }}
          cd ${{ env.DOC_WKC }}/${{ env.TD_REPO }}
          git reset --hard
          git clean -f
          git remote prune origin
          git fetch
          git checkout ${{ inputs.target_branch }}
          git pull >/dev/null
          git fetch origin +refs/pull/${{ inputs.target_pr_number }}/merge
          git checkout -qf FETCH_HEAD
      - name: Build the Chinese document
        if: ${{ needs.check.outputs.changed_files_zh != '' }}
        run: |
          cd ${{ env.DOC_WKC }}/${{ env.ZH_DOC_REPO }}
          yarn ass local
          yarn build
      - name: Build the English document
        if: ${{ needs.check.outputs.changed_files_en != '' }}
        run: |
          cd ${{ env.DOC_WKC }}/${{ env.EN_DOC_REPO }}
          yarn ass local
          yarn build
    outputs:
      changed_files_zh: ${{ needs.check.outputs.changed_files_zh }}
      changed_files_en: ${{ needs.check.outputs.changed_files_en }}
      changed_files_non_doc: ${{ needs.check.outputs.changed_files_non_doc }}
      changed_files_non_tdgpt: ${{ needs.check.outputs.changed_files_non_tdgpt }}
@@ -22,8 +22,6 @@ Through the `TDengine Java connector`, Seeq can easily support querying time-ser

## Configure Data Source

### Configuration of JDBC Connector

**Step 1**, Check the data storage location

```shell
@@ -42,9 +40,13 @@ sudo seeq restart

Use a browser to visit ip:34216 and follow the instructions to enter the license.

## Load TDengine Time-Series Data
## Data Analysis

This chapter demonstrates how to use the Seeq software to load TDengine time-series data.
### Scenario Introduction

The example scenario is a power system where users collect electricity usage data from power station instruments daily and store it in the TDengine cluster. Now, users want to predict how power consumption will develop and purchase more equipment to support it. User power consumption varies with monthly orders, and considering seasonal changes, power consumption will differ. This city is located in the northern hemisphere, so more electricity is used in summer. We simulate data to reflect these assumptions.

### Data preparation

**Step 1**, Create tables in TDengine.
@@ -246,12 +248,6 @@ The source code is hosted on [GitHub Repository](https://github.com/sangshuduo/t
}
```

## Data Analysis

### Scenario Introduction

The example scenario is a power system where users collect electricity usage data from power station instruments daily and store it in the TDengine cluster. Now, users want to predict how power consumption will develop and purchase more equipment to support it. User power consumption varies with monthly orders, and considering seasonal changes, power consumption will differ. This city is located in the northern hemisphere, so more electricity is used in summer. We simulate data to reflect these assumptions.

### Using Seeq Workbench

Log in to the Seeq service page and create a new Seeq Workbench. By selecting data sources from search results and choosing different tools as needed, you can display data or make predictions. For detailed usage methods, refer to the [official knowledge base](https://support.seeq.com/space/KB/146440193/Seeq+Workbench).
@@ -12,31 +12,34 @@ Through the Python connector of TDengine, Superset can support TDengine data sou
## Prerequisites

Prepare the following environment:
- TDengine is installed and running normally (both Enterprise and Community versions are available)
- taosAdapter is running normally, refer to [taosAdapter](../../../tdengine-reference/components/taosadapter/)
- Apache Superset version 2.1.0 or above is already installed, refer to [Apache Superset](https://superset.apache.org/)

## Install TDengine Python Connector
- TDengine 3.2.3.0 and above version is installed and running normally (both Enterprise and Community versions are available).
- taosAdapter is running normally, refer to [taosAdapter](../../../tdengine-reference/components/taosadapter/).
- Apache Superset version 2.1.0 or above is already installed, refer to [Apache Superset](https://superset.apache.org/).
- Install Python connector driver, refer to [Python Client Library](../../../tdengine-reference/client-libraries/python).

:::tip
The Python connector of TDengine comes with a connection driver that supports Superset in versions 2.1.18 and later, which will be automatically installed in the Superset directory and provide data source services.
The connection uses the WebSocket protocol, so it is necessary to install the `taos-ws-py` component of TDengine separately. The complete installation script is as follows:
```bash
pip3 install taospy
pip3 install taos-ws-py
```
:::

## Configure TDengine Connection In Superset
## Configure Data Source

**Step 1**, enter the new database connection page, [Superset] -> [Setting] -> [Database Connections] -> [+DATABASE].

**Step 2**, select TDengine database connection, select the `TDengine` option from the drop-down list of [SUPPORTED DATABASES].

**Step 1**, enter the new database connection page, "Superset" → "Setting" → "Database Connections" → "+DATABASE"
**Step 2**, select TDengine database connection, select the "TDengine" option from the drop-down list of "SUPPORTED DATABASES".
:::tip
If there is no TDengine option in the drop-down list, please confirm the installation order: `Superset` is installed first, then the `Python Connector`.
:::
**Step 3**, write a name of connection in "DISPLAY NAME"
**Step 4**, The "SQLALCHEMY URL" field is a key connection information string, and it must be filled in correctly

**Step 3**, write a name of connection in [DISPLAY NAME].

**Step 4**, The [SQLALCHEMY URL] field is a key connection information string, and it must be filled in correctly.

```bash
taosws://user:password@host:port
```

| Parameter | <center>Parameter Description</center> |
|:---------- |:--------------------------------------------------------- |
|user | Username for logging into TDengine database |
@@ -44,32 +47,34 @@ taosws://user:password@host:port
|host | Name of the host where the TDengine database is located |
|port | The port that provides WebSocket services, default is 6041 |

Example:
The TDengine database installed on this machine provides WebSocket service port 6041, using the default username and password, "SQLALCHEMY URL" is:
Example:

The TDengine database installed on this machine provides WebSocket service port 6041, using the default username and password, `SQLALCHEMY URL` is:

```bash
taosws://root:taosdata@localhost:6041
```
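
As a quick check, this URL can be tested directly with SQLAlchemy before configuring Superset — a minimal sketch, assuming `taospy` and `taos-ws-py` are installed and taosAdapter is reachable on port 6041:

```python
from sqlalchemy import create_engine, text

# Same URL format as the SQLALCHEMY URL field in Superset:
# taosws://<user>:<password>@<host>:<port>
engine = create_engine("taosws://root:taosdata@localhost:6041")

with engine.connect() as conn:
    # server_version() is a built-in TDengine scalar function; any simple
    # query works here -- the goal is only to prove the URL is usable.
    version = conn.execute(text("SELECT server_version()")).scalar()
    print(f"Connected to TDengine {version}")
```

If this script prints a version string, the same URL will work in the connection form.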

**Step 5**, configure the connection string, click "TEST CONNECTION" to test if the connection can be successful. After passing the test, click the "CONNECT" button to complete the connection

**Step 5**, configure the connection string, click "TEST CONNECTION" to test if the connection can be successful. After passing the test, click the "CONNECT" button to complete the connection.

## Data Analysis

## Start
### Data preparation

There is no difference in the use of TDengine data source compared to other data sources. Here is a brief introduction to basic data queries:
1. Click the "+" button in the upper right corner of the Superset interface, select "SQL query", and enter the query interface
2. Select the "TDengine" data source that has been created earlier from the dropdown list of "DATABASES" in the upper left corner
3. Select the name of the database to be operated on from the drop-down list of "SCHEMA" (system libraries are not displayed)
4. "SEE TABLE SCHEMA" select the name of the super table or regular table to be operated on (sub tables are not displayed)
5. Subsequently, the schema information of the selected table will be displayed in the following area
6. In the SQL editor area, any SQL statement that conforms to TDengine syntax can be entered for execution
There is no difference in the use of TDengine data source compared to other data sources. Here is a brief introduction to basic data queries:

## Example
1. Click the [+] button in the upper right corner of the Superset interface, select [SQL query], and enter the query interface.
2. Select the `TDengine` data source that has been created earlier from the dropdown list of [DATABASES] in the upper left corner.
3. Select the name of the database to be operated on from the drop-down list of [SCHEMA] (system libraries are not displayed).
4. [SEE TABLE SCHEMA] select the name of the super table or regular table to be operated on (sub tables are not displayed).
5. Subsequently, the schema information of the selected table will be displayed in the following area.
6. In the `SQL` editor area, any `SQL` statement that conforms to `TDengine` syntax can be entered for execution.

We chose two popular templates from the Superset Chart template to showcase their effects, using smart meter data as an example:
### Smart Meter Example

1. "Aggregate" Type, which displays the maximum voltage value collected per minute during the specified time period in Group 4
We chose two popular templates from the [Superset Chart] template to showcase their effects, using smart meter data as an example:

![superset-demo1](./superset-demo1.jpeg)

2. "RAW RECORDS" Type, which displays the collected values of current and voltage during the specified time period in Group 4

![superset-demo2](./superset-demo2.jpeg)
1. `Aggregate` Type, which displays the maximum voltage value collected per minute during the specified time period in Group 4.
![superset-demo1](./superset-demo1.jpeg)
2. `RAW RECORDS` Type, which displays the collected values of current and voltage during the specified time period in Group 4.
![superset-demo2](./superset-demo2.jpeg)
@@ -84,12 +84,12 @@ After modifying configuration file parameters, you need to restart the *taosd* s

|Parameter Name |Supported Version |Dynamic Modification|Description|
|-----------------------|-------------------------|--------------------|------------|
|timezone | |Not supported |Time zone; defaults to dynamically obtaining the current time zone setting from the system|
|locale | |Not supported |System locale information and encoding format, defaults to obtaining from the system|
|charset | |Not supported |Character set encoding, defaults to obtaining from the system|
|timezone | | since 3.1.0.0 |Time zone; defaults to dynamically obtaining the current time zone setting from the system|
|locale | | since 3.1.0.0 |System locale information and encoding format, defaults to obtaining from the system|
|charset | | since 3.1.0.0 |Character set encoding, defaults to obtaining from the system|

:::info

#### Explanation of Regional Related Parameters
1. To address the issue of data writing and querying across multiple time zones, TDengine uses Unix Timestamps to record and store timestamps. The nature of Unix Timestamps ensures that the timestamps generated are consistent at any given moment across any time zone. It is important to note that the conversion to Unix Timestamps is done on the client side. To ensure that other forms of time on the client are correctly converted to Unix Timestamps, it is necessary to set the correct time zone.
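
To make the client-side conversion concrete, the following minimal sketch (pure standard library, added here for illustration) shows that the same wall-clock time interpreted in two different zones yields two different Unix Timestamps — which is exactly why the client must know its time zone before converting:

```python
from datetime import datetime, timezone, timedelta

# The same wall-clock reading converted in UTC vs. UTC+8 differs by
# exactly 8 hours (28800 s) in the resulting Unix Timestamp.
wall = (2018, 10, 3, 14, 38, 5)
utc_ts = datetime(*wall, tzinfo=timezone.utc).timestamp()
cst_ts = datetime(*wall, tzinfo=timezone(timedelta(hours=8))).timestamp()
print(utc_ts - cst_ts)  # 28800.0
```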

On Linux/macOS, the client automatically reads the time zone information set by the system. Users can also set the time zone in the configuration file in various ways. For example:
@@ -534,29 +534,23 @@ The `taosd_vnodes_role` table records virtual node role information.
| duration | VARCHAR | tag | SQL execution duration, value range: 3-10s, 10-100s, 100-1000s, 1000s- |
| cluster_id | VARCHAR | tag | cluster id |

## Log Related
### taos\_slow\_sql\_detail Table

TDengine records the system's operational status through log files, helping users monitor the system's condition and troubleshoot issues. This section mainly introduces the related explanations of two system logs: taosc and taosd.
`taos_slow_sql_detail` records slow query detail information. The table name follows the rule `{user}_{db}_{ip}_clusterId_{cluster_id}`.

TDengine's log files mainly include two types: normal logs and slow logs.

1. Normal Log Behavior Explanation
    1. Multiple client processes can be started on the same machine, so the client log naming convention is taoslogX.Y, where X is a number, either empty or from 0 to 9, and Y is a suffix, either 0 or 1.
    2. Only one server process can exist on the same machine. Therefore, the server log naming convention is taosdlog.Y, where Y is a suffix, either 0 or 1.

    The rules for determining the number and suffix are as follows (assuming the log path is /var/log/taos/):
    1. Determining the number: Use 10 numbers as the log naming convention, /var/log/taos/taoslog0.Y - /var/log/taos/taoslog9.Y, check each number sequentially to find the first unused number as the log file number for that process. If all 10 numbers are used by processes, do not use a number, i.e., /var/log/taos/taoslog.Y, and all processes write to the same file (number is empty).
    2. Determining the suffix: 0 or 1. For example, if the number is determined to be 3, the alternative log file names would be /var/log/taos/taoslog3.0 /var/log/taos/taoslog3.1. If both files do not exist, use suffix 0; if one exists and the other does not, use the existing suffix. If both exist, use the suffix of the file that was modified most recently.
    3. If the log file exceeds the configured number of lines numOfLogLines, it will switch suffixes and continue logging, e.g., /var/log/taos/taoslog3.0 is full, switch to /var/log/taos/taoslog3.1 to continue logging. /var/log/taos/taoslog3.0 will be renamed with a timestamp suffix and compressed for storage (handled by an asynchronous thread).
    4. Control how many days log files are kept through the configuration logKeepDays; logs older than that are deleted when new logs are compressed and stored. This is not based on calendar days.

In addition to recording normal logs, SQL statements that take longer than the configured time will be recorded in the slow logs. Slow log files are mainly used for analyzing system performance and troubleshooting performance issues.

2. Slow Log Behavior Explanation
    1. Slow logs are recorded both locally in slow log files and sent to taosKeeper for structured storage via taosAdapter (monitor switch must be turned on).
    2. Slow log file storage rules are:
        1. One slow log file per day; if there are no slow logs for the day, there is no file for that day.
        2. The file name is taosSlowLog.yyyy-mm-dd (taosSlowLog.2024-08-02), and the log storage path is configured through logDir.
        3. Logs from multiple clients are stored in the same taosSlowLog.yyyy.mm.dd file under the respective log path.
        4. Slow log files are not automatically deleted or compressed.
        5. Uses the same three parameters as normal log files: logDir, minimalLogDirGB, asyncLog. The other two parameters, numOfLogLines and logKeepDays, do not apply to slow logs.
| field | type | is\_tag | comment |
| :------------- | :-------- | :------ | :---------------------------------------------------- |
| start\_ts | TIMESTAMP | | sql start exec time in client, ms, primary key |
| request\_id | UINT64_T | | sql request id, random hash |
| query\_time | INT32_T | | sql exec time, ms |
| code | INT32_T | | sql return code, 0 success |
| error\_info | VARCHAR | | error info if sql exec failed |
| type | INT8_T | | sql type (1-query, 2-insert, 4-others) |
| rows\_num | INT64_T | | sql result rows num |
| sql | VARCHAR | | sql string |
| process\_name | VARCHAR | | process name |
| process\_id | VARCHAR | | process id |
| db | VARCHAR | TAG | which db the sql belongs to |
| user | VARCHAR | TAG | the user that executed this sql |
| ip | VARCHAR | TAG | the client ip that executed this sql |
| cluster\_id | VARCHAR | TAG | cluster id |

@@ -71,7 +71,10 @@ WebSocket Connector Historical Versions:

|WebSocket Connector Version | Major Changes | TDengine Version|
| ----------------------- | -------------------------------------------------------------------------------------------------- | ----------------- |
|0.3.5 | Added support for VARBINARY and GEOMETRY types, fixed known issues. | 3.3.0.0 and higher|
|0.3.9 | Fix the problem of incomplete data retrieval when customizing the number of rows with the "fetchmany" method. | - |
|0.3.8 | Supported connecting SuperSet to the TDengine cloud service instance. | - |
|0.3.5 | Fixed the issues in the crypto provider. | - |
|0.3.4 | Supported varbinary and geometry data type. | 3.3.0.0 and higher |
|0.3.2 | Optimize WebSocket SQL query and insertion performance, modify readme and documentation, fix known issues. | 3.2.3.0 and higher|
|0.2.9 | Known issue fixes. | - |
|0.2.5 | 1. Data subscription supports obtaining and resetting consumption progress. <br/>2 Support schemaless. <br/>3 Support STMT. | - |
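
The 0.3.9 entry above concerns batched fetches through the DB-API cursor. A minimal sketch of that call pattern, assuming `taos-ws-py` is installed and a `test.meters` table exists (both the table name and the batch size here are illustrative):

```python
import taosws  # provided by the taos-ws-py package

conn = taosws.connect("taosws://root:taosdata@localhost:6041")
cursor = conn.cursor()
cursor.execute("SELECT ts, current, voltage FROM test.meters")

total = 0
while True:
    rows = cursor.fetchmany(500)  # custom row count -- the case fixed in 0.3.9
    if not rows:
        break
    total += len(rows)
print(f"fetched {total} rows in batches of 500")
```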

@@ -0,0 +1,14 @@
---
title: Product Roadmap
---

The 2025 roadmap for TDengine OSS is described in the following table.

| Quarter | Feature |
| :----- | :----- |
| 2025Q1 | <ol><li>Virtual tables</li><li>Query engine: conditional expressions in <code>REGEXP</code>, <code>GREATEST</code>, <code>LEAST</code>, and <code>CAST</code> functions; improvements in single-row selection functions; time range interpolation with <code>INTERP</code></li><li>Storage engine: support for writing query results into supertables; <code>KEEP</code> parameter for supertables; performance improvements for the parameter binding interface</li><li>Stream processing: support for virtual tables; decreased compute resource usage; new mechanism for event notification; faster stream creation</li><li>Data types: support for the decimal data type</li><li>High availability: faster recovery from downtime; improved client failover</li><li>Stability: LTS release TDengine 3.3.6.x</li><li>JDBC driver: more efficient data ingestion</li><li>Ecosystem: integration with Microsoft Excel</li></ol> |
| 2025Q2 | <ol><li>Query engine: relaxed restrictions on <code>JOIN</code> queries; support for all mathematical functions in MySQL; integral, integral average, and continuous variance functions; optimization of the <code>CSUM</code> function; support for <code>COUNT(DISTINCT)</code> syntax; enhancements to event windows; faster filtering by tag; faster <code>INTERP</code> queries</li><li>Storage engine: decreased compute resource usage for TSMAs; improved write jitter</li><li>Stream processing: high availability of snodes</li><li>Data types: support for the blob data type</li><li>Data subscription: support for the MQTT protocol</li><li>High availability: faster replica configuration changes; faster recovery from downtime for clusters; improved data recovery after power outage</li><li>Observability: diagnostic tool for data ingestion</li></ol> |
| 2025Q3 | <ol><li>Query engine: more subqueries; support for all operators in MySQL; support for all time functions in MySQL; improved window calculation; reduced jitter in query performance; support for specifying columns in count windows</li><li>Storage engine: faster ingestion in SQL mode</li><li>Observability: diagnostic tool for queries; improved <code>EXPLAIN</code> output; monitoring of long-running tasks</li></ol> |
| 2025Q4 | <ol><li>Query engine: window functions (i.e. the <code>OVER</code> clause); support for all string, aggregation, and conditional functions in MySQL; sorting within groups for partition queries; controls for query resource usage; faster aggregate queries on subtables; time range interpolation in <code>INTERVAL</code> windows</li><li>Data types: support for variable-length strings</li><li>Caching: faster row-oriented caching</li><li>Observability: more insight into operations and maintenance</li></ol> |

For more information, see [TDengine Public Roadmap](https://github.com/orgs/taosdata/projects/4).
@@ -111,7 +111,7 @@ TDengine 还支持直接向超级表写入数据。需要注意的是,超级

```sql
insert into meters (tbname, ts, current, voltage, phase, location, group_id)
values( "d1001, "2018-10-03 14:38:05", 10.2, 220, 0.23, "California.SanFrancisco", 2)
values("d1001", "2018-10-03 14:38:05", 10.2, 220, 0.23, "California.SanFrancisco", 2)
```

### 零代码写入

@@ -67,9 +67,9 @@ alter database <dbname> replica 2|1

| 异常场景 | 集群状态 |
| ------- | ------ |
| 没有 Vnode 发生故障: Arbitrator 故障(Mnode 宕机节点超过一个,导致 Mnode 无法选主)| **持续提供服务** |
| 没有 Vnode 发生故障:Arbitrator 故障(Mnode 宕机节点超过一个,导致 Mnode 无法选主)| **持续提供服务** |
| 仅一个 Vnode 故障:VGroup 已经达成同步后,某一个 Vnode 才发生故障的 | **持续提供服务** |
| 仅一个 Vnode 故障:2个 Vnode 同时故障,故障前 VGroup 达成同步,但是只有一个 Vnode 从故障中恢复服务,另一个 Vnode 服务故障 | **通过下面的命令,强制指定leader, 继续提供服务** |
| 仅一个 Vnode 故障:2 个 Vnode 同时故障,故障前 VGroup 达成同步,但是只有一个 Vnode 从故障中恢复服务,另一个 Vnode 服务故障 | **通过下面的命令,强制指定 leader, 继续提供服务** |
| 仅一个 Vnode 故障:离线 Vnode 启动后,VGroup 未达成同步前,另一个 Vnode 服务故障的 | **无法提供服务** |
| 两个 Vnode 都发生故障 | **无法提供服务** |

@@ -12,7 +12,7 @@ Power BI 是由 Microsoft 提供的一种商业分析工具。通过配置使用
- TDengine 3.3.4.0 以上版本集群已部署并正常运行(企业及社区版均可)。
- taosAdapter 能够正常运行,详细参考 [taosAdapter 参考手册](../../../reference/components/taosadapter)。
- 从 TDengine 官网下载最新的 Windows 操作系统 X64 客户端驱动程序并进行安装,详细参考 [安装 ODBC 驱动](../../../reference/connector/odbc/#安装)。
- 安装完成 Power BI Desktop 软件并运行(如未安装,请从其官方地址下载最新的Windows操作系统 32/64 位版本)。
- 安装完成 Power BI Desktop 软件并运行(如未安装,请从其官方地址下载最新的 Windows 操作系统 32/64 位版本)。

## 配置数据源

@@ -29,8 +29,8 @@ Power BI 是由 Microsoft 提供的一种商业分析工具。通过配置使用
### 使用说明

为了充分发挥 Power BI 在分析 TDengine中 数据方面的优势,用户需要先理解维度、度量、窗口切分查询、数据切分查询、时序和相关性等核心概念,之后通过自定义的 SQL 导入数据。
- 维度:通常是分类(文本)数据,描述设备、测点、型号等类别信息。在 TDengine 的超级表中,使用标签列存储数据的维度信息,可以通过形如 “select distinct tbname, tag1, tag2 from supertable” 的SQL语法快速获得维度信息。
- 度量:可以用于进行计算的定量(数值)字段,常见计算有求和、取平均值和最小值等。如果测点的采集周期为1s,那么一年就有 3000 多万条记录,把这些数据全部导入 Power BI 会严重影响其执行效率。在 TDengine 中,用户可以使用数据切分查询、窗口切分查询等语法,结合与窗口相关的伪列,把降采样后的数据导入Power BI 中,具体语法请参阅 TDengine 官方文档的特色查询功能部分。
- 维度:通常是分类(文本)数据,描述设备、测点、型号等类别信息。在 TDengine 的超级表中,使用标签列存储数据的维度信息,可以通过形如 `select distinct tbname, tag1, tag2 from supertable` 的 SQL 语法快速获得维度信息。
- 度量:可以用于进行计算的定量(数值)字段,常见计算有求和、取平均值和最小值等。如果测点的采集周期为1s,那么一年就有 3000 多万条记录,把这些数据全部导入 Power BI 会严重影响其执行效率。在 TDengine 中,用户可以使用数据切分查询、窗口切分查询等语法,结合与窗口相关的伪列,把降采样后的数据导入 Power BI 中,具体语法请参阅 TDengine 官方文档的特色查询功能部分。
- 窗口切分查询:比如温度传感器每秒采集一次数据,但须查询每隔 10min 的温度平均值,在这种场景下可以使用窗口子句来获得需要的降采样查询结果,对应的 SQL 形如 `select tbname, _wstart date,avg(temperature) temp from table interval(10m)`,其中,`_wstart` 是伪列,表示时间窗口起始时间,10m 表示时间窗口的持续时间,`avg(temperature)` 表示时间窗口内的聚合值。
- 数据切分查询:如果需要同时获取很多温度传感器的聚合数值,可对数据进行切分,然后在切分出的数据空间内进行一系列的计算,对应的 SQL 形如 `partition by part_list`。数据切分子句最常见的用法是在超级表查询中按标签将子表数据进行切分,将每个子表的数据独立出来,形成一条条独立的时间序列,方便针对各种时序场景的统计分析。
- 时序:在绘制曲线或者按照时间聚合数据时,通常需要引入日期表。日期表可以从 Excel 表格中导入,也可以在 TDengine 中执行 SQL 获取,例如 `select _wstart date, count(*) cnt from test.meters where ts between A and B interval(1d) fill(0)`,其中 fill 字句表示数据缺失情况下的填充模式,伪列 _wstart 则为要获取的日期列。
@@ -46,7 +46,7 @@ TDengine 采用了一种独特的数据模型,以优化时序数据的存储

根据如下步骤,便可以体验通过 Power BI 生成时序数据报表的功能。

**第 1 步**,使用 TDengine 的 taosBenchMark 快速生成1000块智能电表3天的数据,采集频率为 1s。
**第 1 步**,使用 TDengine 的 taosBenchMark 快速生成 1000 块智能电表 3 天的数据,采集频率为 1s。

```shell
taosBenchmark -t 1000 -n 259200 -S 1000 -y
@@ -16,7 +16,7 @@ toc_max_heading_level: 4
- TDengine 3.3.2.0 以上版本集群已部署并正常运行(企业及社区版均可)。
- taosAdapter 能够正常运行,详细参考 [taosAdapter 参考手册](../../../reference/components/taosadapter)。
- 确保永洪 BI 已经安装并运行(如果未安装,请到永洪科技官方下载页面下载)。
- 安装JDBC驱动。从 maven.org 下载 TDengine JDBC 连接器文件 `taos-jdbcdriver-3.4.0-dist.jar` 及以上版本。
- 安装 JDBC 驱动。从 maven.org 下载 TDengine JDBC 连接器文件 `taos-jdbcdriver-3.4.0-dist.jar` 及以上版本。

## 配置数据源

@@ -18,8 +18,6 @@ Seeq 是制造业和工业互联网(IIOT)高级分析软件。Seeq 支持在

## 配置数据源

### 配置 JDBC 连接器

**第 1 步**,查看 data 存储位置

```
@@ -38,9 +36,13 @@ sudo seeq restart

使用浏览器访问 ip:34216 并按照说明输入 license。

### 加载 TDengine 时序数据
## 数据分析

本章节演示如何使用 Seeq 软件加载 TDengine 时序数据。
### 场景介绍

示例场景为一个电力系统,用户每天从电站仪表收集用电量数据,并将其存储在 TDengine 集群中。现在用户想要预测电力消耗将会如何发展,并购买更多设备来支持它。用户电力消耗随着每月订单变化而不同,另外考虑到季节变化,电力消耗量会有所不同。这个城市位于北半球,所以在夏天会使用更多的电力。我们模拟数据来反映这些假定。

### 数据准备

**第 1 步**,在 TDengine 中创建表。

@@ -240,12 +242,6 @@ taos -s "insert into power.goods select _wstart, _wstart + 10d, avg(goods) from
}
```

## 数据分析

### 场景介绍

示例场景为一个电力系统,用户每天从电站仪表收集用电量数据,并将其存储在 TDengine 集群中。现在用户想要预测电力消耗将会如何发展,并购买更多设备来支持它。用户电力消耗随着每月订单变化而不同,另外考虑到季节变化,电力消耗量会有所不同。这个城市位于北半球,所以在夏天会使用更多的电力。我们模拟数据来反映这些假定。

### 使用 Seeq Workbench

登录 Seeq 服务页面并新建 Seeq Workbench,通过选择数据源搜索结果和根据需要选择不同的工具,可以进行数据展现或预测,详细使用方法参见 [官方知识库](https://support.seeq.com/space/KB/146440193/Seeq+Workbench)。

@@ -4,38 +4,39 @@ title: 与 Superset 集成
---
Apache Superset 是一个现代的企业级商业智能(BI)Web 应用程序,主要用于数据探索和可视化。它由 Apache 软件基金会支持,是一个开源项目,它拥有活跃的社区和丰富的生态系统。Apache Superset 提供了直观的用户界面,使得创建、分享和可视化数据变得简单,同时支持多种数据源和丰富的可视化选项。

通过 TDengine 的 Python 连接器, Apache Superset 可支持 TDengine 数据源并提供数据展现、分析等功能
通过 TDengine 的 Python 连接器, Apache Superset 可支持 TDengine 数据源并提供数据展现、分析等功能。


## 前置条件

准备以下环境:
- TDengine 集群已部署并正常运行(企业及社区版均可)
- taosAdapter 能够正常运行。详细参考 [taosAdapter 使用手册](../../../reference/components/taosadapter)
- Apache Superset v2.1.0 或以上版本已安装。安装 Apache Superset 请参考 [官方文档](https://superset.apache.org/)
- TDengine 3.2.3.0 及以上版本集群已部署并正常运行(企业及社区版均可)。
- taosAdapter 能够正常运行,详细参考 [taosAdapter 使用手册](../../../reference/components/taosadapter)。
- Apache Superset v2.1.0 或以上版本已安装,安装 Apache Superset 请参考 [官方文档](https://superset.apache.org/)。
- 安装 Python 连接器驱动,详细参考 [TDengine Python Connector](../../../reference/connector/python)。


## 安装 TDengine Python 连接器

TDengine Python 连接器从 `v2.1.18` 起带 Superset 连接驱动,会安装至 Superset 相应目录下并向 Superset 提供数据源服务
Superset 与 TDengine 之间使用 WebSocket 协议连接,需安装支持此协议的 `taos-ws-py` 组件, 全部安装脚本如下:
```bash
pip3 install taospy
pip3 install taos-ws-py
```

## 配置 TDengine 数据源

**第 1 步**,进入新建数据库连接页面 "Superset" → "Setting" → "Database Connections" → "+DATABASE"
**第 2 步**,选择 TDengine 数据库连接。"SUPPORTED DATABASES" 下拉列表中选择 "TDengine" 项。
:::tip
注意:若下拉列表中无 "TDengine" 项,请检查安装顺序,确保 `TDengine Python 连接器` 在 `Superset` 安装之后再安装。
TDengine Python 连接器从 `v2.1.18` 起带 Superset 连接驱动,会安装至 Superset 相应目录下并向 Superset 提供数据源服务。
:::

## 配置数据源

**第 1 步**,进入新建数据库连接页面【Superset】->【Setting】->【Database Connections】->【+DATABASE】。

**第 2 步**,选择 TDengine 数据库连接。【SUPPORTED DATABASES】下拉列表中选择 `TDengine` 项。

:::tip
注意:若下拉列表中无 `TDengine` 项,请检查安装顺序,确保 `TDengine Python 连接器` 在 `Superset` 安装之后再安装。
:::
**第 3 步**,"DISPLAY NAME" 中填写连接名称,任意填写即可。
**第 4 步**,"SQLALCHEMY URL" 项为关键连接信息串,务必填写正确。

**第 3 步**,【DISPLAY NAME】中填写连接名称,任意填写即可。

**第 4 步**,【SQLALCHEMY URL】项为关键连接信息串,务必填写正确。

```bash
taosws://用户名:密码@主机名:端口号
```

| 参数名称 | <center>参数说明</center> |
|:------- |:-------------------------------- |
| 用户名 | 登录 TDengine 数据库用户名 |
@@ -43,32 +44,33 @@ taosws://用户名:密码@主机名:端口号
| 主机名 | TDengine 数据库所在主机名称 |
| 端口号 | 提供 WebSocket 服务的端口,默认:6041 |

示例:
本机安装 TDengine 数据库,WebSocket 服务端口 6041,使用默认用户名密码,"SQLALCHEMY URL" 应为:
示例:

本机安装 TDengine 数据库,WebSocket 服务端口 6041,使用默认用户名密码,`SQLALCHEMY URL` 应为:

```bash
taosws://root:taosdata@localhost:6041
```
**第 5 步**,配置好连接串,点击 “TEST CONNECTION” 测试连接是否成功,测试通过后点击 “CONNECT” 按钮,完成连接。
**第 5 步**,配置好连接串,点击【TEST CONNECTION】测试连接是否成功,测试通过后点击【CONNECT】按钮,完成连接。

## 数据分析

## 开始使用
### 数据准备

TDengine 数据源与其它数据源使用上无差别,这里简单介绍下数据查询:
1. Superset 界面点击右上角 “+” 号按钮,选择 “SQL query”, 进入查询界面
2. 左上角 “DATABASE” 下拉列表中选择前面已创建好的 “TDengine” 数据源
3. “SCHEMA” 下拉列表,选择要操作的数据库名(系统库不显示)
4. “SEE TABLE SCHEMA” 选择要操作的超级表名或普通表名(子表不显示)
5. 随后会在下方显示选定表的 SCHEMA 信息
6. 在 SQL 编辑器区域可输入符合 TDengine 语法的任意 SQL 语句执行
TDengine 数据源与其它数据源使用上无差别,这里简单介绍下数据查询:

## 示例效果
1. `Superset` 界面点击右上角【+】号按钮,选择 `SQL query`, 进入查询界面。
2. 左上角【DATABASE】下拉列表中选择前面已创建好的 `TDengine` 数据源。
3. 【SCHEMA】下拉列表,选择要操作的数据库名(系统库不显示)。
4. 【SEE TABLE SCHEMA】选择要操作的超级表名或普通表名(子表不显示)。
5. 随后会在下方显示选定表的 `SCHEMA` 信息。
6. 在 `SQL` 编辑器区域可输入符合 `TDengine` 语法的任意 `SQL` 语句执行。

我们选择 Superset Chart 模板中较流行的两个模板做了效果展示,以智能电表数据为例:
### 智能电表样例

1. "Aggregate" 类型,展示在第 4 组中指定时间段内每分钟采集电压值(voltage)最大值
我们选择【Superset Chart】模板中较流行的两个模板做了效果展示,以智能电表数据为例:

![superset-demo1](./superset-demo1.jpeg)

2. "RAW RECORDS" 类型,展示在第 4 组中指定时间段内 current, voltage 的采集值

![superset-demo2](./superset-demo2.jpeg)
1. `Aggregate` 类型,展示在第 4 组中指定时间段内每分钟采集电压值(voltage)最大值。
![superset-demo1](./superset-demo1.jpeg)
2. `RAW RECORDS` 类型,展示在第 4 组中指定时间段内 current, voltage 的采集值。
![superset-demo2](./superset-demo2.jpeg)

|
@ -11,7 +11,7 @@ Tableau 是一款知名的商业智能工具,它支持多种数据源,可方
|
|||
- TDengine 3.3.5.4 以上版本集群已部署并正常运行(企业及社区版均可)
|
||||
- taosAdapter 能够正常运行。详细参考 [taosAdapter 参考手册](../../../reference/components/taosadapter)
|
||||
- Tableau 桌面版安装并运行(如未安装,请下载并安装 Windows 操作系统 64 位 [Tableau 桌面版](https://www.tableau.com/products/desktop/download) )。安装 Tableau 桌面版请参考 [官方文档](https://www.tableau.com)。
|
||||
- 从TDengine官网下载最新的Windows操作系统X64客户端驱动程序,并进行安装。详细参考 [安装 ODBC 驱动](../../../reference/connector/odbc/#安装)。
|
||||
- 从 TDengine 官网下载最新的 Windows 操作系统 X64 客户端驱动程序,并进行安装。详细参考 [安装 ODBC 驱动](../../../reference/connector/odbc/#安装)。
|
||||
|
||||
|
||||
## 配置数据源
|
||||
|
|
|
@ -459,6 +459,7 @@ taosd 命令行参数如下
|
|||
- 支持版本:从 v3.1.0.0 版本开始引入
|
||||
|
||||
:::info
|
||||
#### 区域相关参数说明
|
||||
1. 为应对多时区的数据写入和查询问题,TDengine 采用 Unix 时间戳(Unix Timestamp)来记录和存储时间戳。Unix 时间戳的特点决定了任一时刻不论在任何时区,产生的时间戳均一致。需要注意的是,Unix 时间戳是在客户端完成转换和记录。为了确保客户端其他形式的时间转换为正确的 Unix 时间戳,需要设置正确的时区。
|
||||
|
||||
在 Linux/macOS 中,客户端会自动读取系统设置的时区信息。用户也可以采用多种方式在配置文件设置时区。例如:
|
||||
|
@@ -1704,30 +1705,24 @@ taosd 会将监控指标上报给 taosKeeper,这些监控指标会被 taosKeep
| duration | VARCHAR | TAG | sql 执行耗时,取值范围 3-10s,10-100s,100-1000s,1000s- |
| cluster\_id | VARCHAR | TAG | cluster id |

## 日志相关
### taos\_slow\_sql\_detail 表

TDengine 通过日志文件记录系统运行状态,帮助用户监控系统运行情况,排查问题,这里主要介绍 taosc 和 taosd 两个系统日志的相关说明。
`taos_slow_sql_detail` 记录客户端慢查询详细信息。子表名规则为 `{user}_{db}_{ip}_clusterId_{cluster_id}`。

TDengine 的日志文件主要包括普通日志和慢日志两种类型。

1. 普通日志行为说明
    1. 同一台机器上可以起多个客户端进程,所以客户端日志命名方式为 taoslogX.Y,其中 X 为序号,为空或者 0 到 9,Y 为后缀 0 或者 1。
    2. 同一台机器上只能有一个服务端进程。所以服务端日志命名方式为 taosdlog.Y,其中 Y 为后缀,0 或者 1。

    序号和后缀确定规则如下(假设日志路径为 /var/log/taos/):
    1. 确定序号:使用 10 个序号作为日志命名方式,/var/log/taos/taoslog0.Y - /var/log/taos/taoslog9.Y,依次检测每个序号是否使用,找到第一个没使用的序号作为该进程的日志文件使用的序号。如果 10 个序号都被进程使用,不使用序号,即 /var/log/taos/taoslog.Y,进程都往相同的文件里写(序号为空)。
    2. 确定后缀:0 或者 1。比如确定序号为 3,备选的日志文件名就为 /var/log/taos/taoslog3.0 /var/log/taos/taoslog3.1。如果两个文件都不存在用后缀 0,一个存在一个不存在,用存在的后缀。两个都存在,用修改时间最近的那个后缀。
    3. 如果日志文件超过配置的条数 numOfLogLines,会切换后缀名,继续写日志,比如 /var/log/taos/taoslog3.0 写够了,切换到 /var/log/taos/taoslog3.1 继续写日志。/var/log/taos/taoslog3.0 会添加时间戳后缀重命名并压缩存储(异步线程操作)。
    4. 通过配置 logKeepDays 控制日志文件保存几天,几天之外的日志会被删除。比如配置为 1,则一天之前的日志会在新日志压缩存储时检测删除。不是自然天。

系统除了记录普通日志以外,对于执行时间超过配置时间的 SQL 语句,会被记录到慢日志中。慢日志文件主要用于分析系统性能,排查性能问题。

2. 慢日志行为说明
    1. 慢日志一方面会记录到本地慢日志文件中,另一方面会通过 taosAdapter 发送到 taosKeeper 进行结构化存储(需打开 monitor 开关)。
    2. 慢日志文件存储规则为:
        1. 慢日志文件一天一个,如果当天没有慢日志,没有当天的文件。
        2. 文件名为 taosSlowLog.yyyy-mm-dd(taosSlowLog.2024-08-02),日志存储路径通过 logDir 配置。
        3. 多个客户端的日志存储在相应日志路径下的同一个 taosSlowLog.yyyy.mm.dd 文件里。
        4. 慢日志文件不自动删除,不压缩。
        5. 使用和普通日志文件相同的三个参数 logDir、minimalLogDirGB、asyncLog。另外两个参数 numOfLogLines、logKeepDays 不适用于慢日志。
| field | type | is\_tag | comment |
| :------------- | :-------- | :------ | :---------------------------------------------------- |
| start\_ts | TIMESTAMP | | sql 开始执行的客户端时间,单位 ms,主键 |
| request\_id | UINT64_T | | sql 请求的 request id,为 hash 生成的随机值 |
| query\_time | INT32_T | | sql 执行耗时,单位 ms |
| code | INT32_T | | sql 执行返回码,0 表示成功 |
| error\_info | VARCHAR | | sql 执行失败时,记录的错误信息 |
| type | INT8_T | | sql 语句的类型(1-查询,2-写入,4-其他) |
| rows\_num | INT64_T | | sql 执行结果的记录数目 |
| sql | VARCHAR | | sql 语句的字符串 |
| process\_name | VARCHAR | | 进程名称 |
| process\_id | VARCHAR | | 进程 id |
| db | VARCHAR | TAG | 执行 sql 所属数据库 |
| user | VARCHAR | TAG | 执行 sql 语句的用户 |
| ip | VARCHAR | TAG | 记录执行 sql 语句的 client 的 ip 地址 |
| cluster\_id | VARCHAR | TAG | cluster id |

|
@ -78,7 +78,7 @@ description: TDengine 保留关键字的详细列表
|
|||
| CLIENT_VERSION | |
|
||||
| CLUSTER | |
|
||||
| COLON | |
|
||||
| COLS | |
|
||||
| COLS | 3.3.6.0 及后续版本 |
|
||||
| COLUMN | |
|
||||
| COMMA | |
|
||||
| COMMENT | |
|
||||
|
|
|
@ -4,6 +4,7 @@ title: 权限管理
|
|||
---
|
||||
|
||||
TDengine 中的权限管理分为[用户管理](../user)、数据库授权管理以及消息订阅授权管理,本节重点说明数据库授权和订阅授权。
|
||||
授权管理仅在 TDengine 企业版中可用,请联系 TDengine 销售团队。授权语法在社区版可用,但不起作用。
|
||||
|
||||
## 数据库访问授权
|
||||
|
||||
|
|
|
@ -68,6 +68,10 @@ WebSocket Connector 历史版本:
|
|||
|
||||
| WebSocket Connector 版本 | 主要变化 | TDengine 版本 |
|
||||
| ----------------------- | ------------------------------------------------------------------------------------ | ----------------- |
|
||||
| 0.3.9 | 修复 fetchmany 自定义行数时获取不完全的问题 | - |
|
||||
| 0.3.8 | 支持 SuperSet 连接到 TDengine 云服务实例 | - |
|
||||
| 0.3.5 | 修复 crypto provider 中的问题 | - |
|
||||
| 0.3.4 | 支持 VARBINARY 和 GEOMETRY 数据类型 | 3.3.0.0 及更高版本 |
|
||||
| 0.3.2 | 优化 WebSocket sql 查询和插入性能,修改 readme 和 文档,修复已知问题 | 3.2.3.0 及更高版本 |
|
||||
| 0.2.9 | 已知问题修复 | - |
|
||||
| 0.2.5 | 1. 数据订阅支持获取消费进度和重置消费进度 <br/> 2. 支持 schemaless <br/> 3. 支持 STMT | - |
|
||||
|
|
|
@ -0,0 +1,107 @@
|
|||
---
|
||||
sidebar_label: 日志系统
|
||||
title: 日志系统
|
||||
toc_max_heading_level: 4
|
||||
---
|
||||
|
||||
TDengine 通过日志文件记录系统运行状态,帮助用户监控系统运行情况,排查问题。Log 分为普通日志和慢日志。引擎测的运行状态通过普通日志的方式记录下来,系统运行相关的慢日志操作则记录到慢日志文件里。
|
||||
|
||||
## 普通日志
|
||||
|
||||
### 普通日志实现逻辑
|
||||
|
||||
- 普通日志分同步和异步两种方式,同步立即写入日志文件,异步写入到 buff 里,然后定时写入日志文件。
|
||||
- 异步方式日志文件缓存在循环 buff 里, buff 的大小为 buffSize = 20 M。如果某次写buf 的日志大小大于buf 可用空间,本次日志会舍弃,日志里记录: ...Lost N lines here...
|
||||

|
||||
- 异步线程里每隔 1 s 会更新磁盘信息用于判断是否有空间写日志
|
||||
- 异步线程每隔 Interval 时间处理一次写入逻辑。写入规则如下:
|
||||
- 如果buff 里数据小于 buffSize/10,不写入磁盘,除非超过1 s。
|
||||
- 如果buff 里数据大于 buffSize/10,全部写入磁盘。
|
||||
- Interval 默认值为 25 ms,Interval 值会根据每次写入日志的大小动态调整。Interval 调试规则如下:
|
||||
- 数据量小时(小于 buffSize/10),增大写入间隔,Interval 每次增加 5ms,最大25ms。
|
||||
- 数据量大时(大于 buffSize/3),写入间隔最小,Interval 为 5ms。
|
||||
- 数据量比较大时(大于 buffSize/4,小于等于buffSize/3),减小写入间隔,Interval 每次减小 5ms,最小5ms。
|
||||
- 数据量适中时(大于等于 buffSize/10,小于等于buffSize/4),写入间隔不变。
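
上述调整规则可以用如下 Python 示意代码概括(仅为便于理解的示意,阈值与步长取自上文描述,并非引擎源码):

```python
def next_interval(interval_ms: int, pending_bytes: int, buff_size: int) -> int:
    """根据本次待写入日志量动态调整下次刷盘间隔(单位 ms)。"""
    if pending_bytes < buff_size // 10:   # 数据量小:增大间隔,最大 25ms
        return min(interval_ms + 5, 25)
    if pending_bytes > buff_size // 3:    # 数据量大:间隔取最小值 5ms
        return 5
    if pending_bytes > buff_size // 4:    # 数据量比较大:减小间隔,最小 5ms
        return max(interval_ms - 5, 5)
    return interval_ms                    # 数据量适中:间隔不变
```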

![图片]()

### 普通日志行为说明

- 普通日志命名规则
  - 同一台机器上可以起多个客户端进程,所以客户端日志命名方式为 taoslogX.Y,其中 X 为序号,为空或者 0 到 9,Y 为后缀 0 或者 1(Windows 限制只有一个序号,所以格式为 taoslog.Y)。
  - 同一台机器上可以起多个服务端进程,所以服务端日志命名方式为 taosdlog.Y,其中 Y 为后缀,0 或者 1。
- 序号和后缀确定规则如下(假设日志路径为 /var/log/taos/)
  - 确定序号:使用 10 个序号作为日志命名方式,/var/log/taos/taoslog0.Y - /var/log/taos/taoslog9.Y,依次检测每个序号是否使用,找到第一个没使用的序号作为该进程的日志文件使用的序号。如果 10 个序号都被进程使用,不使用序号,即 /var/log/taos/taoslog.Y,进程都往相同的文件里写(序号为空)。
  - 确定后缀:0 或者 1。比如确定序号为 3,备选的日志文件名就为 /var/log/taos/taoslog3.0 /var/log/taos/taoslog3.1。如果两个文件都不存在用后缀 0,一个存在一个不存在,用存在的后缀。两个都存在,用修改时间最近的那个后缀。
- 如果日志文件超过配置的条数 numOfLogLines,会切换后缀名,继续写日志,比如 /var/log/taos/taoslog3.0 写够了,切换到 /var/log/taos/taoslog3.1 继续写日志。/var/log/taos/taoslog3.0 会添加时间戳后缀重命名并压缩存储(异步线程操作)。
- 通过配置 logKeepDays 控制日志文件保存几天,几天之外的日志会被删除。比如配置为 1,则一天之前的日志会在新日志压缩存储时检测删除。不是自然天。
- 当文件里日志行数大于 numOfLogLines(默认 1000w,取值范围 1000-20 亿)时,会触发日志归档。
  - 举例:taoslog3.0 写满了,切换到 taoslog3.1 继续写。taoslog3.0 重命名为 taoslog.1735616543,然后压缩为 taoslog.1735616543.gz。同时,如果 logKeepDays > 0,会检测是否有超时的日志文件,然后删除(该过程异步执行)。

## 慢日志

系统除了记录普通日志以外,对于执行时间超过配置时间的操作,会被记录到慢日志中。慢日志文件主要用于分析系统性能,排查性能问题。

### 慢日志实现逻辑

#### 上报架构

![img]()

#### 缓存逻辑

- 为了提高上报效率,慢 sql 日志上报方式为批量上报。
- 慢 sql 日志上报为了防止缓存丢失,采用写临时文件方式来实现缓存(crash 后不会丢失)。
- 每生成一条慢 sql 日志都会放到队列里,然后通知 slow log 线程从队列获取数据,slow log 线程根据数据里 clusterId 写到不同的文件里。

数据格式如下(其中,clusterId 为当前日志所属的慢查询集群 id,value 为一条数据,json 字符串形式):

```c
typedef struct {
  int64_t clusterId;
  char   *value;
} MonitorSlowLogData;
```

- 说明:
  - 因为客户端进程里可能存在很多个链接 connection,所以需要将慢查询日志根据 clusterId 来分组。分组方式通过临时文件名来实现,命名方式为 `{tmp dir}/tdengine_slow_log/tdengeine-{clusterId1}-{processId}-{rand}`,processId 为进程 ID,主要为了区分多个客户端的上报。
  - 如上图,connection 1 连接的是 cluster 1,connection 2、connection 3 连接的是 cluster 2,所以 connection 1 的慢 sql 数据写入文件 `{tmp dir}/tdengine_slow_log/tdengeine-{clusterId1}-{processId}-{rand}`,connection 2 和 connection 3 的慢 sql 数据写入文件 `{tmp dir}/tdengine_slow_log/tdengeine-{clusterId2}-{processId}-{rand}`。

#### 上报逻辑

- 读取 `{tmp dir}/tdengine_slow_log/tdengeine-{clusterId1}-{processId}-{rand}` 临时文件内容,每行数据作为 json 数组的一个元素,组装成 json 数组上报。文件里数据每接近 1M 大小上报一次,上报成功后记录读取文件进度;上报采用异步方式,在 callback 里根据上次的进度继续读取文件内容上报,直至整个文件读取上报完毕,上报完毕后会清空临时文件;callback 里成功或失败都会继续读取文件,失败时会记录上报失败的数据日志。每接近 1M 上报一次,主要是为了防止文件太大,导致一次上报失败。

#### 上报时机

- 客户端运行过程中定时上报
  - 每个 monitorInterval 时间间隔上报数据。
- 客户端正常退出
  - 上报所有慢 sql 日志文件,上报成功后,删除文件。
- 客户端异常退出
  - 异常退出后再次与某个集群(clusterId)建立新的链接后,遍历 `{tmp dir}/tdengine_slow_log/` 目录下 `tdengine-{clusterId}` 开头的所有文件进行重新上报(这些文件可能是另一个客户端进程或本进程正在操作的,所以每个文件打开时都需要添加文件锁),然后删除这个临时文件。

#### 一些异常行为说明

- 因为上报数据和删除文件里的上报内容没法作为一个原子操作,所以如果上报后还没删除数据就 crash,可能导致下次重复上报;重复上报的数据会覆盖,并没丢失,影响很小。
- 另外为了保证性能,slow log thread 线程把慢 sql 日志写入临时文件缓存,只保证刷新到操作系统的磁盘缓冲区,并不真正每次都 fsync 到磁盘,所以如果机器断电,仍可能丢失数据。该异常出现概率很小,可以容忍此种情况下的数据丢失。

### 慢日志行为说明

- 慢日志一方面会记录到本地慢日志文件中,另一方面会通过 taosAdapter 发送到 taosKeeper 进行结构化存储(需打开 monitor 开关)。
- 慢日志文件存储规则为:
  - 慢日志文件一天一个,如果当天没有慢日志,没有当天的文件。
  - 文件名为 taosSlowLog.yyyy-mm-dd(taosSlowLog.2024-08-02),日志存储路径通过 logDir 配置。
  - 多个客户端的日志存储在相应日志路径下的同一个 taosSlowLog.yyyy.mm.dd 文件里。
  - 慢日志文件不自动删除,不压缩。
  - 使用和普通日志文件相同的三个参数 logDir、minimalLogDirGB、asyncLog。另外两个参数 numOfLogLines、logKeepDays 不适用于慢日志。

## 日志级别说明

日志级别分为 9 种,如下所示:

```c
typedef enum {
  DEBUG_FATAL = 1,
  DEBUG_ERROR = 1,
  DEBUG_WARN = 2,
  DEBUG_INFO = 2,
  DEBUG_DEBUG = 4,
  DEBUG_TRACE = 8,
  DEBUG_DUMP = 16,
  DEBUG_SCREEN = 64,
  DEBUG_FILE = 128
} ELogLevel;
```

日志开关通过 bit 位来控制,具体如下:

![img]()

例如:
- 131 = 128 + 2 + 1,即 文件 + info + error
- 135 = 128 + 4 + 2 + 1,即 文件 + debug + info + error
- 143 = 128 + 8 + 4 + 2 + 1,即 文件 + trace + debug + info + error

通过设置日志开关的参数,可以开启不同级别的日志。
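
按照上述 bit 位规则,可以用如下 Python 示意代码拆解任意日志开关值并验证上面三个例子(仅为便于理解的示意):

```python
# 与上文 ELogLevel 对应的各 bit 位含义(FATAL/ERROR 共用 1,WARN/INFO 共用 2)
FLAGS = {128: "文件", 64: "screen", 16: "dump", 8: "trace", 4: "debug", 2: "info", 1: "error"}

def describe(level: int) -> str:
    """按 bit 位拆解日志开关值。"""
    return " + ".join(name for bit, name in FLAGS.items() if level & bit)

print(describe(131))  # 文件 + info + error
print(describe(135))  # 文件 + debug + info + error
print(describe(143))  # 文件 + trace + debug + info + error
```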

4 binary image files added (72 KiB, 64 KiB, 446 KiB, 46 KiB); contents not shown.

@@ -0,0 +1,14 @@
---
title: 产品路线图
---

TDengine OSS 之 2025 年年度路线图如下表所示。

| 季度 | 功能 |
| :----- | :----- |
| 2025Q1 | <ol><li>虚拟表</li><li>查询能力:<code>REGEXP</code>、<code>GREATEST</code>、<code>LEAST</code>、<code>CAST</code> 函数支持判断表达式、单行选择函数的其他列值、<code>INTERP</code> 支持插值时间范围</li><li>存储能力:支持将查询结果写入超级表、超级表支持 <code>KEEP</code> 参数、STMT 写入性能提升</li><li>流计算:支持虚拟表、计算资源优化、事件通知机制、创建时间优化</li><li>数据类型:Decimal</li><li>高可用:加快宕机恢复时间、优化客户端 Failover 机制</li><li>稳定性:开始维护新的稳定版本 3.3.6.x</li><li>JDBC:高效写入</li><li>生态工具:对接 Tableau</li><li>生态工具:对接 Excel</li></ol> |
| 2025Q2 | <ol><li>查询能力:大幅放宽关联查询限制、支持 MySQL 所有数学函数、支持积分/积分平均/连续方差函数、<code>CSUM</code> 函数优化、<code>COUNT(DISTINCT)</code> 语法、事件窗口功能增强、提升标签过滤性能、提升 <code>INTERP</code> 查询性能</li><li>存储能力:TSMA 计算资源优化、写入抖动优化</li><li>流计算:节点高可用</li><li>数据类型:BLOB</li><li>数据订阅:支持 MQTT 协议</li><li>高可用:提高副本变更速度、提高集群宕机恢复速度、优化断电数据恢复机制</li><li>可观测性:写入诊断工具</li><li>生态工具:对接帆软 FineBI</li></ol> |
| 2025Q3 | <ol><li>查询能力:支持更多子查询类型、支持 MySQL 运算符、支持 MySQL 所有时间函数、窗口计算逻辑优化、查询性能抖动、计数窗口允许指定列</li><li>存储能力:提高 SQL 模式写入速度</li><li>可观测性:查询诊断工具、优化 <code>EXPLAIN</code> 输出、长任务观测</li></ol> |
| 2025Q4 | <ol><li>查询能力:窗口函数(<code>OVER</code> 子句)、支持 MySQL 所有字符串/聚合/条件函数、Partition 支持组内排序、控制查询资源占用、提高子表聚合查询性能、<code>INTERVAL</code> 窗口支持插值时间范围</li><li>数据类型:支持不定长度字符串数据类型</li><li>数据缓存:提升按行缓存性能</li><li>可观测性:增强运维可观测性</li></ol> |

欲了解更多信息,请参见 [TDengine Public Roadmap](https://github.com/orgs/taosdata/projects/4)。
@@ -39,38 +39,41 @@ static FORCE_INLINE int32_t stmtAllocQNodeFromBuf(STableBufInfo* pTblBuf, void**
}

bool stmtDequeue(STscStmt* pStmt, SStmtQNode** param) {
(void)taosThreadMutexLock(&pStmt->queue.mutex);
int i = 0;
while (0 == atomic_load_64((int64_t*)&pStmt->queue.qRemainNum)) {
(void)taosThreadCondWait(&pStmt->queue.waitCond, &pStmt->queue.mutex);
if (atomic_load_8((int8_t*)&pStmt->queue.stopQueue)) {
if (i < 10) {
taosUsleep(1);
i++;
} else {
(void)taosThreadMutexLock(&pStmt->queue.mutex);
if (0 == atomic_load_64((int64_t*)&pStmt->queue.qRemainNum)) {
(void)taosThreadCondWait(&pStmt->queue.waitCond, &pStmt->queue.mutex);
}
(void)taosThreadMutexUnlock(&pStmt->queue.mutex);
return false;
}
}
if (pStmt->queue.stopQueue) {
return false;
}
SStmtQNode* orig = pStmt->queue.head;
SStmtQNode* node = pStmt->queue.head->next;
pStmt->queue.head = pStmt->queue.head->next;
*param = node;

(void)atomic_sub_fetch_64((int64_t*)&pStmt->queue.qRemainNum, 1);
(void)taosThreadMutexUnlock(&pStmt->queue.mutex);


*param = node;

return true;
}

void stmtEnqueue(STscStmt* pStmt, SStmtQNode* param) {
(void)taosThreadMutexLock(&pStmt->queue.mutex);

pStmt->queue.tail->next = param;
pStmt->queue.tail = param;

pStmt->stat.bindDataNum++;

(void)taosThreadMutexLock(&pStmt->queue.mutex);
(void)atomic_add_fetch_64(&pStmt->queue.qRemainNum, 1);
(void)taosThreadCondSignal(&(pStmt->queue.waitCond));

(void)taosThreadMutexUnlock(&pStmt->queue.mutex);
}

@@ -423,11 +426,9 @@ void stmtResetQueueTableBuf(STableBufInfo* pTblBuf, SStmtQueue* pQueue) {
pTblBuf->buffIdx = 1;
pTblBuf->buffOffset = sizeof(*pQueue->head);

(void)taosThreadMutexLock(&pQueue->mutex);
pQueue->head = pQueue->tail = pTblBuf->pCurBuff;
pQueue->qRemainNum = 0;
pQueue->head->next = NULL;
(void)taosThreadMutexUnlock(&pQueue->mutex);
}

int32_t stmtCleanExecInfo(STscStmt* pStmt, bool keepTable, bool deepClean) {
@@ -778,7 +779,7 @@ void* stmtBindThreadFunc(void* param) {
STscStmt* pStmt = (STscStmt*)param;

while (true) {
if (atomic_load_8((int8_t*)&pStmt->queue.stopQueue)) {
if (pStmt->queue.stopQueue) {
break;
}

@@ -1630,8 +1631,9 @@ int stmtClose(TAOS_STMT* stmt) {
STMT_DLOG_E("start to free stmt");

pStmt->queue.stopQueue = true;

(void)taosThreadMutexLock(&pStmt->queue.mutex);
(void)atomic_add_fetch_64(&pStmt->queue.qRemainNum, 1);
(void)taosThreadCondSignal(&(pStmt->queue.waitCond));
(void)taosThreadMutexUnlock(&pStmt->queue.mutex);

@@ -39,34 +39,41 @@ static FORCE_INLINE int32_t stmtAllocQNodeFromBuf(STableBufInfo* pTblBuf, void**
}

static bool stmtDequeue(STscStmt2* pStmt, SStmtQNode** param) {
(void)taosThreadMutexLock(&pStmt->queue.mutex);
int i = 0;
while (0 == atomic_load_64((int64_t*)&pStmt->queue.qRemainNum)) {
(void)taosThreadCondWait(&pStmt->queue.waitCond, &pStmt->queue.mutex);
if (atomic_load_8((int8_t*)&pStmt->queue.stopQueue)) {
if (i < 10) {
taosUsleep(1);
i++;
} else {
(void)taosThreadMutexLock(&pStmt->queue.mutex);
if (0 == atomic_load_64((int64_t*)&pStmt->queue.qRemainNum)) {
(void)taosThreadCondWait(&pStmt->queue.waitCond, &pStmt->queue.mutex);
}
(void)taosThreadMutexUnlock(&pStmt->queue.mutex);
return false;
}
}
if (pStmt->queue.stopQueue) {
return false;
}
SStmtQNode* orig = pStmt->queue.head;
SStmtQNode* node = pStmt->queue.head->next;
pStmt->queue.head = pStmt->queue.head->next;
*param = node;

(void)atomic_sub_fetch_64((int64_t*)&pStmt->queue.qRemainNum, 1);
(void)taosThreadMutexUnlock(&pStmt->queue.mutex);

return true;
}

static void stmtEnqueue(STscStmt2* pStmt, SStmtQNode* param) {
(void)taosThreadMutexLock(&pStmt->queue.mutex);

pStmt->queue.tail->next = param;
pStmt->queue.tail = param;
pStmt->stat.bindDataNum++;
(void)atomic_add_fetch_64(&pStmt->queue.qRemainNum, 1);
(void)taosThreadCondSignal(&(pStmt->queue.waitCond));

pStmt->stat.bindDataNum++;

(void)taosThreadMutexLock(&pStmt->queue.mutex);
(void)atomic_add_fetch_64(&pStmt->queue.qRemainNum, 1);
(void)taosThreadCondSignal(&(pStmt->queue.waitCond));
(void)taosThreadMutexUnlock(&pStmt->queue.mutex);
}

@@ -343,11 +350,9 @@ static void stmtResetQueueTableBuf(STableBufInfo* pTblBuf, SStmtQueue* pQueue) {
pTblBuf->buffIdx = 1;
pTblBuf->buffOffset = sizeof(*pQueue->head);

(void)taosThreadMutexLock(&pQueue->mutex);
pQueue->head = pQueue->tail = pTblBuf->pCurBuff;
pQueue->qRemainNum = 0;
pQueue->head->next = NULL;
(void)taosThreadMutexUnlock(&pQueue->mutex);
}

static int32_t stmtCleanExecInfo(STscStmt2* pStmt, bool keepTable, bool deepClean) {
@@ -704,7 +709,7 @@ static void* stmtBindThreadFunc(void* param) {
STscStmt2* pStmt = (STscStmt2*)param;

while (true) {
if (atomic_load_8((int8_t*)&pStmt->queue.stopQueue)) {
if (pStmt->queue.stopQueue) {
break;
}

@@ -1788,6 +1793,7 @@ int stmtClose2(TAOS_STMT2* stmt) {
pStmt->queue.stopQueue = true;

(void)taosThreadMutexLock(&pStmt->queue.mutex);
(void)atomic_add_fetch_64(&pStmt->queue.qRemainNum, 1);
(void)taosThreadCondSignal(&(pStmt->queue.waitCond));
(void)taosThreadMutexUnlock(&pStmt->queue.mutex);

@@ -116,7 +116,7 @@ void mndReleaseStream(SMnode *pMnode, SStreamObj *pStream);
int32_t mndDropStreamByDb(SMnode *pMnode, STrans *pTrans, SDbObj *pDb);
int32_t mndPersistStream(STrans *pTrans, SStreamObj *pStream);
int32_t mndStreamRegisterTrans(STrans *pTrans, const char *pTransName, int64_t streamId);
int32_t mndStreamClearFinishedTrans(SMnode *pMnode, int32_t *pNumOfActiveChkpt);
int32_t mndStreamClearFinishedTrans(SMnode *pMnode, int32_t *pNumOfActiveChkpt, SArray *pLongChkptTrans);
int32_t mndStreamTransConflictCheck(SMnode *pMnode, int64_t streamId, const char *pTransName, bool lock);
int32_t mndStreamGetRelTrans(SMnode *pMnode, int64_t streamId);

@@ -159,6 +159,7 @@ void removeTasksInBuf(SArray *pTaskIds, SStreamExecInfo *pExecInfo);
int32_t mndFindChangedNodeInfo(SMnode *pMnode, const SArray *pPrevNodeList, const SArray *pNodeList,
                               SVgroupChangeInfo *pInfo);
void killAllCheckpointTrans(SMnode *pMnode, SVgroupChangeInfo *pChangeInfo);
void killChkptAndResetStreamTask(SMnode *pMnode, SArray *pLongChkpts);
bool isNodeUpdateTransActive();

int32_t createStreamTaskIter(SStreamObj *pStream, SStreamTaskIter **pIter);
@@ -1339,17 +1339,47 @@ static int32_t mndProcessStreamCheckpoint(SRpcMsg *pReq) {
SStreamObj *pStream = NULL;
int32_t code = 0;
int32_t numOfCheckpointTrans = 0;
SArray *pLongChkpts = NULL;
SArray *pList = NULL;
int64_t now = taosGetTimestampMs();

if ((code = mndCheckTaskAndNodeStatus(pMnode)) != 0) {
return TSDB_CODE_STREAM_TASK_IVLD_STATUS;
}

SArray *pList = taosArrayInit(4, sizeof(SCheckpointInterval));
pList = taosArrayInit(4, sizeof(SCheckpointInterval));
if (pList == NULL) {
mError("failed to init chkptInterval info, not handle stream checkpoint, code:%s", tstrerror(terrno));
return terrno;
}

int64_t now = taosGetTimestampMs();
pLongChkpts = taosArrayInit(4, sizeof(SStreamTransInfo));
if (pLongChkpts == NULL) {
mError("failed to init long checkpoint list, not handle stream checkpoint, code:%s", tstrerror(terrno));
taosArrayDestroy(pList);
return terrno;
}

// check if ongoing checkpoint trans or long chkpt trans exist.
code = mndStreamClearFinishedTrans(pMnode, &numOfCheckpointTrans, pLongChkpts);
if (code) {
mError("failed to clear finish trans, code:%s", tstrerror(code));

taosArrayDestroy(pList);
taosArrayDestroy(pLongChkpts);
return code;
}

// kill long exec checkpoint and set task status
if (taosArrayGetSize(pLongChkpts) > 0) {
killChkptAndResetStreamTask(pMnode, pLongChkpts);

taosArrayDestroy(pList);
taosArrayDestroy(pLongChkpts);
return TSDB_CODE_SUCCESS;
}

taosArrayDestroy(pLongChkpts);

while ((pIter = sdbFetch(pSdb, SDB_STREAM, pIter, (void **)&pStream)) != NULL) {
int64_t duration = now - pStream->checkpointFreq;
@@ -1385,12 +1415,6 @@ static int32_t mndProcessStreamCheckpoint(SRpcMsg *pReq) {
}

taosArraySort(pList, streamWaitComparFn);
code = mndStreamClearFinishedTrans(pMnode, &numOfCheckpointTrans);
if (code) {
mError("failed to clear finish trans, code:%s", tstrerror(code));
taosArrayDestroy(pList);
return code;
}

int32_t numOfQual = taosArrayGetSize(pList);
if (numOfCheckpointTrans >= tsMaxConcurrentCheckpoint) {
@@ -16,6 +16,8 @@
#include "mndStream.h"
#include "mndTrans.h"

#define MAX_CHKPT_EXEC_ELAPSED (600*1000) // 600s

typedef struct SKeyInfo {
void *pKey;
int32_t keyLen;
@@ -31,11 +33,12 @@ int32_t mndStreamRegisterTrans(STrans *pTrans, const char *pTransName, int64_t s
return taosHashPut(execInfo.transMgmt.pDBTrans, &streamId, sizeof(streamId), &info, sizeof(SStreamTransInfo));
}

int32_t mndStreamClearFinishedTrans(SMnode *pMnode, int32_t *pNumOfActiveChkpt) {
int32_t mndStreamClearFinishedTrans(SMnode *pMnode, int32_t *pNumOfActiveChkpt, SArray *pLongChkptTrans) {
size_t keyLen = 0;
void *pIter = NULL;
SArray *pList = taosArrayInit(4, sizeof(SKeyInfo));
int32_t numOfChkpt = 0;
int64_t now = taosGetTimestampMs();

if (pNumOfActiveChkpt != NULL) {
*pNumOfActiveChkpt = 0;
@@ -63,6 +66,18 @@ int32_t mndStreamClearFinishedTrans(SMnode *pMnode, int32_t *pNumOfActiveChkpt)
} else {
if (strcmp(pEntry->name, MND_STREAM_CHECKPOINT_NAME) == 0) {
numOfChkpt++;

// lasted for more than 10 min, kill it
int64_t dur = now - pTrans->createdTime;
if ((dur >= MAX_CHKPT_EXEC_ELAPSED) && (pLongChkptTrans != NULL)) {
mInfo("long chkpt transId:%d, start:%" PRId64
" exec duration:%.2fs, beyond threshold %.2f min, kill it and reset task status",
pTrans->id, pTrans->createdTime, dur / 1000.0, MAX_CHKPT_EXEC_ELAPSED/(1000*60.0));
void* p = taosArrayPush(pLongChkptTrans, pEntry);
if (p == NULL) {
mError("failed to add long checkpoint trans, transId:%d, code:%s", pEntry->transId, tstrerror(terrno));
}
}
}
mndReleaseTrans(pMnode, pTrans);
}
@ -101,7 +116,7 @@ static int32_t doStreamTransConflictCheck(SMnode *pMnode, int64_t streamId, cons
|
|||
}
|
||||
|
||||
// if any task updates exist, any other stream trans are not allowed to be created
|
||||
int32_t code = mndStreamClearFinishedTrans(pMnode, NULL);
|
||||
int32_t code = mndStreamClearFinishedTrans(pMnode, NULL, NULL);
|
||||
if (code) {
|
||||
mError("failed to clear finish trans, code:%s, and continue", tstrerror(code));
|
||||
}
|
||||
|
@ -160,7 +175,7 @@ int32_t mndStreamGetRelTrans(SMnode *pMnode, int64_t streamId) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
int32_t code = mndStreamClearFinishedTrans(pMnode, NULL);
|
||||
int32_t code = mndStreamClearFinishedTrans(pMnode, NULL, NULL);
|
||||
if (code) {
|
||||
mError("failed to clear finish trans, code:%s", tstrerror(code));
|
||||
}
|
||||
|
@ -361,3 +376,37 @@ void killAllCheckpointTrans(SMnode *pMnode, SVgroupChangeInfo *pChangeInfo) {
|
|||
|
||||
mDebug("complete clear checkpoints in all Dbs");
|
||||
}
|
||||
|
||||
void killChkptAndResetStreamTask(SMnode *pMnode, SArray* pLongChkpts) {
|
||||
int32_t code = 0;
|
||||
int64_t now = taosGetTimestampMs();
|
||||
int32_t num = taosArrayGetSize(pLongChkpts);
|
||||
|
||||
mInfo("start to kill %d long checkpoint trans", num);
|
||||
|
||||
for(int32_t i = 0; i < num; ++i) {
|
||||
SStreamTransInfo* pTrans = (SStreamTransInfo*) taosArrayGet(pLongChkpts, i);
|
||||
if (pTrans == NULL) {
|
||||
continue;
|
||||
}
|
||||
|
||||
double el = (now - pTrans->startTime) / 1000.0;
|
||||
mInfo("stream:0x%" PRIx64 " start to kill ongoing long checkpoint transId:%d, elapsed time:%.2fs. killed",
|
||||
pTrans->streamId, pTrans->transId, el);
|
||||
|
||||
SStreamObj *p = NULL;
|
||||
code = mndGetStreamObj(pMnode, pTrans->streamId, &p);
|
||||
if (code == 0 && p != NULL) {
|
||||
mndKillTransImpl(pMnode, pTrans->transId, p->sourceDb);
|
||||
|
||||
mDebug("stream:%s 0x%" PRIx64 " transId:%d checkpointId:%" PRId64 " create reset task trans", p->name,
|
||||
pTrans->streamId, pTrans->transId, p->checkpointId);
|
||||
|
||||
code = mndCreateStreamResetStatusTrans(pMnode, p, p->checkpointId);
|
||||
if (code) {
|
||||
mError("stream:%s 0x%"PRIx64" failed to create reset stream task, code:%s", p->name, p->uid, tstrerror(code));
|
||||
}
|
||||
sdbRelease(pMnode->pSdb, p);
|
||||
}
|
||||
}
|
||||
}
|
|
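
Note on the mnode hunks above: mndStreamClearFinishedTrans now also collects checkpoint transactions that have been executing longer than MAX_CHKPT_EXEC_ELAPSED (600s), and mndProcessStreamCheckpoint kills those via killChkptAndResetStreamTask before scheduling new checkpoints. A minimal sketch of the threshold decision, in Python with illustrative names (not the TDengine implementation):

import time

MAX_CHKPT_EXEC_ELAPSED_MS = 600 * 1000  # mirrors the 600s C macro above

def find_long_checkpoints(trans_list, now_ms=None):
    # Collect transactions whose elapsed time has passed the threshold;
    # the caller is then expected to kill them and reset the task status.
    now_ms = int(time.time() * 1000) if now_ms is None else now_ms
    return [t for t in trans_list
            if now_ms - t["created_ms"] >= MAX_CHKPT_EXEC_ELAPSED_MS]

# An 11-minute-old checkpoint trans is flagged; a 10-second-old one is not.
trans = [{"id": 1, "created_ms": 0}, {"id": 2, "created_ms": 650_000}]
print([t["id"] for t in find_long_checkpoints(trans, now_ms=660_000)])  # [1]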
@@ -876,7 +876,7 @@ int32_t projectApplyFunctions(SExprInfo* pExpr, SSDataBlock* pResult, SSDataBloc
     }
 
     pResult->info.rows = 1;
-    TSDB_CHECK_CODE(code, lino, _exit);
+    goto _exit;
   }
 
   if (pResult != pSrcBlock) {
@@ -251,6 +251,11 @@ int32_t updateEventWindowInfo(SStreamAggSupporter* pAggSup, SEventWindowInfo* pW
       pWin->ekey = pTsData[i];
       pWinInfo->pWinFlag->endFlag = ends[i];
     } else if (pWin->ekey == pTsData[i]) {
+      if (pWinInfo->pWinFlag->endFlag == true && ends[i] == false) {
+        (*pWinRow) = i + 1 - start;
+        *pRebuild = true;
+        goto _end;
+      }
       pWinInfo->pWinFlag->endFlag |= ends[i];
     } else if (ends[i] && !pWinInfo->pWinFlag->endFlag) {
       *pRebuild = true;
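
The added branch above handles a row that lands exactly on the window's end key: if the window was already marked ended but the incoming row is not an end row, the window must be rebuilt from that row on; otherwise the end flags are simply merged. A small Python sketch of that decision (illustrative only, not the TDengine code):

def update_end_flag(win_end_flag, row_is_end):
    # Returns (new_flag, rebuild) for a row whose ts equals the window ekey.
    if win_end_flag and not row_is_end:
        return win_end_flag, True         # closed window hit again -> rebuild
    return win_end_flag or row_is_end, False  # merge flags and continue

print(update_end_flag(True, False))   # (True, True)  -> rebuild needed
print(update_end_flag(False, True))   # (True, False) -> window now closed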
@@ -474,14 +474,12 @@ void destroyFlusedppPos(void* ppRes) {
 }
 
 void clearGroupResInfo(SGroupResInfo* pGroupResInfo) {
-  if (pGroupResInfo->freeItem) {
-    int32_t size = taosArrayGetSize(pGroupResInfo->pRows);
-    for (int32_t i = pGroupResInfo->index; i < size; i++) {
-      void* pPos = taosArrayGetP(pGroupResInfo->pRows, i);
-      destroyFlusedPos(pPos);
-    }
-    pGroupResInfo->freeItem = false;
-  }
+  int32_t size = taosArrayGetSize(pGroupResInfo->pRows);
+  for (int32_t i = pGroupResInfo->index; i < size; i++) {
+    void* pPos = taosArrayGetP(pGroupResInfo->pRows, i);
+    destroyFlusedPos(pPos);
+  }
+  pGroupResInfo->freeItem = false;
   taosArrayDestroy(pGroupResInfo->pRows);
   pGroupResInfo->pRows = NULL;
   pGroupResInfo->index = 0;
@@ -777,7 +777,8 @@ static int32_t doStreamExecTask(SStreamTask* pTask) {
   int32_t code = 0;
 
   // merge multiple input data if possible in the input queue.
-  stDebug("s-task:%s start to extract data block from inputQ", id);
+  int64_t st = taosGetTimestampMs();
+  stDebug("s-task:%s start to extract data block from inputQ, ts:%" PRId64, id, st);
 
   while (1) {
     int32_t blockSize = 0;

@@ -807,8 +808,6 @@ static int32_t doStreamExecTask(SStreamTask* pTask) {
       return 0;
     }
 
-    int64_t st = taosGetTimestampMs();
-
     EExtractDataCode ret = streamTaskGetDataFromInputQ(pTask, &pInput, &numOfBlocks, &blockSize);
     if (ret == EXEC_AFTER_IDLE) {
       streamTaskSetIdleInfo(pTask, MIN_INVOKE_INTERVAL);

@@ -825,6 +824,10 @@ static int32_t doStreamExecTask(SStreamTask* pTask) {
     // dispatch checkpoint msg to all downstream tasks
     int32_t type = pInput->type;
     if (type == STREAM_INPUT__CHECKPOINT_TRIGGER) {
+#if 0
+      // Injection error: for automatic kill long trans test
+      taosMsleep(50*1000);
+#endif
       code = streamProcessCheckpointTriggerBlock(pTask, (SStreamDataBlock*)pInput);
       if (code != 0) {
         stError("s-task:%s failed to process checkpoint-trigger block, code:%s", pTask->id.idStr, tstrerror(code));
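
The #if 0 block above is compiled-out fault injection: when enabled, processing a checkpoint-trigger block stalls for 50s so that long-running checkpoint transactions can be produced on demand and the new kill-long-trans path exercised. The same toggle pattern in Python, as an assumption-labeled sketch (names invented for illustration):

import time

INJECT_SLOW_CHECKPOINT = False  # keep off; enable only to test the watchdog

def process_checkpoint_trigger(block):
    if INJECT_SLOW_CHECKPOINT:
        time.sleep(50)  # artificial stall, analogous to taosMsleep(50*1000)
    # ... normal checkpoint-trigger handling would follow here ...
    return 0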
@@ -24,7 +24,7 @@ import platform
 import socket
 import threading
 import importlib
-
+import ast
 import toml
 
 from frame.log import *

@@ -56,6 +56,17 @@ def checkRunTimeError():
     if hwnd:
         os.system("TASKKILL /F /IM taosd.exe")
 
+def get_local_classes_in_order(file_path):
+    with open(file_path, "r", encoding="utf-8") as file:
+        tree = ast.parse(file.read(), filename=file_path)
+
+    classes = [node.name for node in ast.walk(tree) if isinstance(node, ast.ClassDef)]
+    return classes
+
+def dynamicLoadModule(fileName):
+    moduleName = fileName.replace(".py", "").replace(os.sep, ".")
+    return importlib.import_module(moduleName, package='..')
+
 #
 # run case on previous cluster
 #

@@ -66,9 +77,11 @@ def runOnPreviousCluster(host, config, fileName):
     sep = "/"
     if platform.system().lower() == 'windows':
         sep = os.sep
-    moduleName = fileName.replace(".py", "").replace(sep, ".")
-    uModule = importlib.import_module(moduleName)
-    case = uModule.TDTestCase()
+
+    uModule = dynamicLoadModule(fileName)
+    class_names = get_local_classes_in_order(fileName)
+    case_class = getattr(uModule, class_names[-1])
+    case = case_class()
 
     # create conn
     conn = taos.connect(host, config)

@@ -358,10 +371,11 @@ if __name__ == "__main__":
         updateCfgDictStr = ''
         # adapter_cfg_dict_str = ''
         if is_test_framework:
-            moduleName = fileName.replace(".py", "").replace(os.sep, ".")
-            uModule = importlib.import_module(moduleName)
+            uModule = dynamicLoadModule(fileName)
             try:
-                ucase = uModule.TDTestCase()
+                class_names = get_local_classes_in_order(fileName)
+                case_class = getattr(uModule, class_names[-1])
+                ucase = case_class()
                 if ((json.dumps(updateCfgDict) == '{}') and hasattr(ucase, 'updatecfgDict')):
                     updateCfgDict = ucase.updatecfgDict
                     updateCfgDictStr = "-d %s"%base64.b64encode(json.dumps(updateCfgDict).encode()).decode()

@@ -530,10 +544,11 @@ if __name__ == "__main__":
             except:
                 pass
             if is_test_framework:
-                moduleName = fileName.replace(".py", "").replace("/", ".")
-                uModule = importlib.import_module(moduleName)
+                uModule = dynamicLoadModule(fileName)
                 try:
-                    ucase = uModule.TDTestCase()
+                    class_names = get_local_classes_in_order(fileName)
+                    case_class = getattr(uModule, class_names[-1])
+                    ucase = case_class()
                     if (json.dumps(updateCfgDict) == '{}'):
                         updateCfgDict = ucase.updatecfgDict
                     if (json.dumps(adapter_cfg_dict) == '{}'):
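
The two helpers introduced above replace the hard-coded uModule.TDTestCase() lookup: get_local_classes_in_order parses the case file with ast and returns class names in definition order, and the runner instantiates the last one. A runnable sketch of the idea:

import ast

source = '''
class Helper:
    pass

class TDTestCase:
    def run(self):
        print("running")
'''

tree = ast.parse(source)
classes = [n.name for n in ast.walk(tree) if isinstance(n, ast.ClassDef)]
print(classes)      # ['Helper', 'TDTestCase'] -- source order
print(classes[-1])  # 'TDTestCase' -- the class the runner instantiates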
@@ -22,6 +22,9 @@ import json
 import platform
 import socket
 import threading
+import ast
+import importlib
+import os
 
 import toml
 

@@ -56,6 +59,17 @@ def checkRunTimeError():
     if hwnd:
         os.system("TASKKILL /F /IM taosd.exe")
 
+def get_local_classes_in_order(file_path):
+    with open(file_path, "r", encoding="utf-8") as file:
+        tree = ast.parse(file.read(), filename=file_path)
+
+    classes = [node.name for node in ast.walk(tree) if isinstance(node, ast.ClassDef)]
+    return classes
+
+def dynamicLoadModule(fileName):
+    moduleName = fileName.replace(".py", "").replace(os.sep, ".")
+    return importlib.import_module(moduleName, package='..')
+
 
 if __name__ == "__main__":
 

@@ -295,10 +309,11 @@ if __name__ == "__main__":
         updateCfgDictStr = ""
         # adapter_cfg_dict_str = ''
         if is_test_framework:
-            moduleName = fileName.replace(".py", "").replace(os.sep, ".")
-            uModule = importlib.import_module(moduleName)
+            uModule = dynamicLoadModule(fileName)
             try:
-                ucase = uModule.TDTestCase()
+                class_names = get_local_classes_in_order(fileName)
+                case_class = getattr(uModule, class_names[-1])
+                ucase = case_class()
                 if (json.dumps(updateCfgDict) == "{}") and hasattr(
                     ucase, "updatecfgDict"
                 ):

@@ -434,10 +449,11 @@ if __name__ == "__main__":
             except:
                 pass
             if is_test_framework:
-                moduleName = fileName.replace(".py", "").replace("/", ".")
-                uModule = importlib.import_module(moduleName)
+                uModule = dynamicLoadModule(fileName)
                 try:
-                    ucase = uModule.TDTestCase()
+                    class_names = get_local_classes_in_order(fileName)
+                    case_class = getattr(uModule, class_names[-1])
+                    ucase = case_class()
                     if json.dumps(updateCfgDict) == "{}":
                         updateCfgDict = ucase.updatecfgDict
                     if json.dumps(adapter_cfg_dict) == "{}":
@@ -19,6 +19,7 @@ import subprocess
 import time
 from distutils.log import warn as printf
 import platform
+import ast
 
 from util.log import *
 from util.dnodes import *

@@ -26,6 +27,17 @@ from util.cases import *
 
 import taos
 
+def get_local_classes_in_order(file_path):
+    with open(file_path, "r", encoding="utf-8") as file:
+        tree = ast.parse(file.read(), filename=file_path)
+
+    classes = [node.name for node in ast.walk(tree) if isinstance(node, ast.ClassDef)]
+    return classes
+
+def dynamicLoadModule(fileName):
+    moduleName = fileName.replace(".py", "").replace(os.sep, ".")
+    return importlib.import_module(moduleName, package='..')
+
 
 if __name__ == "__main__":
     fileName = "all"

@@ -136,10 +148,11 @@ if __name__ == "__main__":
         except:
             pass
         if is_test_framework:
-            moduleName = fileName.replace(".py", "").replace(os.sep, ".")
-            uModule = importlib.import_module(moduleName)
+            uModule = dynamicLoadModule(fileName)
             try:
-                ucase = uModule.TDTestCase()
+                class_names = get_local_classes_in_order(fileName)
+                case_class = getattr(uModule, class_names[-1])
+                ucase = case_class()
                 tdDnodes.deploy(1,ucase.updatecfgDict)
             except :
                 tdDnodes.deploy(1,{})

@@ -170,10 +183,11 @@ if __name__ == "__main__":
         except:
             pass
         if is_test_framework:
-            moduleName = fileName.replace(".py", "").replace("/", ".")
-            uModule = importlib.import_module(moduleName)
+            uModule = dynamicLoadModule(fileName)
             try:
-                ucase = uModule.TDTestCase()
+                class_names = get_local_classes_in_order(fileName)
+                case_class = getattr(uModule, class_names[-1])
+                ucase = case_class()
                 tdDnodes.deploy(1,ucase.updatecfgDict)
             except :
                 tdDnodes.deploy(1,{})
@@ -20,6 +20,7 @@ import importlib
 import traceback
 from util.log import *
 import platform
+import ast
 
 
 class TDCase:

@@ -51,20 +52,22 @@ class TDCases:
     def addCluster(self, name, case):
         self.clusterCases.append(TDCase(name, case))
 
-    def get_local_classes(self, module):
-        classes = []
-        for name, obj in inspect.getmembers(module, inspect.isclass):
-            if inspect.getmodule(obj) == module:
-                classes.append(name)
+    def get_local_classes_in_order(self, file_path):
+        with open(file_path, "r", encoding="utf-8") as file:
+            tree = ast.parse(file.read(), filename=file_path)
+
+        classes = [node.name for node in ast.walk(
+            tree) if isinstance(node, ast.ClassDef)]
         return classes
 
     def runAllLinux(self, conn):
         # TODO: load all Linux cases here
         runNum = 0
         for tmp in self.linuxCases:
             if tmp.name.find(fileName) != -1:
                 # get the last class name as the test case class name
-                case_class = getattr(testModule, self.get_local_classes(testModule)[0])
+                class_names = self.get_local_classes_in_order(fileName)
+                case_class = getattr(testModule, class_names[-1])
                 case = case_class()
                 case.init(conn)
                 case.run()

@@ -81,7 +84,8 @@ class TDCases:
         for tmp in self.linuxCases:
             if tmp.name.find(fileName) != -1:
                 # get the last class name as the test case class name
-                case_class = getattr(testModule, self.get_local_classes(testModule)[-1])
+                class_names = self.get_local_classes_in_order(fileName)
+                case_class = getattr(testModule, class_names[-1])
                 case = case_class()
                 case.init(conn, self._logSql, replicaVar)
                 try:

@@ -100,7 +104,8 @@ class TDCases:
         for tmp in self.windowsCases:
             if tmp.name.find(fileName) != -1:
                 # get the last class name as the test case class name
-                case_class = getattr(testModule, self.get_local_classes(testModule)[-1])
+                class_names = self.get_local_classes_in_order(fileName)
+                case_class = getattr(testModule, class_names[-1])
                 case = case_class()
                 case.init(conn)
                 case.run()

@@ -115,11 +120,12 @@ class TDCases:
 
         runNum = 0
         for tmp in self.windowsCases:
             if tmp.name.find(fileName) != -1:
                 # get the last class name as the test case class name
-                case_class = getattr(testModule, self.get_local_classes(testModule)[-1])
-                case = testModule.TDTestCase()
-                case.init(conn, self._logSql,replicaVar)
+                class_names = self.get_local_classes_in_order(fileName)
+                case_class = getattr(testModule, class_names[-1])
+                case = case_class()
+                case.init(conn, self._logSql, replicaVar)
                 try:
                     case.run()
                 except Exception as e:

@@ -139,7 +145,8 @@ class TDCases:
             if tmp.name.find(fileName) != -1:
                 tdLog.notice("run cases like %s" % (fileName))
                 # get the last class name as the test case class name
-                case_class = getattr(testModule, self.get_local_classes(testModule)[-1])
+                class_names = self.get_local_classes_in_order(fileName)
+                case_class = getattr(testModule, class_names[-1])
                 case = case_class()
                 case.init()
                 case.run()

@@ -155,10 +162,11 @@ class TDCases:
         runNum = 0
         for tmp in self.clusterCases:
             if tmp.name.find(fileName) != -1:
                 tdLog.notice("run cases like %s" % (fileName))
                 # get the last class name as the test case class name
-                case_class = getattr(testModule, self.get_local_classes(testModule)[-1])
-                case = testModule.TDTestCase()
+                class_names = self.get_local_classes_in_order(fileName)
+                case_class = getattr(testModule, class_names[-1])
+                case = case_class()
                 case.init()
                 case.run()
                 case.stop()

@@ -190,19 +198,20 @@ class TDCases:
         else:
             tdLog.info("taosBenchmark found in %s" % paths[0])
             return paths[0]
 
     def taosBenchmarkExec(self, param):
         buildPath = tdCases.getTaosBenchmarkPath()
 
         if (platform.system().lower() == 'windows'):
-            cmdStr1 = ' mintty -h never %s %s '%(buildPath, param)
+            cmdStr1 = ' mintty -h never %s %s ' % (buildPath, param)
             tdLog.info(cmdStr1)
             os.system(cmdStr1)
         else:
-            cmdStr1 = '%s %s &'%(buildPath, param)
+            cmdStr1 = '%s %s &' % (buildPath, param)
             tdLog.info(cmdStr1)
             os.system(cmdStr1)
 
         time.sleep(5)
 
 
 tdCases = TDCases()
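
Why the inspect-based helper was dropped in cases.py: inspect.getmembers returns members sorted alphabetically, so "the last class" was alphabetical rather than the last one defined in the file; the ast version preserves definition order. A small demonstration (module built on the fly purely for illustration):

import ast
import inspect
import types

src = "class Zebra: pass\nclass Alpha: pass\n"
mod = types.ModuleType("demo")
exec(src, mod.__dict__)

by_inspect = [name for name, obj in inspect.getmembers(mod, inspect.isclass)]
by_ast = [n.name for n in ast.walk(ast.parse(src)) if isinstance(n, ast.ClassDef)]
print(by_inspect)  # ['Alpha', 'Zebra'] -- alphabetical order
print(by_ast)      # ['Zebra', 'Alpha'] -- definition order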
@@ -24,6 +24,7 @@ import platform
 import socket
 import threading
 import importlib
+import ast
 print(f"Python version: {sys.version}")
 print(f"Version info: {sys.version_info}")
 

@@ -58,6 +59,18 @@ def checkRunTimeError():
     if hwnd:
         os.system("TASKKILL /F /IM taosd.exe")
 
+def get_local_classes_in_order(file_path):
+    with open(file_path, "r", encoding="utf-8") as file:
+        tree = ast.parse(file.read(), filename=file_path)
+
+    classes = [node.name for node in ast.walk(tree) if isinstance(node, ast.ClassDef)]
+    return classes
+
+
+def dynamicLoadModule(fileName):
+    moduleName = fileName.replace(".py", "").replace(os.sep, ".")
+    return importlib.import_module(moduleName, package='..')
+
 #
 # run case on previous cluster
 #

@@ -68,9 +81,11 @@ def runOnPreviousCluster(host, config, fileName):
     sep = "/"
     if platform.system().lower() == 'windows':
         sep = os.sep
-    moduleName = fileName.replace(".py", "").replace(sep, ".")
-    uModule = importlib.import_module(moduleName)
-    case = uModule.TDTestCase()
+
+    uModule = dynamicLoadModule(fileName)
+    class_names = get_local_classes_in_order(fileName)
+    case_class = getattr(uModule, class_names[-1])
+    case = case_class()
 
     # create conn
     conn = taos.connect(host, config)

@@ -350,10 +365,11 @@ if __name__ == "__main__":
         updateCfgDictStr = ''
         # adapter_cfg_dict_str = ''
         if is_test_framework:
-            moduleName = fileName.replace(".py", "").replace(os.sep, ".")
-            uModule = importlib.import_module(moduleName)
+            uModule = dynamicLoadModule(fileName)
             try:
-                ucase = uModule.TDTestCase()
+                class_names = get_local_classes_in_order(fileName)
+                case_class = getattr(uModule, class_names[-1])
+                ucase = case_class()
                 if ((json.dumps(updateCfgDict) == '{}') and hasattr(ucase, 'updatecfgDict')):
                     updateCfgDict = ucase.updatecfgDict
                     updateCfgDictStr = "-d %s"%base64.b64encode(json.dumps(updateCfgDict).encode()).decode()

@@ -522,10 +538,11 @@ if __name__ == "__main__":
             except:
                 pass
             if is_test_framework:
-                moduleName = fileName.replace(".py", "").replace("/", ".")
-                uModule = importlib.import_module(moduleName)
+                uModule = dynamicLoadModule(fileName)
                 try:
-                    ucase = uModule.TDTestCase()
+                    class_names = get_local_classes_in_order(fileName)
+                    case_class = getattr(uModule, class_names[-1])
+                    ucase = case_class()
                     if (json.dumps(updateCfgDict) == '{}'):
                         updateCfgDict = ucase.updatecfgDict
                     if (json.dumps(adapter_cfg_dict) == '{}'):
@@ -13,6 +13,15 @@
 # pip install src/connector/python/
 
 # -*- coding: utf-8 -*-
+import taosws
+import taosrest
+import taos
+from util.taosadapter import *
+from util.cluster import *
+from util.cases import *
+from util.dnodes import *
+from util.log import *
+import toml
 import os
 import sys
 import getopt

@@ -24,21 +33,12 @@ import platform
 import socket
 import threading
 import importlib
-import inspect
+import ast
 print(f"Python version: {sys.version}")
 print(f"Version info: {sys.version_info}")
 
-import toml
 sys.path.append("../pytest")
-from util.log import *
-from util.dnodes import *
-from util.cases import *
-from util.cluster import *
-from util.taosadapter import *
-
-import taos
-import taosrest
-import taosws
 
 def checkRunTimeError():
     import win32gui

@@ -46,8 +46,8 @@ def checkRunTimeError():
     while 1:
         time.sleep(1)
         timeCount = timeCount + 1
-        print("checkRunTimeError",timeCount)
-        if (timeCount>1200):
+        print("checkRunTimeError", timeCount)
+        if (timeCount > 1200):
             print("stop the test.")
             os.system("TASKKILL /F /IM taosd.exe")
             os.system("TASKKILL /F /IM taos.exe")

@@ -55,17 +55,21 @@ def checkRunTimeError():
             os.system("TASKKILL /F /IM mintty.exe")
             os.system("TASKKILL /F /IM python.exe")
             quit(0)
-        hwnd = win32gui.FindWindow(None, "Microsoft Visual C++ Runtime Library")
+        hwnd = win32gui.FindWindow(
+            None, "Microsoft Visual C++ Runtime Library")
         if hwnd:
             os.system("TASKKILL /F /IM taosd.exe")
 
-def get_local_classes(module):
-    classes = []
-    for name, obj in inspect.getmembers(module, inspect.isclass):
-        if inspect.getmodule(obj) == module:
-            classes.append(name)
+
+def get_local_classes_in_order(file_path):
+    with open(file_path, "r", encoding="utf-8") as file:
+        tree = ast.parse(file.read(), filename=file_path)
+
+    classes = [node.name for node in ast.walk(
+        tree) if isinstance(node, ast.ClassDef)]
     return classes
 
+
 def dynamicLoadModule(fileName):
     moduleName = fileName.replace(".py", "").replace(os.sep, ".")
     return importlib.import_module(moduleName, package='..')

@@ -73,6 +77,8 @@ def dynamicLoadModule(fileName):
 #
 # run case on previous cluster
 #
+
+
 def runOnPreviousCluster(host, config, fileName):
     print("enter run on previeous")
 

@@ -80,9 +86,11 @@ def runOnPreviousCluster(host, config, fileName):
     sep = "/"
     if platform.system().lower() == 'windows':
         sep = os.sep
 
     uModule = dynamicLoadModule(fileName)
-    case_class = getattr(uModule, get_local_classes(uModule)[-1])
-    case = case_class()
+    class_names = get_local_classes_in_order(fileName)
+    case_class = getattr(uModule, class_names[-1])
+    case = case_class()
 
     # create conn
     conn = taos.connect(host, config)

@@ -127,7 +135,7 @@ if __name__ == "__main__":
     previousCluster = False
     crashGen = False
     opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:l:scghrd:k:e:N:M:Q:C:RWD:n:i:aP:G', [
-        'file=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help', 'restart', 'updateCfgDict', 'killv', 'execCmd','dnodeNums','mnodeNums','queryPolicy','createDnodeNums','restful','websocket','adaptercfgupdate','replicaVar','independentMnode','previous',"crashGen"])
+        'file=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help', 'restart', 'updateCfgDict', 'killv', 'execCmd', 'dnodeNums', 'mnodeNums', 'queryPolicy', 'createDnodeNums', 'restful', 'websocket', 'adaptercfgupdate', 'replicaVar', 'independentMnode', 'previous', "crashGen"])
     for key, value in opts:
         if key in ['-h', '--help']:
             tdLog.printNoPrefix(

@@ -153,7 +161,8 @@ if __name__ == "__main__":
             tdLog.printNoPrefix('-n the number of replicas')
             tdLog.printNoPrefix('-i independentMnode Mnode')
             tdLog.printNoPrefix('-a address sanitizer mode')
-            tdLog.printNoPrefix('-P run case with [P]revious cluster, do not create new cluster to run case.')
+            tdLog.printNoPrefix(
+                '-P run case with [P]revious cluster, do not create new cluster to run case.')
             tdLog.printNoPrefix('-G crashGen mode')
 
             sys.exit(0)

@@ -231,7 +240,8 @@ if __name__ == "__main__":
 
         if key in ['-D', '--adaptercfgupdate']:
             try:
-                adaptercfgupdate = eval(base64.b64decode(value.encode()).decode())
+                adaptercfgupdate = eval(
+                    base64.b64decode(value.encode()).decode())
             except:
                 print('adapter cfg update convert fail.')
                 sys.exit(0)

@@ -245,7 +255,6 @@ if __name__ == "__main__":
         if key in ['-G', '--crashGen']:
             crashGen = True
 
-
     #
     # do exeCmd command
     #

@@ -272,7 +281,7 @@ if __name__ == "__main__":
         psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled
         processID = subprocess.check_output(psCmd, shell=True)
 
-        while(processID):
+        while (processID):
            os.system(killCmd)
            time.sleep(1)
            processID = subprocess.check_output(psCmd, shell=True)

@@ -299,7 +308,7 @@ if __name__ == "__main__":
         # psCmd = f"pgrep {toBeKilled}"
         processID = subprocess.check_output(psCmd, shell=True)
 
-        while(processID):
+        while (processID):
            os.system(killCmd)
            time.sleep(1)
            processID = subprocess.check_output(psCmd, shell=True)

@@ -346,7 +355,7 @@ if __name__ == "__main__":
     if platform.system().lower() == 'windows':
         fileName = fileName.replace("/", os.sep)
         if (masterIp == "" and not fileName == "0-others\\udf_create.py"):
-            threading.Thread(target=checkRunTimeError,daemon=True).start()
+            threading.Thread(target=checkRunTimeError, daemon=True).start()
         tdLog.info("Procedures for testing self-deployment")
         tdDnodes.init(deployPath, masterIp)
         tdDnodes.setTestCluster(testCluster)

@@ -364,11 +373,13 @@ if __name__ == "__main__":
         if is_test_framework:
             uModule = dynamicLoadModule(fileName)
             try:
-                case_class = getattr(uModule, get_local_classes(uModule)[-1])
+                class_names = get_local_classes_in_order(fileName)
+                case_class = getattr(uModule, class_names[-1])
                 ucase = case_class()
                 if ((json.dumps(updateCfgDict) == '{}') and hasattr(ucase, 'updatecfgDict')):
                     updateCfgDict = ucase.updatecfgDict
-                    updateCfgDictStr = "-d %s"%base64.b64encode(json.dumps(updateCfgDict).encode()).decode()
+                    updateCfgDictStr = "-d %s" % base64.b64encode(
+                        json.dumps(updateCfgDict).encode()).decode()
                 if ((json.dumps(adapter_cfg_dict) == '{}') and hasattr(ucase, 'taosadapter_cfg_dict')):
                     adapter_cfg_dict = ucase.taosadapter_cfg_dict
                     # adapter_cfg_dict_str = f"-D {base64.b64encode(toml.dumps(adapter_cfg_dict).encode()).decode()}"

@@ -380,8 +391,8 @@ if __name__ == "__main__":
             tAdapter.init(deployPath, masterIp)
             tAdapter.stop(force_kill=True)
 
-        if dnodeNums == 1 :
-            tdDnodes.deploy(1,updateCfgDict)
+        if dnodeNums == 1:
+            tdDnodes.deploy(1, updateCfgDict)
             tdDnodes.start(1)
             tdCases.logSql(logSql)
             if restful or websocket:

@@ -389,13 +400,15 @@ if __name__ == "__main__":
                 tAdapter.start()
 
             if queryPolicy != 1:
-                queryPolicy=int(queryPolicy)
+                queryPolicy = int(queryPolicy)
                 if restful:
-                    conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc")
+                    conn = taosrest.connect(
+                        url=f"http://{host}:6041", timezone="utc")
                 elif websocket:
-                    conn = taosws.connect(f"taosws://root:taosdata@{host}:6041")
+                    conn = taosws.connect(
+                        f"taosws://root:taosdata@{host}:6041")
                 else:
-                    conn = taos.connect(host,config=tdDnodes.getSimCfgPath())
+                    conn = taos.connect(host, config=tdDnodes.getSimCfgPath())
 
                 cursor = conn.cursor()
                 cursor.execute("create qnode on dnode 1")

@@ -403,16 +416,20 @@ if __name__ == "__main__":
                 cursor.execute("show local variables")
                 res = cursor.fetchall()
                 for i in range(cursor.rowcount):
-                    if res[i][0] == "queryPolicy" :
+                    if res[i][0] == "queryPolicy":
                         if int(res[i][1]) == int(queryPolicy):
-                            tdLog.info(f'alter queryPolicy to {queryPolicy} successfully')
+                            tdLog.info(
+                                f'alter queryPolicy to {queryPolicy} successfully')
                             cursor.close()
                         else:
                             tdLog.debug(res)
-                            tdLog.exit(f"alter queryPolicy to {queryPolicy} failed")
-        else :
-            tdLog.debug("create an cluster with %s nodes and make %s dnode as independent mnode"%(dnodeNums,mnodeNums))
-            dnodeslist = cluster.configure_cluster(dnodeNums=dnodeNums, mnodeNums=mnodeNums, independentMnode=independentMnode)
+                            tdLog.exit(
+                                f"alter queryPolicy to {queryPolicy} failed")
+        else:
+            tdLog.debug("create an cluster with %s nodes and make %s dnode as independent mnode" % (
+                dnodeNums, mnodeNums))
+            dnodeslist = cluster.configure_cluster(
+                dnodeNums=dnodeNums, mnodeNums=mnodeNums, independentMnode=independentMnode)
             tdDnodes = ClusterDnodes(dnodeslist)
             tdDnodes.init(deployPath, masterIp)
             tdDnodes.setTestCluster(testCluster)

@@ -429,31 +446,34 @@ if __name__ == "__main__":
                 tAdapter.start()
 
             if restful:
-                conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc")
+                conn = taosrest.connect(
+                    url=f"http://{host}:6041", timezone="utc")
             elif websocket:
                 conn = taosws.connect(f"taosws://root:taosdata@{host}:6041")
             else:
-                conn = taos.connect(host,config=tdDnodes.getSimCfgPath())
+                conn = taos.connect(host, config=tdDnodes.getSimCfgPath())
             # tdLog.info(tdDnodes.getSimCfgPath(),host)
             if createDnodeNums == 1:
-                createDnodeNums=dnodeNums
+                createDnodeNums = dnodeNums
             else:
-                createDnodeNums=createDnodeNums
-            cluster.create_dnode(conn,createDnodeNums)
-            cluster.create_mnode(conn,mnodeNums)
+                createDnodeNums = createDnodeNums
+            cluster.create_dnode(conn, createDnodeNums)
+            cluster.create_mnode(conn, mnodeNums)
             try:
-                if cluster.check_dnode(conn) :
+                if cluster.check_dnode(conn):
                     print("check dnode ready")
             except Exception as r:
                 print(r)
             if queryPolicy != 1:
-                queryPolicy=int(queryPolicy)
+                queryPolicy = int(queryPolicy)
                 if restful:
-                    conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc")
+                    conn = taosrest.connect(
+                        url=f"http://{host}:6041", timezone="utc")
                 elif websocket:
-                    conn = taosws.connect(f"taosws://root:taosdata@{host}:6041")
+                    conn = taosws.connect(
+                        f"taosws://root:taosdata@{host}:6041")
                 else:
-                    conn = taos.connect(host,config=tdDnodes.getSimCfgPath())
+                    conn = taos.connect(host, config=tdDnodes.getSimCfgPath())
 
                 cursor = conn.cursor()
                 cursor.execute("create qnode on dnode 1")

@@ -461,23 +481,27 @@ if __name__ == "__main__":
                 cursor.execute("show local variables")
                 res = cursor.fetchall()
                 for i in range(cursor.rowcount):
-                    if res[i][0] == "queryPolicy" :
+                    if res[i][0] == "queryPolicy":
                         if int(res[i][1]) == int(queryPolicy):
-                            tdLog.info(f'alter queryPolicy to {queryPolicy} successfully')
+                            tdLog.info(
+                                f'alter queryPolicy to {queryPolicy} successfully')
                             cursor.close()
                         else:
                             tdLog.debug(res)
-                            tdLog.exit(f"alter queryPolicy to {queryPolicy} failed")
+                            tdLog.exit(
+                                f"alter queryPolicy to {queryPolicy} failed")
 
         if ucase is not None and hasattr(ucase, 'noConn') and ucase.noConn == True:
             conn = None
         else:
             if restful:
-                conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc")
+                conn = taosrest.connect(
+                    url=f"http://{host}:6041", timezone="utc")
             elif websocket:
                 conn = taosws.connect(f"taosws://root:taosdata@{host}:6041")
             else:
-                conn = taos.connect(host=f"{host}", config=tdDnodes.getSimCfgPath())
+                conn = taos.connect(
+                    host=f"{host}", config=tdDnodes.getSimCfgPath())
 
         if testCluster:
             tdLog.info("Procedures for testing cluster")

@@ -488,11 +512,13 @@ if __name__ == "__main__":
         else:
             tdLog.info("Procedures for testing self-deployment")
             if restful:
-                conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc")
+                conn = taosrest.connect(
+                    url=f"http://{host}:6041", timezone="utc")
             elif websocket:
                 conn = taosws.connect(f"taosws://root:taosdata@{host}:6041")
             else:
-                conn = taos.connect(host=f"{host}", config=tdDnodes.getSimCfgPath())
+                conn = taos.connect(
+                    host=f"{host}", config=tdDnodes.getSimCfgPath())
 
         if fileName == "all":
             tdCases.runAllWindows(conn)

@@ -509,14 +535,19 @@ if __name__ == "__main__":
                 tdDnodes.start(1)
                 time.sleep(1)
                 if restful:
-                    conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc")
+                    conn = taosrest.connect(
+                        url=f"http://{host}:6041", timezone="utc")
                 elif websocket:
-                    conn = taosws.connect(f"taosws://root:taosdata@{host}:6041")
+                    conn = taosws.connect(
+                        f"taosws://root:taosdata@{host}:6041")
                 else:
-                    conn = taos.connect(host=f"{host}", config=tdDnodes.getSimCfgPath())
-                tdLog.info("Procedures for tdengine deployed in %s" % (host))
+                    conn = taos.connect(
+                        host=f"{host}", config=tdDnodes.getSimCfgPath())
+                tdLog.info(
+                    "Procedures for tdengine deployed in %s" % (host))
                 tdLog.info("query test after taosd restart")
-                tdCases.runOneWindows(conn, sp[0] + "_" + "restart.py", replicaVar)
+                tdCases.runOneWindows(
+                    conn, sp[0] + "_" + "restart.py", replicaVar)
             else:
                 tdLog.info("not need to query")
     else:

@@ -536,8 +567,9 @@ if __name__ == "__main__":
         if is_test_framework:
             uModule = dynamicLoadModule(fileName)
             try:
-                case_class = getattr(uModule, get_local_classes(uModule)[-1])
-                ucase = case_class()
+                class_names = get_local_classes_in_order(fileName)
+                case_class = getattr(uModule, class_names[-1])
+                ucase = case_class()
                 if (json.dumps(updateCfgDict) == '{}'):
                     updateCfgDict = ucase.updatecfgDict
                 if (json.dumps(adapter_cfg_dict) == '{}'):

@@ -549,9 +581,9 @@ if __name__ == "__main__":
             tAdapter.init(deployPath, masterIp)
             tAdapter.stop(force_kill=True)
 
-        if dnodeNums == 1 :
+        if dnodeNums == 1:
             # dnode is one
-            tdDnodes.deploy(1,updateCfgDict)
+            tdDnodes.deploy(1, updateCfgDict)
             tdDnodes.start(1)
             tdCases.logSql(logSql)
 

@@ -560,13 +592,16 @@ if __name__ == "__main__":
                 tAdapter.start()
 
             if queryPolicy != 1:
-                queryPolicy=int(queryPolicy)
+                queryPolicy = int(queryPolicy)
                 if restful:
-                    conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc")
+                    conn = taosrest.connect(
+                        url=f"http://{host}:6041", timezone="utc")
                 elif websocket:
-                    conn = taosws.connect(f"taosws://root:taosdata@{host}:6041")
+                    conn = taosws.connect(
+                        f"taosws://root:taosdata@{host}:6041")
                 else:
-                    conn = taos.connect(host=f"{host}", config=tdDnodes.getSimCfgPath())
+                    conn = taos.connect(
+                        host=f"{host}", config=tdDnodes.getSimCfgPath())
                 # tdSql.init(conn.cursor())
                 # tdSql.execute("create qnode on dnode 1")
                 # tdSql.execute('alter local "queryPolicy" "%d"'%queryPolicy)

@@ -585,19 +620,23 @@ if __name__ == "__main__":
                 cursor.execute("show local variables")
                 res = cursor.fetchall()
                 for i in range(cursor.rowcount):
-                    if res[i][0] == "queryPolicy" :
+                    if res[i][0] == "queryPolicy":
                         if int(res[i][1]) == int(queryPolicy):
-                            tdLog.info(f'alter queryPolicy to {queryPolicy} successfully')
+                            tdLog.info(
+                                f'alter queryPolicy to {queryPolicy} successfully')
                             cursor.close()
                         else:
                             tdLog.debug(res)
-                            tdLog.exit(f"alter queryPolicy to {queryPolicy} failed")
+                            tdLog.exit(
+                                f"alter queryPolicy to {queryPolicy} failed")
 
-        else :
+        else:
             # dnode > 1 cluster
-            tdLog.debug("create an cluster with %s nodes and make %s dnode as independent mnode"%(dnodeNums,mnodeNums))
-            print(independentMnode,"independentMnode valuse")
-            dnodeslist = cluster.configure_cluster(dnodeNums=dnodeNums, mnodeNums=mnodeNums, independentMnode=independentMnode)
+            tdLog.debug("create an cluster with %s nodes and make %s dnode as independent mnode" % (
+                dnodeNums, mnodeNums))
+            print(independentMnode, "independentMnode valuse")
+            dnodeslist = cluster.configure_cluster(
+                dnodeNums=dnodeNums, mnodeNums=mnodeNums, independentMnode=independentMnode)
             tdDnodes = ClusterDnodes(dnodeslist)
             tdDnodes.init(deployPath, masterIp)
             tdDnodes.setTestCluster(testCluster)

@@ -605,7 +644,7 @@ if __name__ == "__main__":
             tdDnodes.setAsan(asan)
             tdDnodes.stopAll()
             for dnode in tdDnodes.dnodes:
-                tdDnodes.deploy(dnode.index,updateCfgDict)
+                tdDnodes.deploy(dnode.index, updateCfgDict)
             for dnode in tdDnodes.dnodes:
                 tdDnodes.starttaosd(dnode.index)
             tdCases.logSql(logSql)

@@ -616,34 +655,39 @@ if __name__ == "__main__":
 
             # create taos connect
             if restful:
-                conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc")
+                conn = taosrest.connect(
+                    url=f"http://{host}:6041", timezone="utc")
             elif websocket:
                 conn = taosws.connect(f"taosws://root:taosdata@{host}:6041")
             else:
-                conn = taos.connect(host=f"{host}", config=tdDnodes.getSimCfgPath())
-            print(tdDnodes.getSimCfgPath(),host)
+                conn = taos.connect(
+                    host=f"{host}", config=tdDnodes.getSimCfgPath())
+            print(tdDnodes.getSimCfgPath(), host)
             if createDnodeNums == 1:
-                createDnodeNums=dnodeNums
+                createDnodeNums = dnodeNums
             else:
-                createDnodeNums=createDnodeNums
-            cluster.create_dnode(conn,createDnodeNums)
-            cluster.create_mnode(conn,mnodeNums)
+                createDnodeNums = createDnodeNums
+            cluster.create_dnode(conn, createDnodeNums)
+            cluster.create_mnode(conn, mnodeNums)
 
             try:
-                if cluster.check_dnode(conn) :
+                if cluster.check_dnode(conn):
                     print("check dnode ready")
             except Exception as r:
                 print(r)
 
             # do queryPolicy option
             if queryPolicy != 1:
-                queryPolicy=int(queryPolicy)
+                queryPolicy = int(queryPolicy)
                 if restful:
-                    conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc")
+                    conn = taosrest.connect(
+                        url=f"http://{host}:6041", timezone="utc")
                 elif websocket:
-                    conn = taosws.connect(f"taosws://root:taosdata@{host}:6041")
+                    conn = taosws.connect(
+                        f"taosws://root:taosdata@{host}:6041")
                 else:
-                    conn = taos.connect(host=f"{host}", config=tdDnodes.getSimCfgPath())
+                    conn = taos.connect(
+                        host=f"{host}", config=tdDnodes.getSimCfgPath())
 
                 cursor = conn.cursor()
                 cursor.execute("create qnode on dnode 1")

@@ -651,14 +695,15 @@ if __name__ == "__main__":
                 cursor.execute("show local variables")
                 res = cursor.fetchall()
                 for i in range(cursor.rowcount):
-                    if res[i][0] == "queryPolicy" :
+                    if res[i][0] == "queryPolicy":
                         if int(res[i][1]) == int(queryPolicy):
-                            tdLog.info(f'alter queryPolicy to {queryPolicy} successfully')
+                            tdLog.info(
+                                f'alter queryPolicy to {queryPolicy} successfully')
                             cursor.close()
                         else:
                             tdLog.debug(res)
-                            tdLog.exit(f"alter queryPolicy to {queryPolicy} failed")
-
+                            tdLog.exit(
+                                f"alter queryPolicy to {queryPolicy} failed")
 
             # run case
             if testCluster:

@@ -670,11 +715,13 @@ if __name__ == "__main__":
         else:
             tdLog.info("Procedures for testing self-deployment")
             if restful:
-                conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc")
+                conn = taosrest.connect(
+                    url=f"http://{host}:6041", timezone="utc")
             elif websocket:
                 conn = taosws.connect(f"taosws://root:taosdata@{host}:6041")
             else:
-                conn = taos.connect(host=f"{host}", config=tdDnodes.getSimCfgPath())
+                conn = taos.connect(
+                    host=f"{host}", config=tdDnodes.getSimCfgPath())
 
         if fileName == "all":
             tdCases.runAllLinux(conn)

@@ -692,14 +739,19 @@ if __name__ == "__main__":
                 tdDnodes.start(1)
                 time.sleep(1)
                 if restful:
-                    conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc")
+                    conn = taosrest.connect(
+                        url=f"http://{host}:6041", timezone="utc")
                 elif websocket:
-                    conn = taosws.connect(f"taosws://root:taosdata@{host}:6041")
+                    conn = taosws.connect(
+                        f"taosws://root:taosdata@{host}:6041")
                 else:
-                    conn = taos.connect(host=f"{host}", config=tdDnodes.getSimCfgPath())
-                tdLog.info("Procedures for tdengine deployed in %s" % (host))
+                    conn = taos.connect(
+                        host=f"{host}", config=tdDnodes.getSimCfgPath())
+                tdLog.info(
+                    "Procedures for tdengine deployed in %s" % (host))
                 tdLog.info("query test after taosd restart")
-                tdCases.runOneLinux(conn, sp[0] + "_" + "restart.py", replicaVar)
+                tdCases.runOneLinux(
+                    conn, sp[0] + "_" + "restart.py", replicaVar)
             else:
                 tdLog.info("not need to query")
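
Most of the churn in this last file is autopep8-style rewrapping, but the same three-way connect logic now appears many times. A hedged sketch of how it could be factored out (the connector calls are exactly those used above; the helper name is invented for illustration and is not part of the commit):

def make_conn(host, cfg_path, restful=False, websocket=False):
    # Pick the connector the runner was asked for: REST, WebSocket, or native.
    if restful:
        import taosrest
        return taosrest.connect(url=f"http://{host}:6041", timezone="utc")
    if websocket:
        import taosws
        return taosws.connect(f"taosws://root:taosdata@{host}:6041")
    import taos
    return taos.connect(host=host, config=cfg_path)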