Merge branch 'main' into enh/killtrans
This commit is contained in:
commit
11e7126f98
|
@@ -6,6 +6,8 @@ on:
|
|||
- 'main'
|
||||
- '3.0'
|
||||
- '3.1'
|
||||
- 'enh/cmake-TD-33848'
|
||||
|
||||
paths-ignore:
|
||||
- 'docs/**'
|
||||
- 'packaging/**'
|
||||
|
|
|
@@ -0,0 +1,101 @@
|
|||
name: TDengine Doc Build
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
target_branch:
|
||||
description: "Target branch name of for building the document"
|
||||
required: true
|
||||
type: string
|
||||
|
||||
target_pr_number:
|
||||
description: "PR number of target branch to merge for building the document"
|
||||
required: true
|
||||
type: string
|
||||
|
||||
env:
|
||||
DOC_WKC: "/root/doc_ci_work"
|
||||
ZH_DOC_REPO: "docs.taosdata.com"
|
||||
EN_DOC_REPO: "docs.tdengine.com"
|
||||
TD_REPO: "TDengine"
|
||||
TOOLS_REPO: "taos-tools"
|
||||
|
||||
jobs:
|
||||
check:
|
||||
runs-on:
|
||||
group: CI
|
||||
labels: [self-hosted, doc-build]
|
||||
outputs:
|
||||
changed_files_zh: ${{ steps.set_output.outputs.changed_files_zh }}
|
||||
changed_files_en: ${{ steps.set_output.outputs.changed_files_en }}
|
||||
changed_files_non_doc: ${{ steps.set_output.outputs.changed_files_non_doc }}
|
||||
changed_files_non_tdgpt: ${{ steps.set_output.outputs.changed_files_non_tdgpt }}
|
||||
steps:
|
||||
- name: Get the latest document contents from the repository
|
||||
run: |
|
||||
set -e
|
||||
# ./.github/scripts/update_repo.sh ${{ env.DOC_WKC }}/${{ env.TD_REPO }} ${{ inputs.target_branch }} ${{ inputs.target_pr_number }}
|
||||
cd ${{ env.DOC_WKC }}/${{ env.TD_REPO }}
|
||||
git reset --hard
|
||||
git clean -f
|
||||
git remote prune origin
|
||||
git fetch
|
||||
git checkout ${{ inputs.target_branch }}
|
||||
git pull >/dev/null
|
||||
git fetch origin +refs/pull/${{ inputs.target_pr_number }}/merge
|
||||
git checkout -qf FETCH_HEAD
|
||||
- name: Check whether the document is changed and set output variables
|
||||
id: set_output
|
||||
run: |
|
||||
set -e
|
||||
cd ${{ env.DOC_WKC }}/${{ env.TD_REPO }}
|
||||
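# Diff the PR merge head (FETCH_HEAD) against its merge-base with the target branch and bucket the changed files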
changed_files_zh=$(git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${{ inputs.target_branch }}`| grep "^docs/zh/" | tr '\n' ' ' || :)
|
||||
changed_files_en=$(git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${{ inputs.target_branch }}`| grep "^docs/en/" | tr '\n' ' ' || :)
|
||||
changed_files_non_doc=$(git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${{ inputs.target_branch }}`|grep -v "^docs/en/"|grep -v "^docs/zh/"|grep -v ".md$" | tr '\n' ' ' || :)
|
||||
changed_files_non_tdgpt=$(git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${{ inputs.target_branch }}`|grep -v "^docs/en/"|grep -v "^docs/zh/"|grep -v ".md$" | grep -Ev "forecastoperator.c|anomalywindowoperator.c|tanalytics.h|tanalytics.c|tdgpt_cases.task|analytics" | tr '\n' ' ' ||:)
|
||||
echo "changed_files_zh=${changed_files_zh}" >> $GITHUB_OUTPUT
|
||||
echo "changed_files_en=${changed_files_en}" >> $GITHUB_OUTPUT
|
||||
echo "changed_files_non_doc=${changed_files_non_doc}" >> $GITHUB_OUTPUT
|
||||
echo "changed_files_non_tdgpt=${changed_files_non_tdgpt}" >> $GITHUB_OUTPUT
|
||||
|
||||
build:
|
||||
needs: check
|
||||
runs-on:
|
||||
group: CI
|
||||
labels: [self-hosted, doc-build]
|
||||
if: ${{ needs.check.outputs.changed_files_zh != '' || needs.check.outputs.changed_files_en != '' }}
|
||||
|
||||
steps:
|
||||
- name: Get the latest document contents
|
||||
run: |
|
||||
set -e
|
||||
#./.github/scripts/update_repo.sh ${{ env.DOC_WKC }}/${{ env.TD_REPO }} ${{ inputs.target_branch }} ${{ inputs.target_pr_number }}
|
||||
cd ${{ env.DOC_WKC }}/${{ env.TD_REPO }}
|
||||
git reset --hard
|
||||
git clean -f
|
||||
git remote prune origin
|
||||
git fetch
|
||||
git checkout ${{ inputs.target_branch }}
|
||||
git pull >/dev/null
|
||||
git fetch origin +refs/pull/${{ inputs.target_pr_number }}/merge
|
||||
git checkout -qf FETCH_HEAD
|
||||
|
||||
- name: Build the Chinese document
|
||||
if: ${{ needs.check.outputs.changed_files_zh != '' }}
|
||||
run: |
|
||||
cd ${{ env.DOC_WKC }}/${{ env.ZH_DOC_REPO }}
|
||||
yarn ass local
|
||||
yarn build
|
||||
|
||||
- name: Build the English document
|
||||
if: ${{ needs.check.outputs.changed_files_en != '' }}
|
||||
run: |
|
||||
cd ${{ env.DOC_WKC }}/${{ env.EN_DOC_REPO }}
|
||||
yarn ass local
|
||||
yarn build
|
||||
|
||||
outputs:
|
||||
changed_files_zh: ${{ needs.check.outputs.changed_files_zh }}
|
||||
changed_files_en: ${{ needs.check.outputs.changed_files_en }}
|
||||
changed_files_non_doc: ${{ needs.check.outputs.changed_files_non_doc }}
|
||||
changed_files_non_tdgpt: ${{ needs.check.outputs.changed_files_non_tdgpt }}
|
|
@@ -7,36 +7,24 @@ Power BI is a business analytics tool provided by Microsoft. By configuring the
|
|||
|
||||
## Prerequisites
|
||||
|
||||
Install and run Power BI Desktop software (if not installed, please download the latest version for Windows OS 32/64 bit from its official address).
|
||||
- TDengine 3.3.4.0 or above is installed and running normally (both Enterprise and Community versions are available).
|
||||
- taosAdapter is running normally, refer to [taosAdapter Reference](../../../tdengine-reference/components/taosadapter/).
|
||||
- Install and run Power BI Desktop software (if not installed, please download the latest version for Windows OS 32/64 bit from its official address).
|
||||
- Download the latest Windows OS X64 client driver from the TDengine official website and install it on the machine running Power BI. After successful installation, the TDengine driver can be seen in the "ODBC Data Sources (32-bit)" or "ODBC Data Sources (64-bit)" management tool.
|
||||
|
||||
## Install ODBC Driver
|
||||
## Configure Data Source
|
||||
|
||||
Download the latest Windows OS X64 client driver from the TDengine official website and install it on the machine running Power BI. After successful installation, the TDengine driver can be seen in the "ODBC Data Sources (32-bit)" or "ODBC Data Sources (64-bit)" management tool.
|
||||
**Step 1**, Search for and open the [ODBC Data Source (64 bit)] management tool from the Start menu of the Windows operating system and configure it; refer to [Install ODBC Driver](../../../tdengine-reference/client-libraries/odbc/#Installation).
|
||||
|
||||
**Step 2**, Open Power BI and log in, then click [Home] -> [Get Data] -> [Other] -> [ODBC] -> [Connect] to add the data source.
|
||||
|
||||
## Configure ODBC Data Source
|
||||
**Step 3**, Select the data source name just created, such as [MyTDengine]. If you need to enter SQL, click the [Advanced options] tab and enter the SQL statement in the expanded dialog box. Click the [OK] button to connect to the configured data source.
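For example, a statement like the following could be entered under [Advanced options] to import only recent raw records (a sketch; it assumes the smart-meter sample supertable `test.meters` described later on this page):

```sql
select tbname, ts, current, voltage, phase from test.meters where ts >= now - 1d
```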
|
||||
|
||||
The steps to configure the ODBC data source are as follows.
|
||||
**Step 4**, In the [Navigator], you can browse the corresponding database's tables/views and load data.
|
||||
|
||||
Step 1, search and open "ODBC Data Sources (32-bit)" or "ODBC Data Sources (64-bit)" management tool from the Windows start menu.
|
||||
Step 2, click the "User DSN" tab → "Add" button, enter the "Create New Data Source" dialog box.
|
||||
Step 3, in the list of "Select the driver you want to install for this data source" choose "TDengine", click the "Finish" button, enter the TDengine ODBC data source configuration page. Fill in the necessary information as follows.
|
||||
## Data Analysis
|
||||
|
||||
- DSN: Data source name, required, such as "MyTDengine".
|
||||
- Connection Type: Check the "WebSocket" checkbox.
|
||||
- URL: ODBC data source URL, required, such as `http://127.0.0.1:6041`.
|
||||
- Database: Indicates the database to connect to, optional, such as "test".
|
||||
- Username: Enter username, if not filled, default is "root".
|
||||
- Password: Enter user password, if not filled, default is "taosdata".
|
||||
|
||||
Step 4, click the "Test Connection" button, test the connection situation, if successfully connected, it will prompt "Successfully connected to `http://127.0.0.1:6041`".
|
||||
Step 5, click the "OK" button, to save the configuration and exit.
|
||||
|
||||
## Import TDengine Data into Power BI
|
||||
|
||||
The steps to import TDengine data into Power BI are as follows:
|
||||
Step 1, open Power BI and log in, click "Home" → "Get Data" → "Other" → "ODBC" → "Connect", add data source.
|
||||
Step 2, select the data source name just created, such as "MyTDengine", if you need to enter SQL, you can click the "Advanced options" tab, in the expanded dialog box enter the SQL statement. Click the "OK" button to connect to the configured data source.
|
||||
Step 3, enter the "Navigator", you can browse the corresponding database's tables/views and load data.
|
||||
### Instructions for use
|
||||
|
||||
To fully leverage Power BI's advantages in analyzing data from TDengine, users need to first understand core concepts such as dimensions, metrics, window split queries, data split queries, time-series, and correlation, then import data through custom SQL.
|
||||
|
||||
|
@@ -47,7 +35,7 @@ To fully leverage Power BI's advantages in analyzing data from TDengine, users n
|
|||
- Time-Series: When drawing curves or aggregating data by time, it is usually necessary to introduce a date table. Date tables can be imported from Excel spreadsheets, or obtained in TDengine by executing SQL like `select _wstart date, count(*) cnt from test.meters where ts between A and B interval(1d) fill(0)`, where the fill clause represents the filling mode in case of data missing, and the pseudocolumn `_wstart` is the date column to be obtained.
|
||||
- Correlation: Tells how data is related, such as metrics and dimensions can be associated together through the tbname column, date tables and metrics can be associated through the date column, combined to form visual reports.
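For instance, a window split query can be combined with a data split query to down-sample each meter's current into 10-minute windows (a sketch; it assumes the smart-meter sample supertable `test.meters` used later on this page):

```sql
select tbname, _wstart ws, avg(current) from test.meters partition by tbname interval(10m)
```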
|
||||
|
||||
## Smart Meter Example
|
||||
### Smart Meter Example
|
||||
|
||||
TDengine employs a unique data model to optimize the storage and query performance of time-series data. This model uses supertables as templates to create an independent table for each device. Each table is designed with high scalability in mind, supporting up to 4096 data columns and 128 tag columns. This design enables TDengine to efficiently handle large volumes of time-series data while maintaining flexibility and ease of use.
|
||||
|
||||
|
@@ -56,24 +44,35 @@ Taking smart meters as an example, suppose each meter generates one record per s
|
|||
In Power BI, users can map the tag columns in TDengine tables to dimension columns for grouping and filtering data. Meanwhile, the aggregated results of the data columns can be imported as measure columns for calculating key indicators and generating reports. In this way, Power BI helps decision-makers quickly obtain the information they need, gain a deeper understanding of business operations, and make more informed decisions.
|
||||
|
||||
Follow the steps below to experience the functionality of generating time-series data reports through Power BI.
|
||||
Step 1, Use TDengine's taosBenchMark to quickly generate data for 1,000 smart meters over 3 days, with a collection frequency of 1s.
|
||||
```shell
|
||||
taosBenchmark -t 1000 -n 259200 -S 1000 -y
|
||||
```
|
||||
Step 2, Import dimension data. In Power BI, import the tag columns of the table, named as tags, using the following SQL to get the tag data of all smart meters under the supertable.
|
||||
```sql
|
||||
select distinct tbname device, groupId, location from test.meters
|
||||
```
|
||||
Step 3, Import measure data. In Power BI, import the average current, voltage, and phase of each smart meter in 1-hour time windows, named as data, with the following SQL.
|
||||
```sql
|
||||
select tbname, _wstart ws, avg(current), avg(voltage), avg(phase) from test.meters PARTITION by tbname interval(1h)
|
||||
```
|
||||
Step 4, Import date data. Using a 1-day time window, obtain the time range and data count of the time-series data, with the following SQL. In the Power Query editor, convert the format of the date column from "text" to "date".
|
||||
```sql
|
||||
select _wstart date, count(*) from test.meters interval(1d) having count(*)>0
|
||||
```
|
||||
Step 5, Establish the relationship between dimensions and measures. Open the model view and establish the relationship between the tags and data tables, setting tbname as the relationship data column.
|
||||
Step 6, Establish the relationship between date and measures. Open the model view and establish the relationship between the date dataset and data, with the relationship data columns being date and datatime.
|
||||
Step 7, Create reports. Use these data in bar charts, pie charts, and other controls.
|
||||
|
||||
**Step 1**, Use TDengine's taosBenchmark to quickly generate data for 1,000 smart meters over 3 days, with a collection frequency of 1s.
|
||||
|
||||
```shell
|
||||
taosBenchmark -t 1000 -n 259200 -S 1000 -y
|
||||
```
|
||||
|
||||
**Step 2**, Import dimension data. In Power BI, import the tag columns of the table, named as tags, using the following SQL to get the tag data of all smart meters under the supertable.
|
||||
|
||||
```sql
|
||||
select distinct tbname device, groupId, location from test.meters
|
||||
```
|
||||
|
||||
**Step 3**, Import measure data. In Power BI, import the average current, voltage, and phase of each smart meter in 1-hour time windows, named as data, with the following SQL.
|
||||
|
||||
```sql
|
||||
select tbname, _wstart ws, avg(current), avg(voltage), avg(phase) from test.meters PARTITION by tbname interval(1h)
|
||||
```
|
||||
|
||||
**Step 4**, Import date data. Using a 1-day time window, obtain the time range and data count of the time-series data, with the following SQL. In the Power Query editor, convert the format of the date column from "text" to "date".
|
||||
|
||||
```sql
|
||||
select _wstart date, count(*) from test.meters interval(1d) having count(*)>0
|
||||
```
|
||||
|
||||
**Step 5**, Establish the relationship between dimensions and measures. Open the model view and establish the relationship between the tags and data tables, setting tbname as the relationship data column.
|
||||
|
||||
**Step 6**, Establish the relationship between date and measures. Open the model view and establish the relationship between the date dataset and data, with the relationship data columns being date and datatime.
|
||||
|
||||
**Step 7**, Create reports. Use these data in bar charts, pie charts, and other controls.
|
||||
|
||||
Due to TDengine's superior performance in handling time-series data, users can enjoy a very good experience during data import and daily regular data refreshes. For more information on building Power BI visual effects, please refer to the official Power BI documentation.
|
||||
|
|
|
@@ -11,43 +11,44 @@ import imgStep04 from '../../assets/seeq-04.png';
|
|||
|
||||
Seeq is advanced analytics software for the manufacturing and Industrial Internet of Things (IIOT). Seeq supports innovative new features using machine learning in process manufacturing organizations. These features enable organizations to deploy their own or third-party machine learning algorithms to advanced analytics applications used by frontline process engineers and subject matter experts, thus extending the efforts of a single data scientist to many frontline staff.
|
||||
|
||||
Through the TDengine Java connector, Seeq can easily support querying time-series data provided by TDengine and offer data presentation, analysis, prediction, and other functions.
|
||||
Through the `TDengine Java connector`, Seeq can easily support querying time-series data provided by TDengine and offer data presentation, analysis, prediction, and other functions.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- Seeq has been installed. Download the relevant software from [Seeq's official website](https://www.seeq.com/customer-download), such as Seeq Server and Seeq Data Lab, etc. Seeq Data Lab needs to be installed on a different server from Seeq Server and interconnected through configuration. For detailed installation and configuration instructions, refer to the [Seeq Knowledge Base](https://support.seeq.com/kb/latest/cloud/).
|
||||
- TDengine 3.1.0.3 or above is installed and running normally (both Enterprise and Community versions are available).
|
||||
- taosAdapter is running normally, refer to [taosAdapter Reference](../../../tdengine-reference/components/taosadapter/).
|
||||
- Seeq has been installed. Download the relevant software from [Seeq's official website](https://www.seeq.com/customer-download), such as `Seeq Server` and `Seeq Data Lab`, etc. `Seeq Data Lab` needs to be installed on a different server from `Seeq Server` and interconnected through configuration. For detailed installation and configuration instructions, refer to the [Seeq Knowledge Base](https://support.seeq.com/kb/latest/cloud/).
|
||||
- Install the JDBC driver. Download the `TDengine JDBC connector` file `taos-jdbcdriver-3.2.5-dist.jar` or a higher version from `maven.org`.
|
||||
|
||||
- TDengine local instance has been installed. Please refer to the [official documentation](../../../get-started). If using TDengine Cloud, please go to https://cloud.taosdata.com apply for an account and log in to see how to access TDengine Cloud.
|
||||
## Configure Data Source
|
||||
|
||||
## Configuring Seeq to Access TDengine
|
||||
|
||||
1. Check the data storage location
|
||||
**Step 1**, Check the data storage location
|
||||
|
||||
```shell
|
||||
sudo seeq config get Folders/Data
|
||||
```
|
||||
|
||||
2. Download the TDengine Java connector package from maven.org, the latest version is [3.2.5](https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/3.2.5/taos-jdbcdriver-3.2.5-dist.jar), and copy it to the plugins\lib in the data storage location.
|
||||
**Step 2**, Download the TDengine Java connector package from `maven.org` and copy it to the `plugins\lib` directory in the data storage location.
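For example, assuming the data folder reported in Step 1 is `/opt/seeq/data` (adjust the path and the driver version to your environment), the download and copy could look like this:

```shell
wget https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/3.2.5/taos-jdbcdriver-3.2.5-dist.jar
sudo cp taos-jdbcdriver-3.2.5-dist.jar /opt/seeq/data/plugins/lib/
```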
|
||||
|
||||
3. Restart seeq server
|
||||
**Step 3**, Restart the Seeq server
|
||||
|
||||
```shell
|
||||
sudo seeq restart
|
||||
```
|
||||
|
||||
4. Enter License
|
||||
**Step 4**, Enter License
|
||||
|
||||
Use a browser to visit ip:34216 and follow the instructions to enter the license.
|
||||
|
||||
## Using Seeq to Analyze TDengine Time-Series Data
|
||||
|
||||
This section demonstrates how to use Seeq software in conjunction with TDengine for time-series data analysis.
|
||||
## Data Analysis
|
||||
|
||||
### Scenario Introduction
|
||||
|
||||
The example scenario is a power system where users collect electricity usage data from power station instruments daily and store it in the TDengine cluster. Now, users want to predict how power consumption will develop and purchase more equipment to support it. User power consumption varies with monthly orders, and considering seasonal changes, power consumption will differ. This city is located in the northern hemisphere, so more electricity is used in summer. We simulate data to reflect these assumptions.
|
||||
|
||||
### Data Schema
|
||||
### Data preparation
|
||||
|
||||
**Step 1**, Create tables in TDengine.
|
||||
|
||||
```sql
|
||||
CREATE STABLE meters (ts TIMESTAMP, num INT, temperature FLOAT, goods INT) TAGS (device NCHAR(20));
|
||||
|
@@ -58,7 +59,7 @@ CREATE TABLE goods (ts1 TIMESTAMP, ts2 TIMESTAMP, goods FLOAT);
|
|||
<Image img={imgStep01} alt=""/>
|
||||
</figure>
|
||||
|
||||
### Data Construction Method
|
||||
**Step 2**, Construct data in TDengine.
|
||||
|
||||
```shell
|
||||
python mockdata.py
|
||||
|
@@ -67,11 +68,7 @@ taos -s "insert into power.goods select _wstart, _wstart + 10d, avg(goods) from
|
|||
|
||||
The source code is hosted on [GitHub Repository](https://github.com/sangshuduo/td-forecasting).
|
||||
|
||||
## Using Seeq for Data Analysis
|
||||
|
||||
### Configuring Data Source
|
||||
|
||||
Log in using a Seeq administrator role account and create a new data source.
|
||||
**Step 3**, Log in using a Seeq administrator role account and create a new data source.
|
||||
|
||||
- Power
|
||||
|
||||
|
@@ -330,77 +327,7 @@ Program output results:
|
|||
<Image img={imgStep03} alt=""/>
|
||||
</figure>
|
||||
|
||||
## Configuring Seeq Data Source Connection to TDengine Cloud
|
||||
|
||||
Configuring a Seeq data source connection to TDengine Cloud is essentially no different from connecting to a local TDengine installation. Simply log in to TDengine Cloud, select "Programming - Java" and copy the JDBC string with a token to fill in as the DatabaseJdbcUrl value for the Seeq Data Source.
|
||||
Note that when using TDengine Cloud, the database name needs to be specified in SQL commands.
|
||||
|
||||
### Configuration example using TDengine Cloud as a data source
|
||||
|
||||
```json
|
||||
{
|
||||
"QueryDefinitions": [
|
||||
{
|
||||
"Name": "CloudVoltage",
|
||||
"Type": "SIGNAL",
|
||||
"Sql": "SELECT ts, voltage FROM test.meters",
|
||||
"Enabled": true,
|
||||
"TestMode": false,
|
||||
"TestQueriesDuringSync": true,
|
||||
"InProgressCapsulesEnabled": false,
|
||||
"Variables": null,
|
||||
"Properties": [
|
||||
{
|
||||
"Name": "Name",
|
||||
"Value": "Voltage",
|
||||
"Sql": null,
|
||||
"Uom": "string"
|
||||
},
|
||||
{
|
||||
"Name": "Interpolation Method",
|
||||
"Value": "linear",
|
||||
"Sql": null,
|
||||
"Uom": "string"
|
||||
},
|
||||
{
|
||||
"Name": "Maximum Interpolation",
|
||||
"Value": "2day",
|
||||
"Sql": null,
|
||||
"Uom": "string"
|
||||
}
|
||||
],
|
||||
"CapsuleProperties": null
|
||||
}
|
||||
],
|
||||
"Type": "GENERIC",
|
||||
"Hostname": null,
|
||||
"Port": 0,
|
||||
"DatabaseName": null,
|
||||
"Username": "root",
|
||||
"Password": "taosdata",
|
||||
"InitialSql": null,
|
||||
"TimeZone": null,
|
||||
"PrintRows": false,
|
||||
"UseWindowsAuth": false,
|
||||
"SqlFetchBatchSize": 100000,
|
||||
"UseSSL": false,
|
||||
"JdbcProperties": null,
|
||||
"GenericDatabaseConfig": {
|
||||
"DatabaseJdbcUrl": "jdbc:TAOS-RS://gw.cloud.tdengine.com?useSSL=true&token=41ac9d61d641b6b334e8b76f45f5a8XXXXXXXXXX",
|
||||
"SqlDriverClassName": "com.taosdata.jdbc.rs.RestfulDriver",
|
||||
"ResolutionInNanoseconds": 1000,
|
||||
"ZonedColumnTypes": []
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Example of Seeq Workbench Interface with TDengine Cloud as Data Source
|
||||
|
||||
<figure>
|
||||
<Image img={imgStep04} alt=""/>
|
||||
</figure>
|
||||
|
||||
## Solution Summary
|
||||
### Solution Summary
|
||||
|
||||
By integrating Seeq and TDengine, users can fully leverage the efficient storage and querying capabilities of TDengine, while also benefiting from the powerful data visualization and analysis features provided by Seeq.
|
||||
|
||||
|
|
|
@@ -12,31 +12,34 @@ Through the Python connector of TDengine, Superset can support TDengine data so
|
|||
## Prerequisites
|
||||
|
||||
Prepare the following environment:
|
||||
- TDengine is installed and running normally (both Enterprise and Community versions are available)
|
||||
- taosAdapter is running normally, refer to [taosAdapter](../../../tdengine-reference/components/taosadapter/)
|
||||
- Apache Superset version 2.1.0 or above is already installed, refre to [Apache Superset](https://superset.apache.org/)
|
||||
|
||||
## Install TDengine Python Connector
|
||||
- TDengine 3.2.3.0 or above is installed and running normally (both Enterprise and Community versions are available).
|
||||
- taosAdapter is running normally, refer to [taosAdapter](../../../tdengine-reference/components/taosadapter/).
|
||||
- Apache Superset version 2.1.0 or above is already installed, refer to [Apache Superset](https://superset.apache.org/).
|
||||
- Install Python connector driver, refer to [Python Client Library](../../../tdengine-reference/client-libraries/python).
|
||||
|
||||
:::tip
|
||||
The Python connector of TDengine comes with a connection driver that supports Superset in versions 2.1.18 and later, which will be automatically installed in the Superset directory and provide data source services.
|
||||
The connection uses the WebSocket protocol, so it is necessary to install the `taos-ws-py` component of TDengine separately. The complete installation script is as follows:
|
||||
```bash
|
||||
pip3 install taospy
|
||||
pip3 install taos-ws-py
|
||||
```
|
||||
:::
|
||||
|
||||
## Configure TDengine Connection In Superset
|
||||
## Configure Data Source
|
||||
|
||||
**Step 1**, Enter the new database connection page: [Superset] -> [Setting] -> [Database Connections] -> [+DATABASE].
|
||||
|
||||
**Step 2**, Select the TDengine database connection by choosing the `TDengine` option from the [SUPPORTED DATABASES] drop-down list.
|
||||
|
||||
**Step 1**, enter the new database connection page, "Superset" → "Setting" → "Database Connections" → "+DATABASE"
|
||||
**Step 2**, select TDengine database connection, select the "TDengine" option from the drop-down list of "SUPPORTED DATABASES".
|
||||
:::tip
|
||||
If there is no TDengine option in the drop-down list, please confirm the installation order: `Superset` must be installed first and the `Python Connector` second.
|
||||
:::
|
||||
**Step 3**, write a name of connection in "DISPLAY NAME"
|
||||
**Step 4**, The "SQLALCHEMY URL" field is a key connection information string, and it must be filled in correctly
|
||||
|
||||
**Step 3**, Enter a name for the connection in [DISPLAY NAME].
|
||||
|
||||
**Step 4**, The [SQLALCHEMY URL] field is a key connection information string, and it must be filled in correctly.
|
||||
|
||||
```bash
|
||||
taosws://user:password@host:port
|
||||
```
|
||||
|
||||
| Parameter | <center>Parameter Description</center> |
|
||||
|:---------- |:--------------------------------------------------------- |
|
||||
|user | Username for logging into TDengine database |
|
||||
|
@@ -44,32 +47,34 @@ taosws://user:password@host:port
|
|||
|host | Name of the host where the TDengine database is located |
|
||||
|port | The port that provides WebSocket services, default is 6041 |
|
||||
|
||||
Example:
|
||||
The TDengine database installed on this machine provides WebSocket service port 6041, using the default username and password, "SQLALCHEMY URL" is:
|
||||
Example:
|
||||
|
||||
For a TDengine database installed on this machine, with the WebSocket service on port 6041 and the default username and password, the `SQLALCHEMY URL` is:
|
||||
|
||||
```bash
|
||||
taosws://root:taosdata@localhost:6041
|
||||
```
|
||||
**Step 5**, configure the connection string, click "TEST CONNECTION" to test if the connection can be successful. After passing the test, click the "CONNECT" button to complete the connection
|
||||
|
||||
**Step 5**, After configuring the connection string, click [TEST CONNECTION] to verify that the connection works. Once the test passes, click the [CONNECT] button to complete the connection.
|
||||
|
||||
## Data Analysis
|
||||
|
||||
## Start
|
||||
### Data preparation
|
||||
|
||||
There is no difference in the use of TDengine data source compared to other data sources. Here is a brief introduction to basic data queries:
|
||||
1. Click the "+" button in the upper right corner of the Superset interface, select "SQL query", and enter the query interface
|
||||
2. Select the "TDengine" data source that has been created earlier from the dropdown list of "DATABASES" in the upper left corner
|
||||
3. Select the name of the database to be operated on from the drop-down list of "SCHEMA" (system libraries are not displayed)
|
||||
4. "SEE TABLE SCHEMA" select the name of the super table or regular table to be operated on (sub tables are not displayed)
|
||||
5. Subsequently, the schema information of the selected table will be displayed in the following area
|
||||
6. In the SQL editor area, any SQL statement that conforms to TDengine syntax can be entered for execution
|
||||
There is no difference in the use of TDengine data source compared to other data sources. Here is a brief introduction to basic data queries:
|
||||
|
||||
## Example
|
||||
1. Click the [+] button in the upper right corner of the Superset interface, select [SQL query], and enter the query interface.
|
||||
2. Select the `TDengine` data source that has been created earlier from the dropdown list of [DATABASES] in the upper left corner.
|
||||
3. Select the name of the database to be operated on from the drop-down list of [SCHEMA] (system libraries are not displayed).
|
||||
4. In [SEE TABLE SCHEMA], select the name of the supertable or regular table to be operated on (subtables are not displayed).
|
||||
5. Subsequently, the schema information of the selected table will be displayed in the following area.
|
||||
6. In the `SQL` editor area, any `SQL` statement that conforms to `TDengine` syntax can be entered for execution.
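For example, a query such as the following (a sketch based on the smart-meter sample data used below; adjust the database and column names to your environment) returns the per-minute maximum voltage of group 4:

```sql
select _wstart ws, max(voltage) from test.meters where groupId = 4 interval(1m)
```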
|
||||
|
||||
We chose two popular templates from the Superset Chart template to showcase their effects, using smart meter data as an example:
|
||||
### Smart Meter Example
|
||||
|
||||
1. "Aggregate" Type, which displays the maximum voltage value collected per minute during the specified time period in Group 4
|
||||
We chose two popular templates from the [Superset Chart] template to showcase their effects, using smart meter data as an example:
|
||||
|
||||

|
||||
|
||||
2. "RAW RECORDS" Type, which displays the collected values of current and voltage during the specified time period in Group 4
|
||||
|
||||

|
||||
1. `Aggregate` Type, which displays the maximum voltage value collected per minute during the specified time period in Group 4.
|
||||

|
||||
2. `RAW RECORDS` Type, which displays the collected values of current and voltage during the specified time period in Group 4.
|
||||

|
|
@@ -84,12 +84,12 @@ After modifying configuration file parameters, you need to restart the *taosd* s
|
|||
|
||||
|Parameter Name |Supported Version |Dynamic Modification|Description|
|
||||
|-----------------------|-------------------------|--------------------|------------|
|
||||
|timezone | |Not supported |Time zone; defaults to dynamically obtaining the current time zone setting from the system|
|
||||
|locale | |Not supported |System locale information and encoding format, defaults to obtaining from the system|
|
||||
|charset | |Not supported |Character set encoding, defaults to obtaining from the system|
|
||||
|timezone | | since 3.1.0.0 |Time zone; defaults to dynamically obtaining the current time zone setting from the system|
|
||||
|locale | | since 3.1.0.0 |System locale information and encoding format, defaults to obtaining from the system|
|
||||
|charset | | since 3.1.0.0 |Character set encoding, defaults to obtaining from the system|
|
||||
|
||||
:::info
|
||||
|
||||
#### Explanation of Regional Related Parameters
|
||||
1. To address the issue of data writing and querying across multiple time zones, TDengine uses Unix Timestamps to record and store timestamps. The nature of Unix Timestamps ensures that the timestamps generated are consistent at any given moment across any time zone. It is important to note that the conversion to Unix Timestamps is done on the client side. To ensure that other forms of time on the client are correctly converted to Unix Timestamps, it is necessary to set the correct time zone.
|
||||
|
||||
On Linux/macOS, the client automatically reads the time zone information set by the system. Users can also set the time zone in the configuration file in various ways. For example:
|
||||
|
@@ -532,29 +532,23 @@ The `taosd_vnodes_role` table records virtual node role information.
|
|||
| duration | VARCHAR | tag | SQL execution duration, value range: 3-10s, 10-100s, 100-1000s, 1000s- |
|
||||
| cluster_id | VARCHAR | tag | cluster id |
|
||||
|
||||
## Log Related
|
||||
### taos\_slow\_sql\_detail Table
|
||||
|
||||
TDengine records the system's operational status through log files, helping users monitor the system's condition and troubleshoot issues. This section mainly introduces the related explanations of two system logs: taosc and taosd.
|
||||
`taos_slow_sql_detail` records slow query detail information. The table name follows the rule `{user}_{db}_{ip}_clusterId_{cluster_id}`.
|
||||
|
||||
TDengine's log files mainly include two types: normal logs and slow logs.
|
||||
|
||||
1. Normal Log Behavior Explanation
|
||||
1. Multiple client processes can be started on the same machine, so the client log naming convention is taoslogX.Y, where X is a number, either empty or from 0 to 9, and Y is a suffix, either 0 or 1.
|
||||
2. Only one server process can exist on the same machine. Therefore, the server log naming convention is taosdlog.Y, where Y is a suffix, either 0 or 1.
|
||||
|
||||
The rules for determining the number and suffix are as follows (assuming the log path is /var/log/taos/):
|
||||
1. Determining the number: Use 10 numbers as the log naming convention, /var/log/taos/taoslog0.Y - /var/log/taos/taoslog9.Y, check each number sequentially to find the first unused number as the log file number for that process. If all 10 numbers are used by processes, do not use a number, i.e., /var/log/taos/taoslog.Y, and all processes write to the same file (number is empty).
|
||||
2. Determining the suffix: 0 or 1. For example, if the number is determined to be 3, the alternative log file names would be /var/log/taos/taoslog3.0 /var/log/taos/taoslog3.1. If both files do not exist, use suffix 0; if one exists and the other does not, use the existing suffix. If both exist, use the suffix of the file that was modified most recently.
|
||||
3. If the log file exceeds the configured number of lines numOfLogLines, it will switch suffixes and continue logging, e.g., /var/log/taos/taoslog3.0 is full, switch to /var/log/taos/taoslog3.1 to continue logging. /var/log/taos/taoslog3.0 will be renamed with a timestamp suffix and compressed for storage (handled by an asynchronous thread).
|
||||
4. Control how many days log files are kept through the configuration logKeepDays, logs older than a certain number of days will be deleted when new logs are compressed and stored. It is not based on natural days.
|
||||
|
||||
In addition to recording normal logs, SQL statements that take longer than the configured time will be recorded in the slow logs. Slow log files are mainly used for analyzing system performance and troubleshooting performance issues.
|
||||
|
||||
2. Slow Log Behavior Explanation
|
||||
1. Slow logs are recorded both locally in slow log files and sent to taosKeeper for structured storage via taosAdapter (monitor switch must be turned on).
|
||||
2. Slow log file storage rules are:
|
||||
1. One slow log file per day; if there are no slow logs for the day, there is no file for that day.
|
||||
2. The file name is taosSlowLog.yyyy-mm-dd (taosSlowLog.2024-08-02), and the log storage path is configured through logDir.
|
||||
3. Logs from multiple clients are stored in the same taosSlowLog.yyyy.mm.dd file under the respective log path.
|
||||
4. Slow log files are not automatically deleted or compressed.
|
||||
5. Uses the same three parameters as normal log files: logDir, minimalLogDirGB, asyncLog. The other two parameters, numOfLogLines and logKeepDays, do not apply to slow logs.
|
||||
| field | type | is\_tag | comment |
|
||||
| :------------- | :-------- | :------ | :---------------------------------------------------- |
|
||||
| start\_ts      | TIMESTAMP |         | SQL start execution time on the client, in ms; primary key |
|
||||
| request\_id | UINT64_T | | sql request id, random hash |
|
||||
| query\_time | INT32_T | | sql exec time, ms |
|
||||
| code | INT32_T | | sql return code, 0 success |
|
||||
| error\_info | VARCHAR | | error info if sql exec failed |
|
||||
| type | INT8_T | | sql type(1-query, 2-insert, 4-others) |
|
||||
| rows\_num | INT64_T | | sql result rows num |
|
||||
| sql            | VARCHAR   |         | SQL string                                             |
|
||||
| process\_name | VARCHAR | | process name |
|
||||
| process\_id | VARCHAR | | process id |
|
||||
| db | VARCHAR | TAG | which db the sql belong to |
|
||||
| user | VARCHAR | TAG | the user that exec this sql |
|
||||
| ip | VARCHAR | TAG | the client ip that exec this sql |
|
||||
| cluster\_id | VARCHAR | TAG | cluster id |
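For example, assuming taosKeeper writes monitoring data to the default `log` database (a sketch; adjust the database name to your deployment), recent failed slow queries can be listed with:

```sql
select start_ts, query_time, `sql` from log.taos_slow_sql_detail where code != 0 and start_ts > now - 1d
```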
|
||||
|
|
|
@@ -71,7 +71,10 @@ WebSocket Connector Historical Versions:
|
|||
|
||||
|WebSocket Connector Version | Major Changes | TDengine Version|
|
||||
| ----------------------- | -------------------------------------------------------------------------------------------------- | ----------------- |
|
||||
|0.3.5 | Added support for VARBINARY and GEOMETRY types, fixed known issues. | 3.3.0.0 and higher|
|
||||
|0.3.9 | Fixed the problem of incomplete data retrieval when customizing the number of rows with the "fetchmany" method. | - |
|
||||
|0.3.8 | Supported connecting SuperSet to the TDengine cloud service instance. | - |
|
||||
|0.3.5 | Fixed the issues in the crypto provider. | - |
|
||||
|0.3.4 | Supported varbinary and geometry data type. | 3.3.0.0 and higher |
|
||||
|0.3.2 | Optimize WebSocket SQL query and insertion performance, modify readme and documentation, fix known issues. | 3.2.3.0 and higher|
|
||||
|0.2.9 | Known issue fixes. | - |
|
||||
|0.2.5 | 1. Data subscription supports obtaining and resetting consumption progress. <br/>2 Support schemaless. <br/>3 Support STMT. | - |
|
||||
|
|
|
@@ -1,78 +1,79 @@
|
|||
---
|
||||
title: 与 PowerBI 的集成
|
||||
title: 与 PowerBI 集成
|
||||
sidebar_label: PowerBI
|
||||
toc_max_heading_level: 4
|
||||
---
|
||||
|
||||
Power BI是由Microsoft提供的一种商业分析工具。通过配置使用ODBC连接器,Power BI可以快速访问TDengine的数据。用户可以将标签数据、原始时序数据或按时间聚合后的时序数据从TDengine导入到Power BI,制作报表或仪表盘,整个过程不需要任何代码编写过程。
|
||||
Power BI 是由 Microsoft 提供的一种商业分析工具。通过配置使用 ODBC 连接器,Power BI 可以快速访问 TDengine 的数据。用户可以将标签数据、原始时序数据或按时间聚合后的时序数据从 TDengine 导入到 Power BI,制作报表或仪表盘,整个过程不需要任何代码编写过程。
|
||||
|
||||
## 前置条件
|
||||
|
||||
安装完成Power BI Desktop软件并运行(如未安装,请从其官方地址下载最新的Windows操作系统 32/64 位版本)。
|
||||
准备以下环境:
|
||||
- TDengine 3.3.4.0 以上版本集群已部署并正常运行(企业及社区版均可)。
|
||||
- taosAdapter 能够正常运行,详细参考 [taosAdapter 参考手册](../../../reference/components/taosadapter)。
|
||||
- 从 TDengine 官网下载最新的 Windows 操作系统 X64 客户端驱动程序并进行安装,详细参考 [安装 ODBC 驱动](../../../reference/connector/odbc/#安装)。
|
||||
- 安装完成 Power BI Desktop 软件并运行(如未安装,请从其官方地址下载最新的Windows操作系统 32/64 位版本)。
|
||||
|
||||
## 安装 ODBC 驱动
|
||||
## 配置数据源
|
||||
|
||||
从TDengine官网下载最新的Windows操作系统X64客户端驱动程序,并安装在运行Power BI的机器上。安装成功后可在“ODBC数据源(32位)”或者“ODBC数据源(64位)”管理工具中看到 TDengine 驱动程序。
|
||||
**第 1 步**,在 Windows 操作系统的开始菜单中搜索并打开【ODBC数据源(64位)】管理工具并进行配置。详细参考 [配置ODBC数据源](../../../reference/connector/odbc/#配置数据源)。
|
||||
|
||||
## 配置ODBC数据源
|
||||
**第 2 步**,打开 Power BI 并登录后,点击【主页】->【获取数据】->【其他】->【ODBC】->【连接】,添加数据源。
|
||||
|
||||
配置ODBC数据源的操作步骤如下。
|
||||
**第 3 步**,选择刚才创建的数据源名称,比如【MyTDengine】,如果需要输入 SQL,则可以点击【高级选项】选项卡,在展开的对话框的编辑框中输入 SQL 语句。点击【确定】按钮,即可连接到配置好的数据源。
|
||||
|
||||
第1步,在Windows操作系统的开始菜单中搜索并打开“ODBC数据源(32位)”或者“ODBC数据源(64位)”管理工具。
|
||||
第2步,点击“用户DSN”选项卡→“添加”按钮,进入“创建新数据源”对话框。
|
||||
第3步,在“选择您想为其安装数据源的驱动程序”列表中选择“TDengine”,点击“完成”按钮,进入TDengine ODBC数据源配置页面。填写如下必要信息。
|
||||
- DSN:数据源名称,必填,比如“MyTDengine”。
|
||||
- 连接类型:勾选“WebSocket”复选框。
|
||||
- URL:ODBC 数据源 URL,必填,比如`http://127.0.0.1:6041`。
|
||||
- 数据库:表示需要连接的数据库,可选,比如“test”。
|
||||
- 用户名:输入用户名,如果不填,默认为“root”。
|
||||
- 密码:输入用户密码,如果不填,默认为“taosdata”。
|
||||
**第 4 步**,进入【导航器】后,可以浏览对应数据库的数据表/视图并加载数据。
|
||||
|
||||
第4步,点击“测试连接”按钮,测试连接情况,如果成功连接,则会提示“成功连接到`http://127.0.0.1:6041`”。
|
||||
第5步,点击“确定”按钮,即可保存配置并退出。
|
||||
## 数据分析
|
||||
|
||||
## 导入TDengine数据到Power BI
|
||||
### 使用说明
|
||||
|
||||
将TDengine数据导入Power BI的操作步骤如下:
|
||||
第1步,打开Power BI并登录后,点击“主页”→“获取数据”→“其他”→“ODBC”→“连接”,添加数据源。
|
||||
第2步,选择刚才创建的数据源名称,比如“MyTDengine”,如果需要输入SQL,则可以点击“高级选项”选项卡,在展开的对话框的编辑框中输入SQL语句。点击“确定”按钮,即可连接到配置好的数据源。
|
||||
第3步,进入“导航器”后,可以浏览对应数据库的数据表/视图并加载数据。
|
||||
为了充分发挥 Power BI 在分析 TDengine中 数据方面的优势,用户需要先理解维度、度量、窗口切分查询、数据切分查询、时序和相关性等核心概念,之后通过自定义的 SQL 导入数据。
|
||||
- 维度:通常是分类(文本)数据,描述设备、测点、型号等类别信息。在 TDengine 的超级表中,使用标签列存储数据的维度信息,可以通过形如 “select distinct tbname, tag1, tag2 from supertable” 的SQL语法快速获得维度信息。
|
||||
- 度量:可以用于进行计算的定量(数值)字段,常见计算有求和、取平均值和最小值等。如果测点的采集周期为1s,那么一年就有 3000 多万条记录,把这些数据全部导入 Power BI 会严重影响其执行效率。在 TDengine 中,用户可以使用数据切分查询、窗口切分查询等语法,结合与窗口相关的伪列,把降采样后的数据导入Power BI 中,具体语法请参阅 TDengine 官方文档的特色查询功能部分。
|
||||
- 窗口切分查询:比如温度传感器每秒采集一次数据,但须查询每隔 10min 的温度平均值,在这种场景下可以使用窗口子句来获得需要的降采样查询结果,对应的 SQL 形如 `select tbname, _wstart date,avg(temperature) temp from table interval(10m)`,其中,`_wstart` 是伪列,表示时间窗口起始时间,10m 表示时间窗口的持续时间,`avg(temperature)` 表示时间窗口内的聚合值。
|
||||
- 数据切分查询:如果需要同时获取很多温度传感器的聚合数值,可对数据进行切分,然后在切分出的数据空间内进行一系列的计算,对应的 SQL 形如 `partition by part_list`。数据切分子句最常见的用法是在超级表查询中按标签将子表数据进行切分,将每个子表的数据独立出来,形成一条条独立的时间序列,方便针对各种时序场景的统计分析。
|
||||
- 时序:在绘制曲线或者按照时间聚合数据时,通常需要引入日期表。日期表可以从 Excel 表格中导入,也可以在 TDengine 中执行 SQL 获取,例如 `select _wstart date, count(*) cnt from test.meters where ts between A and B interval(1d) fill(0)`,其中 fill 字句表示数据缺失情况下的填充模式,伪列 _wstart 则为要获取的日期列。
|
||||
- 相关性:告诉数据之间如何关联,如度量和维度可以通过 tbname 列关联在一起,日期表和度量则可以通过 date 列关联,配合形成可视化报表。
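例如,可将数据切分查询与窗口切分查询组合使用(示例,假设使用本页后文智能电表样例中的超级表 `test.meters`),按子表切分并以 10 分钟窗口对电流做降采样:

```sql
select tbname, _wstart ws, avg(current) from test.meters partition by tbname interval(10m)
```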
|
||||
|
||||
为了充分发挥Power BI在分析TDengine中数据方面的优势,用户需要先理解维度、度量、窗口切分查询、数据切分查询、时序和相关性等核心概念,之后通过自定义的SQL导入数据。
|
||||
- 维度:通常是分类(文本)数据,描述设备、测点、型号等类别信息。在TDengine的超级表中,使用标签列存储数据的维度信息,可以通过形如“select distinct tbname, tag1, tag2 from supertable”的SQL语法快速获得维度信息。
|
||||
- 度量:可以用于进行计算的定量(数值)字段,常见计算有求和、取平均值和最小值等。如果测点的采集周期为1s,那么一年就有3000多万条记录,把这些数据全部导入Power BI会严重影响其执行效率。在TDengine中,用户可以使用数据切分查询、窗口切分查询等语法,结合与窗口相关的伪列,把降采样后的数据导入Power BI中,具体语法请参阅TDengine官方文档的特色查询功能部分。
|
||||
- 窗口切分查询:比如温度传感器每秒采集一次数据,但须查询每隔10min的温度平均值,在这种场景下可以使用窗口子句来获得需要的降采样查询结果,对应的SQL形如`select tbname, _wstart date,avg(temperature) temp from table interval(10m)`,其中,_wstart是伪列,表示时间窗口起始时间,10m表示时间窗口的持续时间,avg(temperature)表示时间窗口内的聚合值。
|
||||
- 数据切分查询:如果需要同时获取很多温度传感器的聚合数值,可对数据进行切分,然后在切分出的数据空间内进行一系列的计算,对应的SQL形如 `partition by part_list`。数据切分子句最常见的用法是在超级表查询中按标签将子表数据进行切分,将每个子表的数据独立出来,形成一条条独立的时间序列,方便针对各种时序场景的统计分析。
|
||||
- 时序:在绘制曲线或者按照时间聚合数据时,通常需要引入日期表。日期表可以从Excel表格中导入,也可以在TDengine中执行SQL获取,例如 `select _wstart date, count(*) cnt from test.meters where ts between A and B interval(1d) fill(0)`,其中fill字句表示数据缺失情况下的填充模式,伪列_wstart则为要获取的日期列。
|
||||
- 相关性:告诉数据之间如何关联,如度量和维度可以通过tbname列关联在一起,日期表和度量则可以通过date列关联,配合形成可视化报表。
|
||||
### 智能电表样例
|
||||
|
||||
## 智能电表样例
|
||||
TDengine 采用了一种独特的数据模型,以优化时序数据的存储和查询性能。该模型利用超级表作为模板,为每台设备创建一张独立的表。每张表在设计时考虑了高度的可扩展性,最多可包含 4096 个数据列和 128 个标签列。这种设计使得 TDengine 能够高效地处理大量时序数据,同时保持数据的灵活性和易用性。
|
||||
|
||||
TDengine采用了一种独特的数据模型,以优化时序数据的存储和查询性能。该模型利用超级表作为模板,为每台设备创建一张独立的表。每张表在设计时考虑了高度的可扩展性,最多可包含4096个数据列和128个标签列。这种设计使得TDengine能够高效地处理大量时序数据,同时保持数据的灵活性和易用性。
|
||||
以智能电表为例,假设每块电表每秒产生一条记录,那么每天将产生 86400 条记录。对于 1000 块智能电表来说,每年产生的记录将占用大约 600GB 的存储空间。面对如此庞大的数据量,Power BI 等商业智能工具在数据分析和可视化方面发挥着重要作用。
|
||||
|
||||
以智能电表为例,假设每块电表每秒产生一条记录,那么每天将产生86 400条记录。对于1000块智能电表来说,每年产生的记录将占用大约600GB的存储空间。面对如此庞大的数据量,Power BI等商业智能工具在数据分析和可视化方面发挥着重要作用。
|
||||
在 Power BI 中,用户可以将 TDengine 表中的标签列映射为维度列,以便对数据进行分组和筛选。同时,数据列的聚合结果可以导入为度量列,用于计算关键指标和生成报表。通过这种方式,Power BI 能够帮助决策者快速获取所需的信息,深入了解业务运营情况,从而制定更加明智的决策。
|
||||
|
||||
在Power BI中,用户可以将TDengine表中的标签列映射为维度列,以便对数据进行分组和筛选。同时,数据列的聚合结果可以导入为度量列,用于计算关键指标和生成报表。通过这种方式,Power BI能够帮助决策者快速获取所需的信息,深入了解业务运营情况,从而制定更加明智的决策。
|
||||
根据如下步骤,便可以体验通过 Power BI 生成时序数据报表的功能。
|
||||
|
||||
根据如下步骤,便可以体验通过Power BI生成时序数据报表的功能。
|
||||
第1步,使用TDengine的taosBenchMark快速生成1000块智能电表3天的数据,采集频率为1s。
|
||||
```shell
|
||||
taosBenchmark -t 1000 -n 259200 -S 1000 -y
|
||||
```
|
||||
第2步,导入维度数据。在Power BI中导入表的标签列,取名为tags,通过如下SQL获取超级表下所有智能电表的标签数据。
|
||||
```sql
|
||||
select distinct tbname device, groupId, location from test.meters
|
||||
```
|
||||
第3步,导入度量数据。在Power BI中,按照1小时的时间窗口,导入每块智能电表的电流均值、电压均值、相位均值,取名为data,SQL如下。
|
||||
```sql
|
||||
select tbname, _wstart ws, avg(current), avg(voltage), avg(phase) from test.meters PARTITION by tbname interval(1h)
|
||||
```
|
||||
第4步,导入日期数据。按照1天的时间窗口,获得时序数据的时间范围及数据计数,SQL如下。需要在Power Query编辑器中将date列的格式从“文本”转化为“日期”。
|
||||
```sql
|
||||
select _wstart date, count(*) from test.meters interval(1d) having count(*)>0
|
||||
```
|
||||
第5步,建立维度和度量的关联关系。打开模型视图,建立表tags和data的关联关系,将tbname设置为关联数据列。
|
||||
第6步,建立日期和度量的关联关系。打开模型视图,建立数据集date和data的关联关系,关联的数据列为date和datatime。
|
||||
第7步,制作报告。在柱状图、饼图等控件中使用这些数据。
|
||||
**第 1 步**,使用 TDengine 的 taosBenchmark 快速生成 1000 块智能电表 3 天的数据,采集频率为 1s。
|
||||
|
||||
由于TDengine处理时序数据的超强性能,使得用户在数据导入及每日定期刷新数据时,都可以得到非常好的体验。更多有关Power BI视觉效果的构建方法,请参照Power BI的官方文档。
|
||||
```shell
|
||||
taosBenchmark -t 1000 -n 259200 -S 1000 -y
|
||||
```
|
||||
|
||||
**第 2 步**,导入维度数据。在 Power BI 中导入表的标签列,取名为 tags,通过如下 SQL 获取超级表下所有智能电表的标签数据。
|
||||
|
||||
```sql
|
||||
select distinct tbname device, groupId, location from test.meters
|
||||
```
|
||||
|
||||
**第 3 步**,导入度量数据。在 Power BI 中,按照 1 小时的时间窗口,导入每块智能电表的电流均值、电压均值、相位均值,取名为 data,SQL如下。
|
||||
|
||||
```sql
|
||||
select tbname, _wstart ws, avg(current), avg(voltage), avg(phase) from test.meters PARTITION by tbname interval(1h)
|
||||
```
|
||||
|
||||
**第 4 步**,导入日期数据。按照 1 天的时间窗口,获得时序数据的时间范围及数据计数,SQL 如下。需要在 Power Query 编辑器中将 date 列的格式从“文本”转化为“日期”。
|
||||
|
||||
```sql
|
||||
select _wstart date, count(*) from test.meters interval(1d) having count(*)>0
|
||||
```
|
||||
|
||||
**第 5 步**,建立维度和度量的关联关系。打开模型视图,建立表 tags 和 data 的关联关系,将 tbname 设置为关联数据列。
|
||||
|
||||
**第 6 步**,建立日期和度量的关联关系。打开模型视图,建立数据集 date 和 data 的关联关系,关联的数据列为 date 和 datatime。
|
||||
|
||||
**第 7 步**,制作报告。在柱状图、饼图等控件中使用这些数据。
|
||||
|
||||
由于 TDengine 处理时序数据的超强性能,用户在数据导入及每日定期刷新数据时,都可以得到非常好的体验。更多有关 Power BI 视觉效果的构建方法,请参照 Power BI 的官方文档。
|
|
@@ -1,46 +1,55 @@
|
|||
---
|
||||
title: 与永洪 BI 的集成
|
||||
title: 与永洪 BI 集成
|
||||
sidebar_label: 永洪 BI
|
||||
toc_max_heading_level: 4
|
||||
---
|
||||
|
||||
永洪 BI 是一个专为各种规模企业打造的全业务链大数据分析解决方案,旨在帮助用户轻松发掘大数据价值,获取深入的洞察力。该平台以其灵活性和易用性而广受好评,无论企业规模大小,都能从中受益。
|
||||
|
||||
为了实现与 TDengine 的高效集成,永洪 BI 提供了 JDBC 连接器。用户只须按照简单的步骤配置数据源,即可将 TDengine 作为数据源添加到永洪BI中。这一过程不仅快速便捷,还能确保数据的准确性和稳定性。
|
||||
为了实现与 TDengine 的高效集成,永洪 BI 提供了 JDBC 连接器。用户只须按照简单的步骤配置数据源,即可将 TDengine 作为数据源添加到永洪 BI 中。这一过程不仅快速便捷,还能确保数据的准确性和稳定性。
|
||||
|
||||
一旦数据源配置完成,永洪BI便能直接从TDengine中读取数据,并利用其强大的数据处理和分析功能,为用户提供丰富的数据展示、分析和预测能力。这意味着用户无须编写复杂的代码或进行烦琐的数据转换工作,即可轻松获取所需的业务洞察。
|
||||
一旦数据源配置完成,永洪 BI 便能直接从 TDengine 中读取数据,并利用其强大的数据处理和分析功能,为用户提供丰富的数据展示、分析和预测能力。这意味着用户无须编写复杂的代码或进行烦琐的数据转换工作,即可轻松获取所需的业务洞察。
|
||||
|
||||
## 前置条件
|
||||
|
||||
准备以下环境:
|
||||
- TDengine 3.3.2.0 以上版本集群已部署并正常运行(企业及社区版均可)。
|
||||
- taosAdapter 能够正常运行,详细参考 [taosAdapter 参考手册](../../../reference/components/taosadapter)。
|
||||
- 确保永洪 BI 已经安装并运行(如果未安装,请到永洪科技官方下载页面下载)。
|
||||
- 安装JDBC驱动。从 maven.org 下载 TDengine JDBC 连接器文件 “taos-jdbcdriver-3.4.0-dist.jar”,并安装在永洪 BI 的机器上。
|
||||
- 安装 JDBC 驱动。从 maven.org 下载 TDengine JDBC 连接器文件 `taos-jdbcdriver-3.4.0-dist.jar` 及以上版本。
|
||||
|
||||
## 配置JDBC数据源
|
||||
## 配置数据源
|
||||
|
||||
配置JDBC数据源的步骤如下。
|
||||
配置 JDBC 数据源的步骤如下:
|
||||
|
||||
第1步,在打开的永洪BI中点击“添加数据源”按钮,选择SQL数据源中的“GENERIC”类型。
|
||||
第2步,点击“选择自定义驱动”按钮,在“驱动管理”对话框中点击“驱动列表”旁边的“+”,输入名称“MyTDengine”。然后点击“上传文件”按钮,上传刚刚下载的TDengine JDBC连接器文件“taos-jdbcdriver-3.2.7-dist.jar”,并选择“com.taosdata.jdbc.
|
||||
rs.RestfulDriver”驱动,最后点击“确定”按钮,完成驱动添加步骤。
|
||||
第3步,复制下面的内容到“URL”字段。
|
||||
```text
|
||||
jdbc:TAOS-RS://127.0.0.1:6041?user=root&password=taosdata
|
||||
```
|
||||
第4步,在“认证方式”中点击“无身份认证”单选按钮。
|
||||
第5步,在数据源的高级设置中修改“Quote 符号”的值为反引号(`)。
|
||||
第6步,点击“测试连接”按钮,弹出“测试成功”对话框。点击“保存”按钮,输入“MyTDengine”来保存TDengine数据源。
|
||||
**第 1 步**,在打开的永洪 BI 中点击【添加数据源】按钮,选择 SQL 数据源中的 “GENERIC” 类型。
|
||||
|
||||
## 创建TDengine数据集
|
||||
**第 2 步**,点击【选择自定义驱动】按钮,在【驱动管理】对话框中点击【驱动列表】旁边的 “+”,输入名称 “MyTDengine”。然后点击【上传文件】按钮,上传刚刚下载的 TDengine JDBC 连接器文件 `taos-jdbcdriver-3.2.7-dist.jar`,并选择 `com.taosdata.jdbc.rs.RestfulDriver` 驱动,最后点击“确定”按钮,完成驱动添加步骤。
|
||||
|
||||
创建TDengine数据集的步骤如下。
|
||||
**第 3 步**,复制下面的内容到【URL】字段。
|
||||
|
||||
第1步,在永洪BI中点击“添加数据源”按钮,展开刚刚创建的数据源,并浏览TDengine中的超级表。
|
||||
第2步,可以将超级表的数据全部加载到永洪BI中,也可以通过自定义SQL导入部分数据。
|
||||
第3步,当勾选“数据库内计算”复选框时,永洪BI将不再缓存TDengine的时序数据,并在处理查询时将SQL请求发送给TDengine直接处理。
|
||||
```text
|
||||
jdbc:TAOS-RS://127.0.0.1:6041?user=root&password=taosdata
|
||||
```
|
||||
|
||||
当导入数据后,永洪BI会自动将数值类型设置为“度量”列,将文本类型设置为“维度”列。而在TDengine的超级表中,由于将普通列作为数据的度量,将标签列作为数据的维度,因此用户可能需要在创建数据集时更改部分列的属性。TDengine在支持标准SQL的基础之上还提供了一系列满足时序业务场景需求的特色查询语法,例如数据切分查询、窗口切分查询等,具体操作步骤请参阅TDengine的官方文档。通过使用这些特色查询,当永洪BI将SQL查询发送到TDengine时,可以大大提高数据访问速度,减少网络传输带宽。
|
||||
**第 4 步**,在【认证方式】中点击【无身份认证】单选按钮。
|
||||
|
||||
**第 5 步**,在数据源的高级设置中修改 “Quote 符号” 的值为反引号(`)。
|
||||
|
||||
**第 6 步**,点击【测试连接】按钮,弹出【测试成功】对话框。点击【保存】按钮,输入 “MyTDengine” 来保存 TDengine 数据源。
|
||||
|
||||
**第 7 步**,在永洪 BI 中点击【添加数据源】按钮,展开刚刚创建的数据源,并浏览 TDengine 中的超级表。
|
||||
|
||||
**第 8 步**,可以将超级表的数据全部加载到永洪 BI 中,也可以通过自定义 SQL 导入部分数据。
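例如,可使用如下自定义 SQL 导入部分降采样数据(示例,超级表名沿用本页后文参数化查询中的 `supertable`,请按实际表名调整):

```sql
select tbname, _wstart ws, count(*) cnt from supertable partition by tbname interval(1h)
```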
|
||||
|
||||
**第 9 步**,当勾选【数据库内计算】复选框时,永洪 BI 将不再缓存 TDengine 的时序数据,并在处理查询时将 SQL 请求发送给 TDengine 直接处理。
|
||||
|
||||
## 数据分析
|
||||
|
||||
当导入数据后,永洪 BI 会自动将数值类型设置为 “度量” 列,将文本类型设置为 “维度” 列。而在 TDengine 的超级表中,由于将普通列作为数据的度量,将标签列作为数据的维度,因此用户可能需要在创建数据集时更改部分列的属性。TDengine 在支持标准 SQL 的基础之上还提供了一系列满足时序业务场景需求的特色查询语法,例如数据切分查询、窗口切分查询等,具体操作步骤请参阅 TDengine 的官方文档。通过使用这些特色查询,当永洪 BI 将 SQL 查询发送到 TDengine 时,可以大大提高数据访问速度,减少网络传输带宽。
|
||||
|
||||
在永洪 BI 中,你可以创建 “参数” 并在 SQL 中使用,通过手动、定时的方式动态执行这些 SQL,即可实现可视化报告的刷新效果。如下 SQL 可以从 TDengine 实时读取数据。
|
||||
|
||||
在永洪BI中,你可以创建“参数”并在SQL中使用,通过手动、定时的方式动态执行这些SQL,即可实现可视化报告的刷新效果。如下SQL可以从TDengine实时读取数据。
|
||||
```sql
|
||||
select _wstart ws, count(*) cnt from supertable where tbname=?{metric} and ts = ?{from} and ts < ?{to} interval(?{interval})
|
||||
```
|
||||
|
@@ -49,17 +58,15 @@ select _wstart ws, count(*) cnt from supertable where tbname=?{metric} and ts =
|
|||
1. `_wstart`:表示时间窗口起始时间。
|
||||
2. `count(*)`:表示时间窗口内的聚合值。
|
||||
3. `?{interval}`:表示在 SQL 语句中引入名称为 `interval` 的参数,当 BI 工具查询数据时,会给 `interval` 参数赋值,如果取值为 1m,则表示按照 1 分钟的时间窗口降采样数据。
|
||||
4. `?{metric}`:该参数用来指定查询的数据表名称,当在 BI 工具中把某个“下拉参数组件”的 ID 也设置为 metric 时,该“下拉参数组件”的被选择项将会和该参数绑定在一起,实现动态选择的效果。
|
||||
5. `?{from}` 和 `?{to}`:这两个参数用来表示查询数据集的时间范围,可以与“文本参数组件”绑定。
|
||||
您可以在 BI 工具的“编辑参数”对话框中修改“参数”的数据类型、数据范围、默认取值,并在“可视化报告”中动态设置这些参数的值。
|
||||
4. `?{metric}`:该参数用来指定查询的数据表名称,当在 BI 工具中把某个 “下拉参数组件” 的 ID 也设置为 metric 时,该 “下拉参数组件” 的被选择项将会和该参数绑定在一起,实现动态选择的效果。
|
||||
5. `?{from}` 和 `?{to}`:这两个参数用来表示查询数据集的时间范围,可以与 “文本参数组件” 绑定。
|
||||
您可以在 BI 工具的【编辑参数】对话框中修改 “参数” 的数据类型、数据范围、默认取值,并在 “可视化报告” 中动态设置这些参数的值。
|
||||
|
||||
## 制作可视化报告
|
||||
制作可视化报告的步骤如下:
|
||||
|
||||
制作可视化报告的步骤如下。
|
||||
|
||||
1. 在永洪 BI 工具中点击“制作报告”,创建画布。
|
||||
2. 拖动可视化组件到画布中,例如“表格组件”。
|
||||
3. 在“数据集”侧边栏中选择待绑定的数据集,将数据列中的“维度”和“度量”按需绑定到“表格组件”。
|
||||
4. 点击“保存”后,即可查看报告。
|
||||
1. 在永洪 BI 工具中点击【制作报告】,创建画布。
|
||||
2. 拖动可视化组件到画布中,例如 “表格组件”。
|
||||
3. 在【数据集】侧边栏中选择待绑定的数据集,将数据列中的 “维度” 和 “度量” 按需绑定到 “表格组件”。
|
||||
4. 点击【保存】后,即可查看报告。
|
||||
5. 更多有关永洪 BI 工具的信息,请查询永洪科技官方帮助文档。
|
||||
|
||||
|
|
|
@@ -1,48 +1,50 @@
|
|||
---
|
||||
sidebar_label: Seeq
|
||||
title: 与 Seeq 的集成
|
||||
title: 与 Seeq 集成
|
||||
toc_max_heading_level: 4
|
||||
---
|
||||
|
||||
Seeq 是制造业和工业互联网(IIOT)高级分析软件。Seeq 支持在工艺制造组织中使用机器学习创新的新功能。这些功能使组织能够将自己或第三方机器学习算法部署到前线流程工程师和主题专家使用的高级分析应用程序,从而使单个数据科学家的努力扩展到许多前线员工。
|
||||
|
||||
通过 TDengine Java connector, Seeq 可以轻松支持查询 TDengine 提供的时序数据,并提供数据展现、分析、预测等功能。
|
||||
通过 `TDengine Java connector`, Seeq 可以轻松支持查询 TDengine 提供的时序数据,并提供数据展现、分析、预测等功能。
|
||||
|
||||
## 前置条件
|
||||
|
||||
- Seeq 已经安装。从 [Seeq 官网](https://www.seeq.com/customer-download)下载相关软件,例如 Seeq Server 和 Seeq Data Lab 等。Seeq Data Lab 需要安装在和 Seeq Server 不同的服务器上,并通过配置和 Seeq Server 互联。详细安装配置指令参见[Seeq 知识库]( https://support.seeq.com/kb/latest/cloud/)。
|
||||
准备以下环境:
|
||||
- TDengine 3.1.0.3 以上版本集群已部署并正常运行(企业及社区版均可)。
|
||||
- taosAdapter 能够正常运行,详细参考 [taosAdapter 参考手册](../../../reference/components/taosadapter)。
|
||||
- Seeq 已经安装。从 [Seeq 官网](https://www.seeq.com/customer-download)下载相关软件,例如 `Seeq Server` 和 `Seeq Data Lab` 等。`Seeq Data Lab` 需要安装在和 `Seeq Server` 不同的服务器上,并通过配置和 `Seeq Server` 互联。详细安装配置指令参见 [Seeq 知识库]( https://support.seeq.com/kb/latest/cloud/)。
|
||||
- 安装 JDBC 驱动。从 `maven.org` 下载 `TDengine JDBC` 连接器文件 `taos-jdbcdriver-3.2.5-dist.jar` 及以上版本。
|
||||
|
||||
- TDengine 本地实例已安装。 请参考[官网文档](../../../get-started)。 若使用 TDengine Cloud,请在 https://cloud.taosdata.com 申请帐号并登录查看如何访问 TDengine Cloud。
|
||||
## 配置数据源
|
||||
|
||||
## 配置 Seeq 访问 TDengine
|
||||
|
||||
1. 查看 data 存储位置
|
||||
**第 1 步**,查看 data 存储位置
|
||||
|
||||
```
|
||||
sudo seeq config get Folders/Data
|
||||
```
|
||||
|
||||
2. 从 maven.org 下载 TDengine Java connector 包,目前最新版本为[3.2.5](https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/3.2.5/taos-jdbcdriver-3.2.5-dist.jar),并拷贝至 data 存储位置的 plugins\lib 中。
|
||||
**第 2 步**,将 `maven.org` 下载 `TDengine Java connector` 包并拷贝至 data 存储位置的 `plugins\lib` 中。
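例如,假设 data 存储位置为 `/opt/seeq/data`(示例路径,请按实际路径与驱动版本调整),可执行:

```
wget https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/3.2.5/taos-jdbcdriver-3.2.5-dist.jar
sudo cp taos-jdbcdriver-3.2.5-dist.jar /opt/seeq/data/plugins/lib/
```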
|
||||
|
||||
3. 重新启动 seeq server
|
||||
**第 3 步**,重新启动 seeq server
|
||||
|
||||
```
|
||||
sudo seeq restart
|
||||
```
|
||||
|
||||
4. 输入 License
|
||||
**第 4 步**,输入 License
|
||||
|
||||
使用浏览器访问 ip:34216 并按照说明输入 license。
|
||||
|
||||
## 使用 Seeq 分析 TDengine 时序数据
|
||||
|
||||
本章节演示如何使用 Seeq 软件配合 TDengine 进行时序数据分析。
|
||||
## 数据分析
|
||||
|
||||
### 场景介绍
|
||||
|
||||
示例场景为一个电力系统,用户每天从电站仪表收集用电量数据,并将其存储在 TDengine 集群中。现在用户想要预测电力消耗将会如何发展,并购买更多设备来支持它。用户电力消耗随着每月订单变化而不同,另外考虑到季节变化,电力消耗量会有所不同。这个城市位于北半球,所以在夏天会使用更多的电力。我们模拟数据来反映这些假定。
|
||||
|
||||
### 数据 Schema
|
||||
### 数据准备
|
||||
|
||||
**第 1 步**,在 TDengine 中创建表。
|
||||
|
||||
```
|
||||
CREATE STABLE meters (ts TIMESTAMP, num INT, temperature FLOAT, goods INT) TAGS (device NCHAR(20));
|
||||
|
@@ -51,20 +53,16 @@ CREATE TABLE goods (ts1 TIMESTAMP, ts2 TIMESTAMP, goods FLOAT);
|
|||
|
||||

|
||||
|
||||
### 构造数据方法
|
||||
**第 2 步**,在 TDengine 中构造数据。
|
||||
|
||||
```
|
||||
python mockdata.py
|
||||
taos -s "insert into power.goods select _wstart, _wstart + 10d, avg(goods) from power.meters interval(10d);"
|
||||
```
|
||||
|
||||
源代码托管在[GitHub 仓库](https://github.com/sangshuduo/td-forecasting)。
|
||||
源代码托管在 [GitHub 仓库](https://github.com/sangshuduo/td-forecasting)。
|
||||
|
||||
## 使用 Seeq 进行数据分析
|
||||
|
||||
### 配置数据源(Data Source)
|
||||
|
||||
使用 Seeq 管理员角色的帐号登录,并新建数据源。
|
||||
**第 3 步**,使用 Seeq 管理员角色的帐号登录,并新建数据源。
|
||||
|
||||
- Power
|
||||
|
||||
|
@@ -246,7 +244,7 @@ taos -s "insert into power.goods select _wstart, _wstart + 10d, avg(goods) from
|
|||
|
||||
### 使用 Seeq Workbench
|
||||
|
||||
登录 Seeq 服务页面并新建 Seeq Workbench,通过选择数据源搜索结果和根据需要选择不同的工具,可以进行数据展现或预测,详细使用方法参见[官方知识库](https://support.seeq.com/space/KB/146440193/Seeq+Workbench)。
|
||||
登录 Seeq 服务页面并新建 Seeq Workbench,通过选择数据源搜索结果和根据需要选择不同的工具,可以进行数据展现或预测,详细使用方法参见 [官方知识库](https://support.seeq.com/space/KB/146440193/Seeq+Workbench)。
|
||||
|
||||

|
||||
|
||||
|
@@ -319,78 +317,10 @@ plt.show()
|
|||
|
||||

|
||||
|
||||
## 配置 Seeq 数据源连接 TDengine Cloud
|
||||
### 方案总结
|
||||
|
||||
配置 Seeq 数据源连接 TDengine Cloud 和连接 TDengine 本地安装实例没有本质的不同,只要登录 TDengine Cloud 后选择“编程 - Java”并拷贝带 token 字符串的 JDBC 填写为 Seeq Data Source 的 DatabaseJdbcUrl 值。
|
||||
注意使用 TDengine Cloud 时 SQL 命令中需要指定数据库名称。
|
||||
通过集成 Seeq 和 TDengine,可以充分利用 TDengine 高效的存储和查询性能,同时也可以受益于 Seeq 提供给用户的强大数据可视化和分析功能。
|
||||
|
||||
### 用 TDengine Cloud 作为数据源的配置内容示例:
|
||||
这种集成使用户能够充分利用 TDengine 的高性能时序数据存储和检索,确保高效处理大量数据。同时,Seeq 提供高级分析功能,如数据可视化、异常检测、相关性分析和预测建模,使用户能够获得有价值的洞察并基于数据进行决策。
|
||||
|
||||
```
|
||||
{
|
||||
"QueryDefinitions": [
|
||||
{
|
||||
"Name": "CloudVoltage",
|
||||
"Type": "SIGNAL",
|
||||
"Sql": "SELECT ts, voltage FROM test.meters",
|
||||
"Enabled": true,
|
||||
"TestMode": false,
|
||||
"TestQueriesDuringSync": true,
|
||||
"InProgressCapsulesEnabled": false,
|
||||
"Variables": null,
|
||||
"Properties": [
|
||||
{
|
||||
"Name": "Name",
|
||||
"Value": "Voltage",
|
||||
"Sql": null,
|
||||
"Uom": "string"
|
||||
},
|
||||
{
|
||||
"Name": "Interpolation Method",
|
||||
"Value": "linear",
|
||||
"Sql": null,
|
||||
"Uom": "string"
|
||||
},
|
||||
{
|
||||
"Name": "Maximum Interpolation",
|
||||
"Value": "2day",
|
||||
"Sql": null,
|
||||
"Uom": "string"
|
||||
}
|
||||
],
|
||||
"CapsuleProperties": null
|
||||
}
|
||||
],
|
||||
"Type": "GENERIC",
|
||||
"Hostname": null,
|
||||
"Port": 0,
|
||||
"DatabaseName": null,
|
||||
"Username": "root",
|
||||
"Password": "taosdata",
|
||||
"InitialSql": null,
|
||||
"TimeZone": null,
|
||||
"PrintRows": false,
|
||||
"UseWindowsAuth": false,
|
||||
"SqlFetchBatchSize": 100000,
|
||||
"UseSSL": false,
|
||||
"JdbcProperties": null,
|
||||
"GenericDatabaseConfig": {
|
||||
"DatabaseJdbcUrl": "jdbc:TAOS-RS://gw.cloud.taosdata.com?useSSL=true&token=41ac9d61d641b6b334e8b76f45f5a8XXXXXXXXXX",
|
||||
"SqlDriverClassName": "com.taosdata.jdbc.rs.RestfulDriver",
|
||||
"ResolutionInNanoseconds": 1000,
|
||||
"ZonedColumnTypes": []
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### TDengine Cloud 作为数据源的 Seeq Workbench 界面示例
|
||||
|
||||

|
||||
|
||||
## 方案总结
|
||||
|
||||
通过集成Seeq和TDengine,可以充分利用TDengine高效的存储和查询性能,同时也可以受益于Seeq提供给用户的强大数据可视化和分析功能。
|
||||
|
||||
这种集成使用户能够充分利用TDengine的高性能时序数据存储和检索,确保高效处理大量数据。同时,Seeq提供高级分析功能,如数据可视化、异常检测、相关性分析和预测建模,使用户能够获得有价值的洞察并基于数据进行决策。
|
||||
|
||||
综合来看,Seeq和TDengine共同为制造业、工业物联网和电力系统等各行各业的时序数据分析提供了综合解决方案。高效数据存储和先进的分析相结合,赋予用户充分发挥时序数据潜力的能力,推动运营改进,并支持预测和规划分析应用。
|
||||
综合来看,Seeq 和 TDengine 共同为制造业、工业物联网和电力系统等各行各业的时序数据分析提供了综合解决方案。高效数据存储和先进的分析相结合,赋予用户充分发挥时序数据潜力的能力,推动运营改进,并支持预测和规划分析应用。
|
||||
|
|
|
@ -4,38 +4,39 @@ title: 与 Superset 集成
|
|||
---
|
||||
Apache Superset 是一个现代的企业级商业智能(BI)Web 应用程序,主要用于数据探索和可视化。它由 Apache 软件基金会支持,是一个开源项目,拥有活跃的社区和丰富的生态系统。Apache Superset 提供了直观的用户界面,使得创建、分享和可视化数据变得简单,同时支持多种数据源和丰富的可视化选项。
|
||||
|
||||
通过 TDengine 的 Python 连接器, Apache Superset 可支持 TDengine 数据源并提供数据展现、分析等功能
|
||||
通过 TDengine 的 Python 连接器, Apache Superset 可支持 TDengine 数据源并提供数据展现、分析等功能。
|
||||
|
||||
|
||||
## 前置条件
|
||||
|
||||
准备以下环境:
|
||||
- TDengine 集群已部署并正常运行(企业及社区版均可)
|
||||
- taosAdapter 能够正常运行。详细参考 [taosAdapter 使用手册](../../../reference/components/taosadapter)
|
||||
- Apache Superset v2.1.0 或以上版本已安装。安装 Apache Superset 请参考 [官方文档](https://superset.apache.org/)
|
||||
- TDengine 3.2.3.0 及以上版本集群已部署并正常运行(企业及社区版均可)。
|
||||
- taosAdapter 能够正常运行,详细参考 [taosAdapter 使用手册](../../../reference/components/taosadapter)。
|
||||
- Apache Superset v2.1.0 或以上版本已安装,安装 Apache Superset 请参考 [官方文档](https://superset.apache.org/)。
|
||||
- 安装 Python 连接器驱动,详细参考 [TDengine Python Connector](../../../reference/connector/python)。
|
||||
|
||||
|
||||
## 安装 TDengine Python 连接器
|
||||
|
||||
TDengine Python 连接器从 `v2.1.18` 起带 Superset 连接驱动,会安装至 Superset 相应目录下并向 Superset 提供数据源服务
|
||||
Superset 与 TDengine 之间使用 WebSocket 协议连接,需安装支持此协议的 `taos-ws-py` 组件, 全部安装脚本如下:
|
||||
```bash
|
||||
pip3 install taospy
|
||||
pip3 install taos-ws-py
|
||||
```
|
||||
|
||||
## 配置 TDengine 数据源
|
||||
|
||||
**第 1 步**,进入新建数据库连接页面 "Superset" → "Setting" → "Database Connections" → "+DATABASE"
|
||||
**第 2 步**,选择 TDengine 数据库连接。"SUPPORTED DATABASES" 下拉列表中选择 "TDengine" 项。
|
||||
:::tip
|
||||
注意:若下拉列表中无 "TDengine" 项,请检查安装顺序,确保 `TDengine Python 连接器` 在 `Superset` 安装之后再安装。
|
||||
TDengine Python 连接器从 `v2.1.18` 起带 Superset 连接驱动,会安装至 Superset 相应目录下并向 Superset 提供数据源服务。
|
||||
:::
|
||||
|
||||
## 配置数据源
|
||||
|
||||
**第 1 步**,进入新建数据库连接页面【Superset】->【Setting】->【Database Connections】->【+DATABASE】。
|
||||
|
||||
**第 2 步**,选择 TDengine 数据库连接。【SUPPORTED DATABASES】下拉列表中选择 `TDengine` 项。
|
||||
|
||||
:::tip
|
||||
注意:若下拉列表中无 `TDengine` 项,请检查安装顺序,确保 `TDengine Python 连接器` 在 `Superset` 安装之后再安装。
|
||||
:::
|
||||
**第 3 步**,"DISPLAY NAME" 中填写连接名称,任意填写即可。
|
||||
**第 4 步**,"SQLALCHEMY URL" 项为关键连接信息串,务必填写正确。
|
||||
|
||||
**第 3 步**,【DISPLAY NAME】中填写连接名称,任意填写即可。
|
||||
|
||||
**第 4 步**,【SQLALCHEMY URL】项为关键连接信息串,务必填写正确。
|
||||
|
||||
```bash
|
||||
taosws://用户名:密码@主机名:端口号
|
||||
```
|
||||
|
||||
| 参数名称 | <center>参数说明</center> |
|
||||
|:------- |:-------------------------------- |
|
||||
| 用户名 | 登录 TDengine 数据库用户名 |
|
||||
|
@ -43,32 +44,33 @@ taosws://用户名:密码@主机名:端口号
|
|||
| 主机名 | TDengine 数据库所在主机名称 |
|
||||
| 端口号 | 提供 WebSocket 服务的端口,默认:6041 |
|
||||
|
||||
示例:
|
||||
本机安装 TDengine 数据库,WebSocket 服务端口 6041,使用默认用户名密码,"SQLALCHEMY URL" 应为:
|
||||
示例:
|
||||
|
||||
本机安装 TDengine 数据库,WebSocket 服务端口 6041,使用默认用户名密码,`SQLALCHEMY URL` 应为:
|
||||
|
||||
```bash
|
||||
taosws://root:taosdata@localhost:6041
|
||||
```
|
||||
**第 5 步**,配置好连接串,点击 “TEST CONNECTION” 测试连接是否成功,测试通过后点击 “CONNECT” 按钮,完成连接。
|
||||
**第 5 步**,配置好连接串,点击【TEST CONNECTION】测试连接是否成功,测试通过后点击【CONNECT】按钮,完成连接。
|
||||
|
||||
## 数据分析
|
||||
|
||||
## 开始使用
|
||||
### 数据准备
|
||||
|
||||
TDengine 数据源与其它数据源使用上无差别,这里简单介绍下数据查询:
|
||||
1. Superset 界面点击右上角 “+” 号按钮,选择 “SQL query”, 进入查询界面
|
||||
2. 左上角 “DATABASE” 下拉列表中选择前面已创建好的 “TDengine” 数据源
|
||||
3. “SCHEMA” 下拉列表,选择要操作的数据库名(系统库不显示)
|
||||
4. “SEE TABLE SCHEMA” 选择要操作的超级表名或普通表名(子表不显示)
|
||||
5. 随后会在下方显示选定表的 SCHEMA 信息
|
||||
6. 在 SQL 编辑器区域可输入符合 TDengine 语法的任意 SQL 语句执行
|
||||
TDengine 数据源与其它数据源使用上无差别,这里简单介绍下数据查询:
|
||||
|
||||
## 示例效果
|
||||
1. `Superset` 界面点击右上角【+】号按钮,选择 `SQL query`, 进入查询界面。
|
||||
2. 左上角【DATABASE】下拉列表中选择前面已创建好的 `TDengine` 数据源。
|
||||
3. 【SCHEMA】下拉列表,选择要操作的数据库名(系统库不显示)。
|
||||
4. 【SEE TABLE SCHEMA】选择要操作的超级表名或普通表名(子表不显示)。
|
||||
5. 随后会在下方显示选定表的 `SCHEMA` 信息。
|
||||
6. 在 `SQL` 编辑器区域可输入符合 `TDengine` 语法的任意 `SQL` 语句执行。
|
||||
|
||||
我们选择 Superset Chart 模板中较流行的两个模板做了效果展示,以智能电表数据为例:
|
||||
### 智能电表样例
|
||||
|
||||
1. "Aggregate" 类型,展示在第 4 组中指定时间段内每分钟采集电压值(voltage)最大值
|
||||
我们选择【Superset Chart】模板中较流行的两个模板做了效果展示,以智能电表数据为例:
|
||||
|
||||

|
||||
|
||||
2. "RAW RECORDS" 类型,展示在第 4 组中指定时间段内 current, voltage 的采集值
|
||||
|
||||

|
||||
1. `Aggregate` 类型,展示在第 4 组中指定时间段内每分钟采集电压值(voltage)最大值。
|
||||

|
||||
2. `RAW RECORDS` 类型,展示在第 4 组中指定时间段内 current, voltage 的采集值。
|
||||

|
||||
|
|
|
@ -11,7 +11,7 @@ Tableau 是一款知名的商业智能工具,它支持多种数据源,可方
|
|||
- TDengine 3.3.5.4 以上版本集群已部署并正常运行(企业及社区版均可)
|
||||
- taosAdapter 能够正常运行。详细参考 [taosAdapter 参考手册](../../../reference/components/taosadapter)
|
||||
- Tableau 桌面版安装并运行(如未安装,请下载并安装 Windows 操作系统 64 位 [Tableau 桌面版](https://www.tableau.com/products/desktop/download) )。安装 Tableau 桌面版请参考 [官方文档](https://www.tableau.com)。
|
||||
- 从TDengine官网下载最新的Windows操作系统X64客户端驱动程序,并进行安装。详细参考 [安装 ODBC 驱动](../../../reference/connector/odbc/#安装)。
|
||||
- 从 TDengine 官网下载最新的 Windows 操作系统 X64 客户端驱动程序,并进行安装。详细参考 [安装 ODBC 驱动](../../../reference/connector/odbc/#安装)。
|
||||
|
||||
|
||||
## 配置数据源
|
||||
|
|
|
@ -459,6 +459,7 @@ taosd 命令行参数如下
|
|||
- 支持版本:从 v3.1.0.0 版本开始引入
|
||||
|
||||
:::info
|
||||
#### 区域相关参数说明
|
||||
1. 为应对多时区的数据写入和查询问题,TDengine 采用 Unix 时间戳(Unix Timestamp)来记录和存储时间戳。Unix 时间戳的特点决定了任一时刻不论在任何时区,产生的时间戳均一致。需要注意的是,Unix 时间戳是在客户端完成转换和记录。为了确保客户端其他形式的时间转换为正确的 Unix 时间戳,需要设置正确的时区。
|
||||
|
||||
在 Linux/macOS 中,客户端会自动读取系统设置的时区信息。用户也可以采用多种方式在配置文件设置时区。例如:
|
||||
|
@ -1684,30 +1685,24 @@ taosd 会将监控指标上报给 taosKeeper,这些监控指标会被 taosKeep
|
|||
| duration | VARCHAR | TAG | sql执行耗时,取值范围 3-10s,10-100s,100-1000s,1000s- |
|
||||
| cluster\_id | VARCHAR | TAG | cluster id |
|
||||
|
||||
## 日志相关
|
||||
### taos\_slow\_sql\_detail 表
|
||||
|
||||
TDengine 通过日志文件记录系统运行状态,帮助用户监控系统运行情况,排查问题,这里主要介绍 taosc 和 taosd 两个系统日志的相关说明。
|
||||
`taos_slow_sql_detail` 记录客户端慢查询详细信息。子表名规则为 `{user}_{db}_{ip}_clusterId_{cluster_id}`
|
||||
|
||||
TDengine 的日志文件主要包括普通日志和慢日志两种类型。
|
||||
|
||||
1. 普通日志行为说明
|
||||
1. 同一台机器上可以起多个客户端进程,所以客户端日志命名方式为 taoslogX.Y,其中 X 为序号,为空或者 0 到 9,Y 为后缀 0 或者 1。
|
||||
2. 同一台机器上只能有一个服务端进程。所以服务端日志命名方式为 taosdlog.Y,其中 Y 为后缀, 0 或者 1。
|
||||
|
||||
序号和后缀确定规则如下(假设日志路径为 /var/log/taos/):
|
||||
1. 确定序号:使用 10 个序号作为日志命名方式,/var/log/taos/taoslog0.Y - /var/log/taos/taoslog9.Y,依次检测每个序号是否使用,找到第一个没使用的序号作为该进程的日志文件使用的序号。 如果 10 个序号都被进程使用,不使用序号,即 /var/log/taos/taoslog.Y,进程都往相同的文件里写(序号为空)。
|
||||
2. 确定后缀:0 或者 1。比如确定序号为 3,备选的日志文件名就为 /var/log/taos/taoslog3.0 /var/log/taos/taoslog3.1。如果两个文件都不存在用后缀 0,一个存在一个不存在,用存在的后缀。两个都存在,用修改时间最近的那个后缀。
|
||||
3. 如果日志文件超过配置的条数 numOfLogLines,会切换后缀名,继续写日志,比如/var/log/taos/taoslog3.0 写够了,切换到 /var/log/taos/taoslog3.1 继续写日志。/var/log/taos/taoslog3.0 会添加时间戳后缀重命名并压缩存储(异步线程操作)。
|
||||
4. 通过配置 logKeepDays 控制日志文件保存几天,几天之外的日志会被删除。比如配置为 1,则一天之前的日志会在新日志压缩存储时检测删除。不是自然天。
|
||||
|
||||
系统除了记录普通日志以外,对于执行时间超过配置时间的 SQL 语句,会被记录到慢日志中。慢日志文件主要用于分析系统性能,排查性能问题。
|
||||
|
||||
2. 慢日志行为说明
|
||||
1. 慢日志一方面会记录到本地慢日志文件中,另一方面会通过 taosAdapter 发送到 taosKeeper 进行结构化存储(需打开 monitorr 开关)。
|
||||
2. 慢日志文件存储规则为:
|
||||
1. 慢日志文件一天一个,如果当天没有慢日志,没有当天的文件。
|
||||
2. 文件名为 taosSlowLog.yyyy-mm-dd(taosSlowLog.2024-08-02),日志存储路径通过 logDir 配置。
|
||||
3. 多个客户端的日志存储在相应日志路径下的同一个 taosSlowLog.yyyy.mm.dd 文件里。
|
||||
4. 慢日志文件不自动删除,不压缩。
|
||||
5. 使用和普通日志文件相同的三个参数 logDir, minimalLogDirGB, asyncLog。另外两个参数 numOfLogLines,logKeepDays 不适用于慢日志。
|
||||
| field | type | is\_tag | comment |
|
||||
| :------------- | :-------- | :------ | :---------------------------------------------------- |
|
||||
| start\_ts | TIMESTAMP | | sql 开始执行的客户端时间,单位ms,主键 |
|
||||
| request\_id | UINT64_T | | sql 请求的 request id,为 hash 生产的随机值 |
|
||||
| query\_time | INT32_T | | sql 执行耗时, 单位ms |
|
||||
| code | INT32_T | | sql 执行返回码,0表示成功 |
|
||||
| error\_info | VARCHAR | | sql 执行失败时,记录的错误信息 |
|
||||
| type | INT8_T | | sql 语句的类型(1-查询,2-写入,4-其他) |
|
||||
| rows\_num | INT64_T | | sql 执行结果的记录数目 |
|
||||
| sql | VARCHAR | | sql 语句的字符串 |
|
||||
| process\_name | VARCHAR | | 进程名称 |
|
||||
| process\_id | VARCHAR | | 进程 id |
|
||||
| db | VARCHAR | TAG | 执行 sql 所属数据库 |
|
||||
| user | VARCHAR | TAG | 执行 sql 语句的用户 |
|
||||
| ip | VARCHAR | TAG | 记录执行 sql 语句的 client 的 ip 地址 |
|
||||
| cluster\_id | VARCHAR | TAG | cluster id |
|
||||
|
||||
|
|
|
@ -4,6 +4,7 @@ title: 权限管理
|
|||
---
|
||||
|
||||
TDengine 中的权限管理分为[用户管理](../user)、数据库授权管理以及消息订阅授权管理,本节重点说明数据库授权和订阅授权。
|
||||
授权管理仅在 TDengine 企业版中可用,请联系 TDengine 销售团队。授权语法在社区版可用,但不起作用。
|
||||
|
||||
## 数据库访问授权
|
||||
|
||||
|
|
|
@ -68,6 +68,10 @@ WebSocket Connector 历史版本:
|
|||
|
||||
| WebSocket Connector 版本 | 主要变化 | TDengine 版本 |
|
||||
| ----------------------- | ------------------------------------------------------------------------------------ | ----------------- |
|
||||
| 0.3.9 | 修复 fetchmany 自定义行数时获取不完全的问题 | - |
|
||||
| 0.3.8 | 支持 SuperSet 连接到 TDengine 云服务实例 | - |
|
||||
| 0.3.5 | 修复 crypto provider 中的问题 | - |
|
||||
| 0.3.4 | 支持 VARBINARY 和 GEOMETRY 数据类型 | 3.3.0.0 及更高版本 |
|
||||
| 0.3.2 | 优化 WebSocket sql 查询和插入性能,修改 readme 和 文档,修复已知问题 | 3.2.3.0 及更高版本 |
|
||||
| 0.2.9 | 已知问题修复 | - |
|
||||
| 0.2.5 | 1. 数据订阅支持获取消费进度和重置消费进度 <br/> 2. 支持 schemaless <br/> 3. 支持 STMT | - |
|
||||
|
|
|
@ -0,0 +1,107 @@
|
|||
---
|
||||
sidebar_label: 日志系统
|
||||
title: 日志系统
|
||||
toc_max_heading_level: 4
|
||||
---
|
||||
|
||||
TDengine 通过日志文件记录系统运行状态,帮助用户监控系统运行情况,排查问题。Log 分为普通日志和慢日志。引擎侧的运行状态通过普通日志记录下来,执行耗时超过配置阈值的操作则记录到慢日志文件里。
|
||||
|
||||
## 普通日志
|
||||
|
||||
### 普通日志实现逻辑
|
||||
|
||||
- 普通日志分同步和异步两种方式,同步立即写入日志文件,异步写入到 buff 里,然后定时写入日志文件。
|
||||
- 异步方式下日志先缓存在循环 buff 里,buff 的大小为 buffSize = 20 M。如果某次写入的日志大小大于 buff 的可用空间,本次日志会被舍弃,并在日志里记录:...Lost N lines here...
|
||||

|
||||
- 异步线程里每隔 1 s 会更新磁盘信息用于判断是否有空间写日志
|
||||
- 异步线程每隔 Interval 时间处理一次写入逻辑。写入规则如下:
|
||||
- 如果buff 里数据小于 buffSize/10,不写入磁盘,除非超过1 s。
|
||||
- 如果buff 里数据大于 buffSize/10,全部写入磁盘。
|
||||
- Interval 默认值为 25 ms,Interval 值会根据每次写入日志的大小动态调整。调整规则如下(图后附一段示意代码):
|
||||
- 数据量小时(小于 buffSize/10),增大写入间隔,Interval 每次增加 5ms,最大25ms。
|
||||
- 数据量大时(大于 buffSize/3),写入间隔最小,Interval 为 5ms。
|
||||
- 数据量比较大时(大于 buffSize/4,小于等于buffSize/3),减小写入间隔,Interval 每次减小 5ms,最小5ms。
|
||||
- 数据量适中时(大于等于 buffSize/10,小于等于buffSize/4),写入间隔不变。
|
||||

|
||||
|
||||
### 普通日志行为说明
|
||||
- 普通日志命名规则
|
||||
- 同一台机器上可以起多个客户端进程,所以客户端日志命名方式为 taoslogX.Y,其中 X 为序号,为空或者 0 到 9,Y 为后缀 0 或者 1 (windows 限制只有一个序号,所以格式为 taoslog.Y)。
|
||||
- 同一台机器上可以起多个服务端进程。所以服务端日志命名方式为 taosdlog.Y,其中 Y 为后缀, 0 或者 1。
|
||||
- 序号和后缀确定规则如下(假设日志路径为 /var/log/taos/,本节末尾附一段选取序号与后缀的示意代码)
|
||||
- 确定序号:使用 10 个序号作为日志命名方式,/var/log/taos/taoslog0.Y - /var/log/taos/taoslog9.Y,依次检测每个序号是否使用,找到第一个没使用的序号作为该进程的日志文件使用的序号。 如果 10 个序号都被进程使用,不使用序号,即 /var/log/taos/taoslog.Y,进程都往相同的文件里写(序号为空)。
|
||||
- 确定后缀:0 或者 1。比如确定序号为 3,备选的日志文件名就为 /var/log/taos/taoslog3.0 /var/log/taos/taoslog3.1。如果两个文件都不存在用后缀 0,一个存在一个不存在,用存在的后缀。两个都存在,用修改时间最近的那个后缀。
|
||||
- 如果日志文件超过配置的条数 numOfLogLines,会切换后缀名,继续写日志,比如/var/log/taos/taoslog3.0 写够了,切换到 /var/log/taos/taoslog3.1 继续写日志。/var/log/taos/taoslog3.0 会添加时间戳后缀重命名并压缩存储(异步线程操作)。
|
||||
- 通过配置 logKeepDays 控制日志文件保存几天,几天之外的日志会被删除。比如配置为 1,则一天之前的日志会在新日志压缩存储时检测删除。不是自然天。
|
||||
- 当文件里日志行数大于 numOfLogLines(默认 1000w,取值范围 1000-20亿)时,会触发日志归档。
|
||||
- 举例:taoslog3.0 写满了,切换到 taoslog3.1 继续写。taoslog3.0 重命名为 taoslog.1735616543,然后压缩为 taoslog.1735616543.gz。同时,如果 logKeepDays > 0,会检测是否有超时的日志文件,然后删除。(该过程异步执行)
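上述序号与后缀的确定规则可以用下面的 Python 代码示意(非 taosd 源码;真实实现判断的是序号是否被其他进程占用,这里仅用文件是否存在做简化):

```python
import os

LOG_DIR = "/var/log/taos"   # 假设的日志路径,仅作示意

def pick_log_file(prefix: str = "taoslog") -> str:
    """先确定序号,再确定后缀,返回本进程应使用的日志文件名(示意实现)。"""
    # 1. 确定序号:依次检测 0-9,找到第一个未被使用的序号;10 个都被占用则序号为空
    seq = ""
    for i in range(10):
        if not any(os.path.exists(f"{LOG_DIR}/{prefix}{i}.{y}") for y in (0, 1)):
            seq = str(i)
            break
    # 2. 确定后缀:两个文件都不存在用 0;只存在一个用存在的那个;都存在用修改时间最近的
    f0, f1 = f"{LOG_DIR}/{prefix}{seq}.0", f"{LOG_DIR}/{prefix}{seq}.1"
    if not os.path.exists(f0) and not os.path.exists(f1):
        return f0
    if os.path.exists(f0) != os.path.exists(f1):
        return f0 if os.path.exists(f0) else f1
    return f0 if os.path.getmtime(f0) >= os.path.getmtime(f1) else f1
```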
|
||||
|
||||
## 慢日志
|
||||
|
||||
系统除了记录普通日志以外,对于执行时间超过配置时间的操作,会被记录到慢日志中。慢日志文件主要用于分析系统性能,排查性能问题。
|
||||
### 慢日志实现逻辑
|
||||
#### 上报架构
|
||||

|
||||
#### 缓存逻辑
|
||||
- 为了提高上报效率,慢 sql 日志上报方式为批量上报。
|
||||
- 慢 sql 日志上报为了防止缓存丢失,采用写临时文件方式来实现缓存(crash 后不会丢失)。
|
||||
- 每生成一条慢 sql 日志都会放到队列里,然后通知 slow log 线程从队列获取数据,slow log 线程根据数据里 clusterId 写到不同的文件里。
|
||||
数据格式如下(其中,clusterId 为当前日志所属的慢查询集群id,value 为一条数据(json字符串形式))
|
||||
```c
|
||||
typedef struct {
|
||||
int64_t clusterId;
|
||||
char *value;
|
||||
} MonitorSlowLogData;
|
||||
```
|
||||
- 说明:
|
||||
- 因为客户端进程里可能存在很多个连接 connection,所以需要将慢查询日志根据 clusterId 来分组。分组方式通过临时文件名来实现,命名方式为 ```{tmp dir}/tdengine_slow_log/tdengine-{clusterId}-{processId}-{rand}```,processId 为进程 ID,主要为了区分多个客户端的上报。
|
||||
- 如上图,connection 1 连接的是 cluster 1,connection 2、connection 3 连接的是 cluster 2,所以 connection 1 的慢 sql 数据写入文件 ```{tmp dir}/tdengine_slow_log/tdengine-{clusterId1}-{processId}-{rand}```,connection 2 和 connection 3 的慢 sql 数据写入文件 ```{tmp dir}/tdengine_slow_log/tdengine-{clusterId2}-{processId}-{rand}```。下面给出一段按 clusterId 分组写入临时文件的示意代码。
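下面用一段 Python 代码示意慢 sql 日志按 clusterId 分组写入临时文件的过程(非客户端源码,`slow_log_queue`、`slow_log_thread` 等名称均为示意假设):

```python
import os
import queue
import tempfile

# 队列元素为 (clusterId, json 字符串),对应上文的 MonitorSlowLogData
slow_log_queue: "queue.Queue[tuple[int, str]]" = queue.Queue()
SLOW_LOG_DIR = os.path.join(tempfile.gettempdir(), "tdengine_slow_log")

def temp_file_path(cluster_id: int, process_id: int, rand: int) -> str:
    # 临时文件命名:tdengine-{clusterId}-{processId}-{rand},clusterId 区分集群,processId 区分客户端进程
    return os.path.join(SLOW_LOG_DIR, f"tdengine-{cluster_id}-{process_id}-{rand}")

def slow_log_thread(process_id: int, rand: int) -> None:
    """slow log 线程:从队列取出数据,按 clusterId 追加写入各自的临时文件(示意)。"""
    os.makedirs(SLOW_LOG_DIR, exist_ok=True)
    while True:
        cluster_id, value = slow_log_queue.get()   # 每生成一条慢 sql 日志即入队,在此取出
        with open(temp_file_path(cluster_id, process_id, rand), "a") as f:
            f.write(value + "\n")                  # 一行一条 json 数据
```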
|
||||
#### 上报逻辑
|
||||
- 读取 ```{tmp dir}/tdengine_slow_log/tdengine-{clusterId}-{processId}-{rand}``` 临时文件内容,每行数据作为 json 数组的一个元素,组装成 json 数组上报。文件里数据每接近 1M 大小上报一次,主要为了防止文件太大,放在一次里上报容易失败;上报成功后记录读取文件的进度。上报采用异步方式,在 callback 里根据上次的进度继续读取文件内容上报,直至整个文件读取上报完毕,上报完毕后会清空临时文件;callback 里无论成功或失败都会继续读取文件,失败时会记录上报失败的数据日志。下面给出一段批量读取并上报的示意代码。
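下面用一段 Python 代码示意这一批量读取并上报的流程(非客户端源码,`report_batch` 为示意假设;为简化,这里用同步方式表达,真实实现为异步上报并在 callback 中继续读取):

```python
import json

BATCH_BYTES = 1024 * 1024   # 每接近 1M 上报一次,防止单次上报的数据过大

def report_slow_log_file(path: str, report_batch) -> None:
    """按约 1M 一批读取临时文件内容并上报,全部上报完毕后清空文件(示意实现)。"""
    with open(path, "r") as f:
        batch, size = [], 0
        for line in f:                        # 临时文件中每行是一条 json 字符串
            batch.append(json.loads(line))
            size += len(line)
            if size >= BATCH_BYTES:           # 组装成 json 数组,上报一批
                report_batch(json.dumps(batch))
                batch, size = [], 0           # 记录进度后继续读取下一批
        if batch:
            report_batch(json.dumps(batch))   # 上报剩余不足 1M 的数据
    open(path, "w").close()                   # 整个文件上报完毕后清空临时文件
```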
|
||||
#### 上报时机
|
||||
- 客户端运行过程中定时上报
|
||||
- 每个 monitorInterval 时间间隔上报数据。
|
||||
- 客户端正常退出
|
||||
- 上报所有慢 sql 日志文件, 上报成功后,删除文件。
|
||||
- 客户端异常退出
|
||||
- 异常退出后再次与某个集群(clusterId)建立新的链接后遍历 ```{tmp dir}/tdengine_slow_log/``` 目录下 ```tdengine-{clusterId}``` 开头的所有文件进行重新上报(这些文件可能是另一个客户端进程或本进程正在操作的。所以每个文件打开时都需要添加文件锁),然后删除这个临时文件。
|
||||
#### 一些异常行为说明
|
||||
- 因为上报数据和删除文件里的上报内容没法作为一个原子操作,所以如果上报后还没删除数据就 crash,可能导致下次重复上报,重复上报的数据会覆盖,并没丢失,影响很小。
|
||||
- 另外为了保证性能, slow log thread 线程把慢 sql 日志写入临时文件缓存,只保证刷新到操作系统的磁盘缓冲区,并不真正每次都 fsync 到磁盘,所以如果机器断电,仍可能丢失数据。该异常出现概率很小,可以容忍此种情况下的数据丢失。
|
||||
### 慢日志行为说明
|
||||
- 慢日志一方面会记录到本地慢日志文件中,另一方面会通过 taosAdapter 发送到 taosKeeper 进行结构化存储(需打开 monitor 开关)。
|
||||
- 慢日志文件存储规则为:
|
||||
- 慢日志文件一天一个,如果当天没有慢日志,没有当天的文件。
|
||||
- 文件名为 taosSlowLog.yyyy-mm-dd(taosSlowLog.2024-08-02),日志存储路径通过 logDir 配置。
|
||||
- 多个客户端的日志存储在相应日志路径下的同一个 taosSlowLog.yyyy-mm-dd 文件里。
|
||||
- 慢日志文件不自动删除,不压缩。
|
||||
- 使用和普通日志文件相同的三个参数 logDir、minimalLogDirGB、asyncLog。另外两个参数 numOfLogLines、logKeepDays 不适用于慢日志。
|
||||
|
||||
## 日志级别说明
|
||||
|
||||
日志级别分为9种,如下所示:
|
||||
|
||||
```c
|
||||
typedef enum {
|
||||
DEBUG_FATAL = 1,
|
||||
DEBUG_ERROR = 1,
|
||||
DEBUG_WARN = 2,
|
||||
DEBUG_INFO = 2,
|
||||
DEBUG_DEBUG = 4,
|
||||
DEBUG_TRACE = 8,
|
||||
DEBUG_DUMP = 16,
|
||||
DEBUG_SCREEN = 64,
|
||||
DEBUG_FILE = 128
|
||||
} ELogLevel;
|
||||
```
|
||||
|
||||
日志开关通过 bit 位来控制,具体如下:
|
||||
|
||||

|
||||
|
||||
例如:
|
||||
- 131 = 128 + 2 + 1 文件 + info + error
|
||||
- 135 = 128 + 4 + 2 + 1 文件 + debug + info + error
|
||||
- 143 = 128 + 8 + 4 + 2 + 1 文件 + trace + debug + info + error
|
||||
|
||||
通过设置日志开关的参数,可以开启不同级别的日志。
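下面的 Python 片段按上述位定义拆解一个日志开关值,可用来核对示例中 131、135、143 的含义(仅作示意):

```python
# 位定义与 ELogLevel 对应:1-error/fatal,2-warn/info,4-debug,8-trace,16-dump,64-screen,128-file
FLAG_BITS = [(128, "文件"), (64, "屏幕"), (16, "dump"), (8, "trace"),
             (4, "debug"), (2, "info"), (1, "error")]

def explain_log_switch(value: int) -> str:
    """把日志开关值按 bit 位拆解为对应的输出目标和级别。"""
    return " + ".join(name for bit, name in FLAG_BITS if value & bit)

for v in (131, 135, 143):
    print(v, "=", explain_log_switch(v))
# 输出:131 = 文件 + info + error;135 = 文件 + debug + info + error;143 = 文件 + trace + debug + info + error
```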
|
Binary file not shown.
After Width: | Height: | Size: 72 KiB |
Binary file not shown.
After Width: | Height: | Size: 64 KiB |
Binary file not shown.
After Width: | Height: | Size: 446 KiB |
Binary file not shown.
After Width: | Height: | Size: 46 KiB |
|
@ -39,38 +39,41 @@ static FORCE_INLINE int32_t stmtAllocQNodeFromBuf(STableBufInfo* pTblBuf, void**
|
|||
}
|
||||
|
||||
bool stmtDequeue(STscStmt* pStmt, SStmtQNode** param) {
|
||||
(void)taosThreadMutexLock(&pStmt->queue.mutex);
|
||||
int i = 0;
|
||||
while (0 == atomic_load_64((int64_t*)&pStmt->queue.qRemainNum)) {
|
||||
(void)taosThreadCondWait(&pStmt->queue.waitCond, &pStmt->queue.mutex);
|
||||
if (atomic_load_8((int8_t*)&pStmt->queue.stopQueue)) {
|
||||
if (i < 10) {
|
||||
taosUsleep(1);
|
||||
i++;
|
||||
} else {
|
||||
(void)taosThreadMutexLock(&pStmt->queue.mutex);
|
||||
if (0 == atomic_load_64((int64_t*)&pStmt->queue.qRemainNum)) {
|
||||
(void)taosThreadCondWait(&pStmt->queue.waitCond, &pStmt->queue.mutex);
|
||||
}
|
||||
(void)taosThreadMutexUnlock(&pStmt->queue.mutex);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
if (pStmt->queue.stopQueue) {
|
||||
return false;
|
||||
}
|
||||
SStmtQNode* orig = pStmt->queue.head;
|
||||
SStmtQNode* node = pStmt->queue.head->next;
|
||||
pStmt->queue.head = pStmt->queue.head->next;
|
||||
*param = node;
|
||||
|
||||
(void)atomic_sub_fetch_64((int64_t*)&pStmt->queue.qRemainNum, 1);
|
||||
(void)taosThreadMutexUnlock(&pStmt->queue.mutex);
|
||||
|
||||
|
||||
*param = node;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void stmtEnqueue(STscStmt* pStmt, SStmtQNode* param) {
|
||||
(void)taosThreadMutexLock(&pStmt->queue.mutex);
|
||||
|
||||
pStmt->queue.tail->next = param;
|
||||
pStmt->queue.tail = param;
|
||||
|
||||
pStmt->stat.bindDataNum++;
|
||||
|
||||
(void)taosThreadMutexLock(&pStmt->queue.mutex);
|
||||
(void)atomic_add_fetch_64(&pStmt->queue.qRemainNum, 1);
|
||||
(void)taosThreadCondSignal(&(pStmt->queue.waitCond));
|
||||
|
||||
(void)taosThreadMutexUnlock(&pStmt->queue.mutex);
|
||||
}
|
||||
|
||||
|
@ -423,11 +426,9 @@ void stmtResetQueueTableBuf(STableBufInfo* pTblBuf, SStmtQueue* pQueue) {
|
|||
pTblBuf->buffIdx = 1;
|
||||
pTblBuf->buffOffset = sizeof(*pQueue->head);
|
||||
|
||||
(void)taosThreadMutexLock(&pQueue->mutex);
|
||||
pQueue->head = pQueue->tail = pTblBuf->pCurBuff;
|
||||
pQueue->qRemainNum = 0;
|
||||
pQueue->head->next = NULL;
|
||||
(void)taosThreadMutexUnlock(&pQueue->mutex);
|
||||
}
|
||||
|
||||
int32_t stmtCleanExecInfo(STscStmt* pStmt, bool keepTable, bool deepClean) {
|
||||
|
@ -778,7 +779,7 @@ void* stmtBindThreadFunc(void* param) {
|
|||
STscStmt* pStmt = (STscStmt*)param;
|
||||
|
||||
while (true) {
|
||||
if (atomic_load_8((int8_t*)&pStmt->queue.stopQueue)) {
|
||||
if (pStmt->queue.stopQueue) {
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -1630,8 +1631,9 @@ int stmtClose(TAOS_STMT* stmt) {
|
|||
STMT_DLOG_E("start to free stmt");
|
||||
|
||||
pStmt->queue.stopQueue = true;
|
||||
|
||||
|
||||
(void)taosThreadMutexLock(&pStmt->queue.mutex);
|
||||
(void)atomic_add_fetch_64(&pStmt->queue.qRemainNum, 1);
|
||||
(void)taosThreadCondSignal(&(pStmt->queue.waitCond));
|
||||
(void)taosThreadMutexUnlock(&pStmt->queue.mutex);
|
||||
|
||||
|
|
|
@ -39,34 +39,41 @@ static FORCE_INLINE int32_t stmtAllocQNodeFromBuf(STableBufInfo* pTblBuf, void**
|
|||
}
|
||||
|
||||
static bool stmtDequeue(STscStmt2* pStmt, SStmtQNode** param) {
|
||||
(void)taosThreadMutexLock(&pStmt->queue.mutex);
|
||||
int i = 0;
|
||||
while (0 == atomic_load_64((int64_t*)&pStmt->queue.qRemainNum)) {
|
||||
(void)taosThreadCondWait(&pStmt->queue.waitCond, &pStmt->queue.mutex);
|
||||
if (atomic_load_8((int8_t*)&pStmt->queue.stopQueue)) {
|
||||
if (i < 10) {
|
||||
taosUsleep(1);
|
||||
i++;
|
||||
} else {
|
||||
(void)taosThreadMutexLock(&pStmt->queue.mutex);
|
||||
if (0 == atomic_load_64((int64_t*)&pStmt->queue.qRemainNum)) {
|
||||
(void)taosThreadCondWait(&pStmt->queue.waitCond, &pStmt->queue.mutex);
|
||||
}
|
||||
(void)taosThreadMutexUnlock(&pStmt->queue.mutex);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
if (pStmt->queue.stopQueue) {
|
||||
return false;
|
||||
}
|
||||
SStmtQNode* orig = pStmt->queue.head;
|
||||
SStmtQNode* node = pStmt->queue.head->next;
|
||||
pStmt->queue.head = pStmt->queue.head->next;
|
||||
*param = node;
|
||||
|
||||
(void)atomic_sub_fetch_64((int64_t*)&pStmt->queue.qRemainNum, 1);
|
||||
(void)taosThreadMutexUnlock(&pStmt->queue.mutex);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static void stmtEnqueue(STscStmt2* pStmt, SStmtQNode* param) {
|
||||
(void)taosThreadMutexLock(&pStmt->queue.mutex);
|
||||
|
||||
pStmt->queue.tail->next = param;
|
||||
pStmt->queue.tail = param;
|
||||
pStmt->stat.bindDataNum++;
|
||||
(void)atomic_add_fetch_64((int64_t*)&pStmt->queue.qRemainNum, 1);
|
||||
(void)taosThreadCondSignal(&(pStmt->queue.waitCond));
|
||||
|
||||
pStmt->stat.bindDataNum++;
|
||||
|
||||
(void)taosThreadMutexLock(&pStmt->queue.mutex);
|
||||
(void)atomic_add_fetch_64(&pStmt->queue.qRemainNum, 1);
|
||||
(void)taosThreadCondSignal(&(pStmt->queue.waitCond));
|
||||
(void)taosThreadMutexUnlock(&pStmt->queue.mutex);
|
||||
}
|
||||
|
||||
|
@ -343,11 +350,9 @@ static void stmtResetQueueTableBuf(STableBufInfo* pTblBuf, SStmtQueue* pQueue) {
|
|||
pTblBuf->buffIdx = 1;
|
||||
pTblBuf->buffOffset = sizeof(*pQueue->head);
|
||||
|
||||
(void)taosThreadMutexLock(&pQueue->mutex);
|
||||
pQueue->head = pQueue->tail = pTblBuf->pCurBuff;
|
||||
pQueue->qRemainNum = 0;
|
||||
pQueue->head->next = NULL;
|
||||
(void)taosThreadMutexUnlock(&pQueue->mutex);
|
||||
}
|
||||
|
||||
static int32_t stmtCleanExecInfo(STscStmt2* pStmt, bool keepTable, bool deepClean) {
|
||||
|
@ -701,7 +706,7 @@ static void* stmtBindThreadFunc(void* param) {
|
|||
STscStmt2* pStmt = (STscStmt2*)param;
|
||||
|
||||
while (true) {
|
||||
if (atomic_load_8((int8_t*)&pStmt->queue.stopQueue)) {
|
||||
if (pStmt->queue.stopQueue) {
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -1764,6 +1769,7 @@ int stmtClose2(TAOS_STMT2* stmt) {
|
|||
pStmt->queue.stopQueue = true;
|
||||
|
||||
(void)taosThreadMutexLock(&pStmt->queue.mutex);
|
||||
(void)atomic_add_fetch_64(&pStmt->queue.qRemainNum, 1);
|
||||
(void)taosThreadCondSignal(&(pStmt->queue.waitCond));
|
||||
(void)taosThreadMutexUnlock(&pStmt->queue.mutex);
|
||||
|
||||
|
|
|
@ -868,7 +868,7 @@ int32_t projectApplyFunctions(SExprInfo* pExpr, SSDataBlock* pResult, SSDataBloc
|
|||
}
|
||||
|
||||
pResult->info.rows = 1;
|
||||
TSDB_CHECK_CODE(code, lino, _exit);
|
||||
goto _exit;
|
||||
}
|
||||
|
||||
if (pResult != pSrcBlock) {
|
||||
|
|
|
@ -24,7 +24,7 @@ import platform
|
|||
import socket
|
||||
import threading
|
||||
import importlib
|
||||
|
||||
import ast
|
||||
import toml
|
||||
|
||||
from frame.log import *
|
||||
|
@ -56,6 +56,17 @@ def checkRunTimeError():
|
|||
if hwnd:
|
||||
os.system("TASKKILL /F /IM taosd.exe")
|
||||
|
||||
def get_local_classes_in_order(file_path):
|
||||
with open(file_path, "r", encoding="utf-8") as file:
|
||||
tree = ast.parse(file.read(), filename=file_path)
|
||||
|
||||
classes = [node.name for node in ast.walk(tree) if isinstance(node, ast.ClassDef)]
|
||||
return classes
|
||||
|
||||
def dynamicLoadModule(fileName):
|
||||
moduleName = fileName.replace(".py", "").replace(os.sep, ".")
|
||||
return importlib.import_module(moduleName, package='..')
|
||||
|
||||
#
|
||||
# run case on previous cluster
|
||||
#
|
||||
|
@ -66,9 +77,11 @@ def runOnPreviousCluster(host, config, fileName):
|
|||
sep = "/"
|
||||
if platform.system().lower() == 'windows':
|
||||
sep = os.sep
|
||||
moduleName = fileName.replace(".py", "").replace(sep, ".")
|
||||
uModule = importlib.import_module(moduleName)
|
||||
case = uModule.TDTestCase()
|
||||
|
||||
uModule = dynamicLoadModule(fileName)
|
||||
class_names = get_local_classes_in_order(fileName)
|
||||
case_class = getattr(uModule, class_names[-1])
|
||||
case = case_class()
|
||||
|
||||
# create conn
|
||||
conn = taos.connect(host, config)
|
||||
|
@ -358,10 +371,11 @@ if __name__ == "__main__":
|
|||
updateCfgDictStr = ''
|
||||
# adapter_cfg_dict_str = ''
|
||||
if is_test_framework:
|
||||
moduleName = fileName.replace(".py", "").replace(os.sep, ".")
|
||||
uModule = importlib.import_module(moduleName)
|
||||
uModule = dynamicLoadModule(fileName)
|
||||
try:
|
||||
ucase = uModule.TDTestCase()
|
||||
class_names = get_local_classes_in_order(fileName)
|
||||
case_class = getattr(uModule, class_names[-1])
|
||||
ucase = case_class()
|
||||
if ((json.dumps(updateCfgDict) == '{}') and hasattr(ucase, 'updatecfgDict')):
|
||||
updateCfgDict = ucase.updatecfgDict
|
||||
updateCfgDictStr = "-d %s"%base64.b64encode(json.dumps(updateCfgDict).encode()).decode()
|
||||
|
@ -530,10 +544,11 @@ if __name__ == "__main__":
|
|||
except:
|
||||
pass
|
||||
if is_test_framework:
|
||||
moduleName = fileName.replace(".py", "").replace("/", ".")
|
||||
uModule = importlib.import_module(moduleName)
|
||||
uModule = dynamicLoadModule(fileName)
|
||||
try:
|
||||
ucase = uModule.TDTestCase()
|
||||
class_names = get_local_classes_in_order(fileName)
|
||||
case_class = getattr(uModule, class_names[-1])
|
||||
ucase = case_class()
|
||||
if (json.dumps(updateCfgDict) == '{}'):
|
||||
updateCfgDict = ucase.updatecfgDict
|
||||
if (json.dumps(adapter_cfg_dict) == '{}'):
|
||||
|
|
|
@ -22,6 +22,9 @@ import json
|
|||
import platform
|
||||
import socket
|
||||
import threading
|
||||
import ast
|
||||
import importlib
|
||||
import os
|
||||
|
||||
import toml
|
||||
|
||||
|
@ -56,6 +59,17 @@ def checkRunTimeError():
|
|||
if hwnd:
|
||||
os.system("TASKKILL /F /IM taosd.exe")
|
||||
|
||||
def get_local_classes_in_order(file_path):
|
||||
with open(file_path, "r", encoding="utf-8") as file:
|
||||
tree = ast.parse(file.read(), filename=file_path)
|
||||
|
||||
classes = [node.name for node in ast.walk(tree) if isinstance(node, ast.ClassDef)]
|
||||
return classes
|
||||
|
||||
def dynamicLoadModule(fileName):
|
||||
moduleName = fileName.replace(".py", "").replace(os.sep, ".")
|
||||
return importlib.import_module(moduleName, package='..')
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
|
@ -295,10 +309,11 @@ if __name__ == "__main__":
|
|||
updateCfgDictStr = ""
|
||||
# adapter_cfg_dict_str = ''
|
||||
if is_test_framework:
|
||||
moduleName = fileName.replace(".py", "").replace(os.sep, ".")
|
||||
uModule = importlib.import_module(moduleName)
|
||||
uModule = dynamicLoadModule(fileName)
|
||||
try:
|
||||
ucase = uModule.TDTestCase()
|
||||
class_names = get_local_classes_in_order(fileName)
|
||||
case_class = getattr(uModule, class_names[-1])
|
||||
ucase = case_class()
|
||||
if (json.dumps(updateCfgDict) == "{}") and hasattr(
|
||||
ucase, "updatecfgDict"
|
||||
):
|
||||
|
@ -434,10 +449,11 @@ if __name__ == "__main__":
|
|||
except:
|
||||
pass
|
||||
if is_test_framework:
|
||||
moduleName = fileName.replace(".py", "").replace("/", ".")
|
||||
uModule = importlib.import_module(moduleName)
|
||||
uModule = dynamicLoadModule(fileName)
|
||||
try:
|
||||
ucase = uModule.TDTestCase()
|
||||
class_names = get_local_classes_in_order(fileName)
|
||||
case_class = getattr(uModule, class_names[-1])
|
||||
ucase = case_class()
|
||||
if json.dumps(updateCfgDict) == "{}":
|
||||
updateCfgDict = ucase.updatecfgDict
|
||||
if json.dumps(adapter_cfg_dict) == "{}":
|
||||
|
|
|
@ -19,6 +19,7 @@ import subprocess
|
|||
import time
|
||||
from distutils.log import warn as printf
|
||||
import platform
|
||||
import ast
|
||||
|
||||
from util.log import *
|
||||
from util.dnodes import *
|
||||
|
@ -26,6 +27,17 @@ from util.cases import *
|
|||
|
||||
import taos
|
||||
|
||||
def get_local_classes_in_order(file_path):
|
||||
with open(file_path, "r", encoding="utf-8") as file:
|
||||
tree = ast.parse(file.read(), filename=file_path)
|
||||
|
||||
classes = [node.name for node in ast.walk(tree) if isinstance(node, ast.ClassDef)]
|
||||
return classes
|
||||
|
||||
def dynamicLoadModule(fileName):
|
||||
moduleName = fileName.replace(".py", "").replace(os.sep, ".")
|
||||
return importlib.import_module(moduleName, package='..')
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
fileName = "all"
|
||||
|
@ -136,10 +148,11 @@ if __name__ == "__main__":
|
|||
except:
|
||||
pass
|
||||
if is_test_framework:
|
||||
moduleName = fileName.replace(".py", "").replace(os.sep, ".")
|
||||
uModule = importlib.import_module(moduleName)
|
||||
uModule = dynamicLoadModule(fileName)
|
||||
try:
|
||||
ucase = uModule.TDTestCase()
|
||||
class_names = get_local_classes_in_order(fileName)
|
||||
case_class = getattr(uModule, class_names[-1])
|
||||
ucase = case_class()
|
||||
tdDnodes.deploy(1,ucase.updatecfgDict)
|
||||
except :
|
||||
tdDnodes.deploy(1,{})
|
||||
|
@ -170,10 +183,11 @@ if __name__ == "__main__":
|
|||
except:
|
||||
pass
|
||||
if is_test_framework:
|
||||
moduleName = fileName.replace(".py", "").replace("/", ".")
|
||||
uModule = importlib.import_module(moduleName)
|
||||
uModule = dynamicLoadModule(fileName)
|
||||
try:
|
||||
ucase = uModule.TDTestCase()
|
||||
class_names = get_local_classes_in_order(fileName)
|
||||
case_class = getattr(uModule, class_names[-1])
|
||||
ucase = case_class()
|
||||
tdDnodes.deploy(1,ucase.updatecfgDict)
|
||||
except :
|
||||
tdDnodes.deploy(1,{})
|
||||
|
|
|
@ -20,7 +20,7 @@ import importlib
|
|||
import traceback
|
||||
from util.log import *
|
||||
import platform
|
||||
|
||||
import ast
|
||||
|
||||
class TDCase:
|
||||
def __init__(self, name, case):
|
||||
|
@ -51,12 +51,22 @@ class TDCases:
|
|||
def addCluster(self, name, case):
|
||||
self.clusterCases.append(TDCase(name, case))
|
||||
|
||||
def get_local_classes_in_order(self, file_path):
|
||||
with open(file_path, "r", encoding="utf-8") as file:
|
||||
tree = ast.parse(file.read(), filename=file_path)
|
||||
|
||||
classes = [node.name for node in ast.walk(tree) if isinstance(node, ast.ClassDef)]
|
||||
return classes
|
||||
|
||||
def runAllLinux(self, conn):
|
||||
# TODO: load all Linux cases here
|
||||
runNum = 0
|
||||
for tmp in self.linuxCases:
|
||||
if tmp.name.find(fileName) != -1:
|
||||
case = testModule.TDTestCase()
|
||||
# get the last class name as the test case class name
|
||||
class_names = self.get_local_classes_in_order(fileName)
|
||||
case_class = getattr(testModule, class_names[-1])
|
||||
case = case_class()
|
||||
case.init(conn)
|
||||
case.run()
|
||||
case.stop()
|
||||
|
@ -72,9 +82,8 @@ class TDCases:
|
|||
for tmp in self.linuxCases:
|
||||
if tmp.name.find(fileName) != -1:
|
||||
# get the last class name as the test case class name
|
||||
class_names = [name for name, obj in inspect.getmembers(testModule, inspect.isclass)]
|
||||
class_names = class_names[-1]
|
||||
case_class = getattr(testModule, class_names)
|
||||
class_names = self.get_local_classes_in_order(fileName)
|
||||
case_class = getattr(testModule, class_names[-1])
|
||||
case = case_class()
|
||||
case.init(conn, self._logSql, replicaVar)
|
||||
try:
|
||||
|
@ -92,7 +101,10 @@ class TDCases:
|
|||
runNum = 0
|
||||
for tmp in self.windowsCases:
|
||||
if tmp.name.find(fileName) != -1:
|
||||
case = testModule.TDTestCase()
|
||||
# get the last class name as the test case class name
|
||||
class_names = self.get_local_classes_in_order(fileName)
|
||||
case_class = getattr(testModule, class_names[-1])
|
||||
case = case_class()
|
||||
case.init(conn)
|
||||
case.run()
|
||||
case.stop()
|
||||
|
@ -108,9 +120,8 @@ class TDCases:
|
|||
for tmp in self.windowsCases:
|
||||
if tmp.name.find(fileName) != -1:
|
||||
# get the last class name as the test case class name
|
||||
class_names = [name for name, obj in inspect.getmembers(testModule, inspect.isclass)]
|
||||
class_names = class_names[-1]
|
||||
case_class = getattr(testModule, class_names)
|
||||
class_names = self.get_local_classes_in_order(fileName)
|
||||
case_class = getattr(testModule, class_names[-1])
|
||||
case = case_class()
|
||||
case.init(conn, self._logSql,replicaVar)
|
||||
try:
|
||||
|
@ -132,9 +143,8 @@ class TDCases:
|
|||
if tmp.name.find(fileName) != -1:
|
||||
tdLog.notice("run cases like %s" % (fileName))
|
||||
# get the last class name as the test case class name
|
||||
class_names = [name for name, obj in inspect.getmembers(testModule, inspect.isclass)]
|
||||
class_names = class_names[-1]
|
||||
case_class = getattr(testModule, class_names)
|
||||
class_names = self.get_local_classes_in_order(fileName)
|
||||
case_class = getattr(testModule, class_names[-1])
|
||||
case = case_class()
|
||||
case.init()
|
||||
case.run()
|
||||
|
@ -152,9 +162,8 @@ class TDCases:
|
|||
if tmp.name.find(fileName) != -1:
|
||||
tdLog.notice("run cases like %s" % (fileName))
|
||||
# get the last class name as the test case class name
|
||||
class_names = [name for name, obj in inspect.getmembers(testModule, inspect.isclass)]
|
||||
class_names = class_names[-1]
|
||||
case_class = getattr(testModule, class_names)
|
||||
class_names = self.get_local_classes_in_order(fileName)
|
||||
case_class = getattr(testModule, class_names[-1])
|
||||
case = case_class()
|
||||
case.init()
|
||||
case.run()
|
||||
|
|
|
@ -24,6 +24,7 @@ import platform
|
|||
import socket
|
||||
import threading
|
||||
import importlib
|
||||
import ast
|
||||
print(f"Python version: {sys.version}")
|
||||
print(f"Version info: {sys.version_info}")
|
||||
|
||||
|
@ -58,6 +59,18 @@ def checkRunTimeError():
|
|||
if hwnd:
|
||||
os.system("TASKKILL /F /IM taosd.exe")
|
||||
|
||||
def get_local_classes_in_order(file_path):
|
||||
with open(file_path, "r", encoding="utf-8") as file:
|
||||
tree = ast.parse(file.read(), filename=file_path)
|
||||
|
||||
classes = [node.name for node in ast.walk(tree) if isinstance(node, ast.ClassDef)]
|
||||
return classes
|
||||
|
||||
|
||||
def dynamicLoadModule(fileName):
|
||||
moduleName = fileName.replace(".py", "").replace(os.sep, ".")
|
||||
return importlib.import_module(moduleName, package='..')
|
||||
|
||||
#
|
||||
# run case on previous cluster
|
||||
#
|
||||
|
@ -68,9 +81,11 @@ def runOnPreviousCluster(host, config, fileName):
|
|||
sep = "/"
|
||||
if platform.system().lower() == 'windows':
|
||||
sep = os.sep
|
||||
moduleName = fileName.replace(".py", "").replace(sep, ".")
|
||||
uModule = importlib.import_module(moduleName)
|
||||
case = uModule.TDTestCase()
|
||||
|
||||
uModule = dynamicLoadModule(fileName)
|
||||
class_names = get_local_classes_in_order(fileName)
|
||||
case_class = getattr(uModule, class_names[-1])
|
||||
case = case_class()
|
||||
|
||||
# create conn
|
||||
conn = taos.connect(host, config)
|
||||
|
@ -350,10 +365,11 @@ if __name__ == "__main__":
|
|||
updateCfgDictStr = ''
|
||||
# adapter_cfg_dict_str = ''
|
||||
if is_test_framework:
|
||||
moduleName = fileName.replace(".py", "").replace(os.sep, ".")
|
||||
uModule = importlib.import_module(moduleName)
|
||||
uModule = dynamicLoadModule(fileName)
|
||||
try:
|
||||
ucase = uModule.TDTestCase()
|
||||
class_names = get_local_classes_in_order(fileName)
|
||||
case_class = getattr(uModule, class_names[-1])
|
||||
ucase = case_class()
|
||||
if ((json.dumps(updateCfgDict) == '{}') and hasattr(ucase, 'updatecfgDict')):
|
||||
updateCfgDict = ucase.updatecfgDict
|
||||
updateCfgDictStr = "-d %s"%base64.b64encode(json.dumps(updateCfgDict).encode()).decode()
|
||||
|
@ -522,10 +538,11 @@ if __name__ == "__main__":
|
|||
except:
|
||||
pass
|
||||
if is_test_framework:
|
||||
moduleName = fileName.replace(".py", "").replace("/", ".")
|
||||
uModule = importlib.import_module(moduleName)
|
||||
uModule = dynamicLoadModule(fileName)
|
||||
try:
|
||||
ucase = uModule.TDTestCase()
|
||||
class_names = get_local_classes_in_order(fileName)
|
||||
case_class = getattr(uModule, class_names[-1])
|
||||
ucase = case_class()
|
||||
if (json.dumps(updateCfgDict) == '{}'):
|
||||
updateCfgDict = ucase.updatecfgDict
|
||||
if (json.dumps(adapter_cfg_dict) == '{}'):
|
||||
|
|
|
@ -24,6 +24,7 @@ import platform
|
|||
import socket
|
||||
import threading
|
||||
import importlib
|
||||
import ast
|
||||
print(f"Python version: {sys.version}")
|
||||
print(f"Version info: {sys.version_info}")
|
||||
|
||||
|
@ -58,6 +59,18 @@ def checkRunTimeError():
|
|||
if hwnd:
|
||||
os.system("TASKKILL /F /IM taosd.exe")
|
||||
|
||||
def get_local_classes_in_order(file_path):
|
||||
with open(file_path, "r", encoding="utf-8") as file:
|
||||
tree = ast.parse(file.read(), filename=file_path)
|
||||
|
||||
classes = [node.name for node in ast.walk(tree) if isinstance(node, ast.ClassDef)]
|
||||
return classes
|
||||
|
||||
|
||||
def dynamicLoadModule(fileName):
|
||||
moduleName = fileName.replace(".py", "").replace(os.sep, ".")
|
||||
return importlib.import_module(moduleName, package='..')
|
||||
|
||||
#
|
||||
# run case on previous cluster
|
||||
#
|
||||
|
@ -68,9 +81,11 @@ def runOnPreviousCluster(host, config, fileName):
|
|||
sep = "/"
|
||||
if platform.system().lower() == 'windows':
|
||||
sep = os.sep
|
||||
moduleName = fileName.replace(".py", "").replace(sep, ".")
|
||||
uModule = importlib.import_module(moduleName)
|
||||
case = uModule.TDTestCase()
|
||||
|
||||
uModule = dynamicLoadModule(fileName)
|
||||
class_names = get_local_classes_in_order(fileName)
|
||||
case_class = getattr(uModule, class_names[-1])
|
||||
case = case_class()
|
||||
|
||||
# create conn
|
||||
conn = taos.connect(host, config)
|
||||
|
@ -350,10 +365,11 @@ if __name__ == "__main__":
|
|||
updateCfgDictStr = ''
|
||||
# adapter_cfg_dict_str = ''
|
||||
if is_test_framework:
|
||||
moduleName = fileName.replace(".py", "").replace(os.sep, ".")
|
||||
uModule = importlib.import_module(moduleName)
|
||||
uModule = dynamicLoadModule(fileName)
|
||||
try:
|
||||
ucase = uModule.TDTestCase()
|
||||
class_names = get_local_classes_in_order(fileName)
|
||||
case_class = getattr(uModule, class_names[-1])
|
||||
ucase = case_class()
|
||||
if ((json.dumps(updateCfgDict) == '{}') and hasattr(ucase, 'updatecfgDict')):
|
||||
updateCfgDict = ucase.updatecfgDict
|
||||
updateCfgDictStr = "-d %s"%base64.b64encode(json.dumps(updateCfgDict).encode()).decode()
|
||||
|
@ -522,10 +538,11 @@ if __name__ == "__main__":
|
|||
except:
|
||||
pass
|
||||
if is_test_framework:
|
||||
moduleName = fileName.replace(".py", "").replace("/", ".")
|
||||
uModule = importlib.import_module(moduleName)
|
||||
uModule = dynamicLoadModule(fileName)
|
||||
try:
|
||||
ucase = uModule.TDTestCase()
|
||||
class_names = get_local_classes_in_order(fileName)
|
||||
case_class = getattr(uModule, class_names[-1])
|
||||
ucase = case_class()
|
||||
if (json.dumps(updateCfgDict) == '{}'):
|
||||
updateCfgDict = ucase.updatecfgDict
|
||||
if (json.dumps(adapter_cfg_dict) == '{}'):
|
||||
|
|