Merge pull request #30171 from taosdata/merge/3.0to3.3.6

merge: from 3.0 to 3.3.6
Simon Guan 2025-03-14 11:35:13 +08:00 committed by GitHub
commit 79930fe8f7
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
147 changed files with 17055 additions and 355 deletions

View File

@ -10,6 +10,13 @@ on:
paths-ignore: paths-ignore:
- 'packaging/**' - 'packaging/**'
- 'docs/**' - 'docs/**'
- 'tools/tdgpt/**'
- 'source/libs/executor/src/forecastoperator.c'
- 'source/libs/executor/src/anomalywindowoperator.c'
- 'include/common/tanalytics.h'
- 'source/common/src/tanalytics.c'
- 'tests/parallel/tdgpt_cases.task'
- 'tests/script/tsim/analytics'
concurrency: concurrency:
group: ${{ github.workflow }}-${{ github.ref }}-TDengine group: ${{ github.workflow }}-${{ github.ref }}-TDengine
@ -19,76 +26,19 @@ env:
WKC: '/var/lib/jenkins/workspace/TDinternal/community' WKC: '/var/lib/jenkins/workspace/TDinternal/community'
jobs: jobs:
fetch-parameters:
runs-on:
group: CI
labels: [self-hosted, Linux, X64, testing]
outputs:
run_function_test: ${{ steps.parameters.outputs.run_function_test }}
run_tdgpt_test: ${{ steps.parameters.outputs.run_tdgpt_test }}
steps:
- name: Determine trigger source and fetch parameters
id: parameters
run: |
set -euo pipefail
target_branch=${{ github.event.pull_request.base.ref }}
# Check whether to run tdgpt test cases
cd ${{ env.WKC }}
changed_files_non_doc=$(git --no-pager diff --name-only FETCH_HEAD $(git merge-base FETCH_HEAD $target_branch) | grep -v "^docs/en/" | grep -v "^docs/zh/" | grep -v ".md$" | tr '\n' ' ' || :)
echo "changed files exclude doc: ${changed_files_non_doc}"
if [[ -n "$changed_files_non_doc" && "$changed_files_non_doc" =~ (forecastoperator\.c|anomalywindowoperator\.c|tanalytics\.h|tanalytics\.c|tdgpt_cases\.task|analytics|tdgpt) ]]; then
run_tdgpt_test="true"
else
run_tdgpt_test="false"
fi
echo "run tdgpt test: ${run_tdgpt_test}"
# Check whether to run function test cases
changed_files_non_tdgpt=$(git --no-pager diff --name-only FETCH_HEAD $(git merge-base FETCH_HEAD $target_branch) | \
grep -v "^docs/en/" | \
grep -v "^docs/zh/" | \
grep -v ".md$" | \
grep -Ev "forecastoperator\.c|anomalywindowoperator\.c|tanalytics\.h|tanalytics\.c|tdgpt_cases\.task|analytics|tdgpt" | \
tr '\n' ' ' || :)
echo "changed files exclude tdgpt: ${changed_files_non_tdgpt}"
if [ -n "$changed_files_non_tdgpt" ]; then
run_function_test="true"
else
run_function_test="false"
fi
echo "run function test: ${run_function_test}"
# Output the results for GitHub Actions
echo "run_function_test=$run_function_test" >> $GITHUB_OUTPUT
echo "run_tdgpt_test=$run_tdgpt_test" >> $GITHUB_OUTPUT
echo ${{ github.event.pull_request.head.ref }}
echo ${{ github.event.pull_request.base.ref }}
echo ${{ github.event.pull_request.number }}
run-tests-on-linux: run-tests-on-linux:
uses: taosdata/.github/.github/workflows/run-tests-on-linux.yml@main uses: taosdata/.github/.github/workflows/run-tests-on-linux.yml@main
needs: fetch-parameters
if: ${{ needs.fetch-parameters.outputs.run_tdgpt_test == 'true' || needs.fetch-parameters.outputs.run_function_test == 'true' }}
with: with:
tdinternal: false tdinternal: false
run_function_test: ${{ needs.fetch-parameters.outputs.run_function_test == 'true' }} run_function_test: true
run_tdgpt_test: ${{ needs.fetch-parameters.outputs.run_tdgpt_test == 'true' }} run_tdgpt_test: false
run-tests-on-mac: run-tests-on-mac:
uses: taosdata/.github/.github/workflows/run-tests-on-macos.yml@main uses: taosdata/.github/.github/workflows/run-tests-on-macos.yml@main
needs: fetch-parameters
if: ${{ needs.fetch-parameters.outputs.run_function_test == 'true' }}
with: with:
tdinternal: false tdinternal: false
run-tests-on-windows: run-tests-on-windows:
uses: taosdata/.github/.github/workflows/run-tests-on-windows.yml@main uses: taosdata/.github/.github/workflows/run-tests-on-windows.yml@main
needs: fetch-parameters
if: ${{ needs.fetch-parameters.outputs.run_function_test == 'true' }}
with: with:
tdinternal: false tdinternal: false

View File

@ -7,7 +7,6 @@ on:
- '3.0' - '3.0'
paths: paths:
- 'docs/**' - 'docs/**'
- '*.md'
env: env:
DOC_WKC: "/root/doc_ci_work" DOC_WKC: "/root/doc_ci_work"

View File

@ -21,19 +21,18 @@ include(${TD_SUPPORT_DIR}/cmake.version)
include(${TD_SUPPORT_DIR}/cmake.install) include(${TD_SUPPORT_DIR}/cmake.install)
set_property(GLOBAL PROPERTY GLOBAL_DEPENDS_NO_CYCLES OFF) set_property(GLOBAL PROPERTY GLOBAL_DEPENDS_NO_CYCLES OFF)
add_library(api INTERFACE)
target_include_directories(api INTERFACE "include/client")
add_subdirectory(contrib) add_subdirectory(contrib)
add_subdirectory(source)
add_subdirectory(tools)
add_subdirectory(utils)
add_subdirectory(tests)
add_subdirectory(docs/doxgen)
if(${BUILD_TEST}) if(${BUILD_TEST})
include(CTest) include(CTest)
enable_testing() enable_testing()
add_subdirectory(examples/c) add_subdirectory(examples/c)
endif(${BUILD_TEST}) endif(${BUILD_TEST})
add_library(api INTERFACE)
target_include_directories(api INTERFACE "include/client")
add_subdirectory(source)
add_subdirectory(tools)
add_subdirectory(utils)
add_subdirectory(tests)
add_subdirectory(docs/doxgen)

View File

@ -70,6 +70,8 @@ TDengine is an open source, high-performance, cloud native [time-series database
For a full list of TDengine competitive advantages, please [check here](https://tdengine.com/tdengine/). The easiest way to experience TDengine is through [TDengine Cloud](https://cloud.tdengine.com). For a full list of TDengine competitive advantages, please [check here](https://tdengine.com/tdengine/). The easiest way to experience TDengine is through [TDengine Cloud](https://cloud.tdengine.com).
For TDgpt, the latest TDengine component, please refer to the [TDgpt README](./tools/tdgpt/README.md) for details.
# 2. Documentation # 2. Documentation
For user manual, system design and architecture, please refer to [TDengine Documentation](https://docs.tdengine.com) ([TDengine 文档](https://docs.taosdata.com)) For user manual, system design and architecture, please refer to [TDengine Documentation](https://docs.tdengine.com) ([TDengine 文档](https://docs.taosdata.com))

View File

@ -14,7 +14,7 @@ Power BI is a business analytics tool provided by Microsoft. By configuring the
## Configure Data Source ## Configure Data Source
**Step 1**, Search and open the [ODBC Data Source (64 bit)] management tool in the Start menu of the Windows operating system and configure it, refer to [Install ODBC Driver](../../../tdengine-reference/client-libraries/odbc/#Installation). **Step 1**, Search and open the [ODBC Data Source (64 bit)] management tool in the Start menu of the Windows operating system and configure it, refer to [Install ODBC Driver](../../../tdengine-reference/client-libraries/odbc/#installation).
**Step 2**, Open Power BI and log in, click [Home] -> [Get Data] -> [Other] -> [ODBC] -> [Connect], add data source. **Step 2**, Open Power BI and log in, click [Home] -> [Get Data] -> [Other] -> [ODBC] -> [Connect], add data source.

View File

@ -13,11 +13,11 @@ Prepare the following environment:
- TDengine 3.3.5.8 and above version is installed and running normally (both Enterprise and Community versions are available). - TDengine 3.3.5.8 and above version is installed and running normally (both Enterprise and Community versions are available).
- taosAdapter is running normally, refer to [taosAdapter Reference](../../../tdengine-reference/components/taosadapter/). - taosAdapter is running normally, refer to [taosAdapter Reference](../../../tdengine-reference/components/taosadapter/).
- Install and run Tableau Desktop (if not installed, please download and install Windows operating system 64-bit [Download Tableau Desktop](https://www.tableau.com/products/desktop/download)). Install Tableau please refer to [Tableau Desktop](https://www.tableau.com). - Install and run Tableau Desktop (if not installed, please download and install Windows operating system 64-bit [Download Tableau Desktop](https://www.tableau.com/products/desktop/download)). Install Tableau please refer to [Tableau Desktop](https://www.tableau.com).
- Download the latest Windows operating system X64 client driver from the TDengine official website and install it, refer to [Install ODBC Driver](../../../tdengine-reference/client-libraries/odbc/#Installation). - Download the latest Windows operating system X64 client driver from the TDengine official website and install it, refer to [Install ODBC Driver](../../../tdengine-reference/client-libraries/odbc/#installation).
## Configure Data Source ## Configure Data Source
**Step 1**, Search and open the "ODBC Data Source (64 bit)" management tool in the Start menu of the Windows operating system and configure it, refer to [Install ODBC Driver](../../../tdengine-reference/client-libraries/odbc/#Installation). **Step 1**, Search and open the "ODBC Data Source (64 bit)" management tool in the Start menu of the Windows operating system and configure it, refer to [Install ODBC Driver](../../../tdengine-reference/client-libraries/odbc/#installation).
:::tip :::tip
It should be noted that when configuring the ODBC data source for Tableau, the [Database] configuration item on the TDengine ODBC data source configuration page is required. You need to select a database that can be successfully connected. It should be noted that when configuring the ODBC data source for Tableau, the [Database] configuration item on the TDengine ODBC data source configuration page is required. You need to select a database that can be successfully connected.
@ -27,19 +27,19 @@ It should be noted that when configuring the ODBC data source for Tableau, the [
**Step 3**, Click the `DSN` radio button, then select the configured data source (MyTDengine), and click the `Connect` button. After the connection is successful, delete the content of the string attachment, and finally click the `Sign In` button. **Step 3**, Click the `DSN` radio button, then select the configured data source (MyTDengine), and click the `Connect` button. After the connection is successful, delete the content of the string attachment, and finally click the `Sign In` button.
![tableau-odbc](./tableau/tableau-odbc.jpg) ![tableau-odbc](./tableau/tableau-odbc.webp)
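Before moving on to worksheets, it can help to confirm that the DSN created in Step 1 connects outside of Tableau. The sketch below is a minimal check using Python's pyodbc package (an assumption, not part of the Tableau workflow); the DSN name `MyTDengine` matches the data source above, and `root`/`taosdata` are the TDengine defaults, so adjust both to your environment.

```python
# Minimal DSN sanity check (assumes the pyodbc package and the "MyTDengine" DSN
# configured in Step 1; the user name and password are the TDengine defaults).
import pyodbc

# Connect through the ODBC driver manager using the configured DSN.
conn = pyodbc.connect("DSN=MyTDengine;UID=root;PWD=taosdata")
cursor = conn.cursor()

# Any simple statement works; SHOW DATABASES lists the databases visible to this user.
cursor.execute("SHOW DATABASES;")
for row in cursor.fetchall():
    print(row)

cursor.close()
conn.close()
```

If this prints a list of databases, the ODBC side is working and any remaining issue lies in the Tableau connection settings.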
## Data Analysis ## Data Analysis
**Step 1**, In the workbook page, the connected data sources will be displayed. Clicking on the dropdown list of databases will display the databases that require data analysis. On this basis, click the search button in the table options to display all tables in the database. Then, drag the table to be analyzed to the right area to display the table structure. **Step 1**, In the workbook page, the connected data sources will be displayed. Clicking on the dropdown list of databases will display the databases that require data analysis. On this basis, click the search button in the table options to display all tables in the database. Then, drag the table to be analyzed to the right area to display the table structure.
![tableau-workbook](./tableau/tableau-table.jpg) ![tableau-workbook](./tableau/tableau-table.webp)
**Step 2**, Click the `Update Now` button below to display the data in the table. **Step 2**, Click the `Update Now` button below to display the data in the table.
![tableau-workbook](./tableau/tableau-data.jpg) ![tableau-workbook](./tableau/tableau-data.webp)
**Step 3**, Click on the "Worksheet" at the bottom of the window to pop up the data analysis window, which displays all the fields of the analysis table. Drag the fields to the rows and columns to display the chart. **Step 3**, Click on the "Worksheet" at the bottom of the window to pop up the data analysis window, which displays all the fields of the analysis table. Drag the fields to the rows and columns to display the chart.
![tableau-workbook](./tableau/tableau-analysis.jpg) ![tableau-workbook](./tableau/tableau-analysis.webp)

View File

@ -13,30 +13,30 @@ Prepare the following environment:
- TDengine 3.3.5.8 and above version is installed and running normally (both Enterprise and Community versions are available). - TDengine 3.3.5.8 and above version is installed and running normally (both Enterprise and Community versions are available).
- taosAdapter is running normally, refer to [taosAdapter Reference](../../../tdengine-reference/components/taosadapter/). - taosAdapter is running normally, refer to [taosAdapter Reference](../../../tdengine-reference/components/taosadapter/).
- Install and run Excel. If not installed, please download and install it. For specific instructions, please refer to Microsoft's official documentation. - Install and run Excel. If not installed, please download and install it. For specific instructions, please refer to Microsoft's official documentation.
- Download the latest Windows operating system X64 client driver from the TDengine official website and install it, refer to [Install ODBC Driver](../../../tdengine-reference/client-libraries/odbc/#Installation). - Download the latest Windows operating system X64 client driver from the TDengine official website and install it, refer to [Install ODBC Driver](../../../tdengine-reference/client-libraries/odbc/#installation).
## Configure Data Source ## Configure Data Source
**Step 1**, Search and open the [ODBC Data Source (64 bit)] management tool in the Start menu of the Windows operating system and configure it, refer to [Install ODBC Driver](../../../tdengine-reference/client-libraries/odbc/#Installation). **Step 1**, Search and open the [ODBC Data Source (64 bit)] management tool in the Start menu of the Windows operating system and configure it, refer to [Install ODBC Driver](../../../tdengine-reference/client-libraries/odbc/#installation).
**Step 2**, Start Excel in the Windows system environment, then select [Data] -> [Get Data] -> [From Other Sources] -> [From ODBC]. **Step 2**, Start Excel in the Windows system environment, then select [Data] -> [Get Data] -> [From Other Sources] -> [From ODBC].
![excel-odbc](./excel/odbc-menu.jpg) ![excel-odbc](./excel/odbc-menu.webp)
**Step 3**, In the pop-up window, select the data source you need to connect to from the drop-down list of [Data source name (DSN)], and then click the [OK] button. **Step 3**, In the pop-up window, select the data source you need to connect to from the drop-down list of [Data source name (DSN)], and then click the [OK] button.
![excel-odbc](./excel/odbc-select.jpg) ![excel-odbc](./excel/odbc-select.webp)
**Step 4**, Enter the username and password for TDengine. **Step 4**, Enter the username and password for TDengine.
![excel-odbc](./excel/odbc-config.jpg) ![excel-odbc](./excel/odbc-config.webp)
**Step 5**, In the pop-up [Navigator] dialog box, select the database tables you want to load, and then click [Load] to complete the data loading. **Step 5**, In the pop-up [Navigator] dialog box, select the database tables you want to load, and then click [Load] to complete the data loading.
![excel-odbc](./excel/odbc-load.jpg) ![excel-odbc](./excel/odbc-load.webp)
## Data Analysis ## Data Analysis
Select the imported data. On the [Insert] tab, choose the column chart, and then configure the data fields in the [PivotChart Fields] pane on the right. Select the imported data. On the [Insert] tab, choose the column chart, and then configure the data fields in the [PivotChart Fields] pane on the right.
![excel-odbc](./excel/odbc-data.jpg) ![excel-odbc](./excel/odbc-data.webp)

View File

@ -0,0 +1,82 @@
---
sidebar_label: FineBI
title: Integration With FineBI
toc_max_heading_level: 4
---
Fanruan is a technology company specializing in the field of business intelligence and data analytics. With its self-developed core products, FineBI and FineReport, the company has established a leading position in the industry. Fanruan's BI tools are widely adopted by enterprises across various sectors, empowering users to achieve data visualization analysis, report generation, and data-driven decision support.
Using the TDengine Java connector, FineBI can quickly access data in TDengine. Users can connect directly to the TDengine database from FineBI, retrieve time-series data for analysis, and create visual reports, all without writing any code.
## Prerequisites
- TDengine 3.3.4.0 and above version is installed and running normally (both Enterprise and Community versions are available).
- taosAdapter is running normally, refer to [taosAdapter Reference](../../../tdengine-reference/components/taosadapter/).
- Install FineBI (if it is not installed, download it from [Download FineBI](https://intl.finebi.com/download) and install it).
- Download the fine_conf_entity plugin to support the addition of JDBC drivers, [Download link](https://market.fanruan.com/plugin/1052a471-0239-4cd8-b832-045d53182c5d).
- Install the JDBC driver. Download the `TDengine JDBC connector` file `taos-jdbcdriver-3.4.0-dist.jar` or a higher version from `maven.org`.
## Configure Data Source
**Step 1**, In the `db.script` configuration file of the FineBI server, find the `SystemConfig.driverUpload` configuration item and change its value to true.
- Windows system: The path of the configuration file is webapps/webroot/WEB-INF/embed/finedb/db.script under the installation directory.
- Linux/Mac system: The path of the configuration file is /usr/local/FineBI6.1/webapps/webroot/WEB-INF/embed/finedb/db.script.
**Step 2**, Start the FineBI service. Enter `http://ip:37799/webroot/decision` in the browser, where "ip" is the IP address of the FineBI server.
**Step 3**, After logging in to the FineBI Web page, click [System Management] -> [Plugin Management]. In the [Store App] on the right side, click [Install From Local] and select the downloaded `fine_conf_entity` plugin for installation.
![finebi-workbook](./finebi/plugin.webp)
**Step 4**, Click [System Management] -> [Data Connection] -> [Data Connection Management]. On the right-hand page, click the [Driver Management] button to open the configuration page. Then click the [New Driver] button, and in the pop-up window, enter a name (for example, `tdengine-websocket`) to configure the JDBC driver.
![finebi-workbook](./finebi/connect-manage.webp)
**Step 5**, On the driver configuration page, click the [Upload File] button. Select the downloaded TDengine Java Connector (e.g., `taos-jdbcdriver-3.4.0-dist.jar`) for uploading. After the upload is complete, select `com.taosdata.jdbc.ws.WebSocketDriver` from the drop-down list of [Driver], and then click [Save].
![finebi-workbook](./finebi/new-driver.webp)
**Step 6**, On the "Data Connection Management" page, click the [New Data Connection] button. Subsequently, click "Others", and then on the right-side page, click "Other JDBC" to perform the connection configuration.
![finebi-workbook](./finebi/jdbc-connect.webp)
**Step 7**, On the configuration page, first enter the name of the data connection. Then, select "Custom" in the [Driver] option and choose the configured driver from the drop-down list (e.g., `com.taosdata.jdbc.ws.WebSocketDriver (tdengine-websocket)`). After that, configure the "Data Connection URL" (e.g., `jdbc:TAOS-WS://localhost:6041/power?user=root&password=taosdata&fineBIDialect=mysql`). Once the settings are completed, click [Test Connection] in the top-right corner to test the connection. After the verification is successful, click [Save] to finish the configuration.
:::tip
The `fineBIDialect=mysql` setting tells FineBI to apply the SQL dialect rules of the MySQL database; in other words, FineBI parses and executes the relevant queries and operations the way MySQL handles SQL statements.
:::
![finebi-workbook](./finebi/jdbc-config.webp)
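If the connection test in Step 7 fails, it is worth first confirming that taosAdapter is reachable at the host and port used in the connection URL. The sketch below is an optional check using Python's requests package (an assumption, not part of the FineBI setup) against taosAdapter's REST interface with the default `root`/`taosdata` credentials.

```python
# Quick reachability check for taosAdapter before configuring FineBI.
# Assumes the requests package, default credentials, and taosAdapter on localhost:6041.
import requests

resp = requests.post(
    "http://localhost:6041/rest/sql",  # taosAdapter REST endpoint
    data="SHOW DATABASES;",            # any simple SQL statement works
    auth=("root", "taosdata"),         # default TDengine user and password
    timeout=5,
)
resp.raise_for_status()
print(resp.json())                     # a JSON result set means the adapter is up
```

A successful response confirms that the address and credentials in the JDBC URL are valid before they are entered into FineBI.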
## Data Analysis
### Data preparation
**Step 1**, Click [Public Data]. On the right-hand page, click [New Folder] to create a folder (e.g., TDengine). Then, click the [+] button on the right side of the folder to create a "Database Table" dataset or an "SQL Dataset".
![finebi-workbook](./finebi/common.webp)
**Step 2**, Click "Database Table" to open the database table selection page. In the "Data Connection" section on the left, select the previously created connection. Then, all the tables in the database of the current connection will be displayed on the right. Select the table you need to load (e.g., meters), and click [OK]. The data in the meters table will then be displayed.
![finebi-workbook](./finebi/select-table.webp)
![finebi-workbook](./finebi/table-data.webp)
**Step 3**, Click "SQL Dataset" to open the configuration page for the SQL dataset. First, enter the table name (used for display on the FineBI page). Then, select the previously created connection from the drop-down list of "Data from Data Connection". After that, enter the SQL statement and click "Preview" to view the query results. Finally, click [OK] to successfully create the SQL dataset.
![finebi-workbook](./finebi/sql-data-config.webp)
### Smart Meter Example
**Step 1**, Click [My Analysis]. On the right-hand page, click [New Folder] to create a folder (for example, `TDengine`). Then, click the [+] button on the right side of the folder to create an "Analysis Subject".
![finebi-workbook](./finebi/analysis-object.webp)
**Step 2**, On the analysis subject page, select the dataset (for example, `meters`) and then click the [OK] button to complete the association of the dataset.
![finebi-workbook](./finebi/load-data.webp)
**Step 3**, Click the [Component] tab at the bottom of the analysis subject page to open the chart configuration page. Drag the fields to the horizontal axis or the vertical axis, and then the chart will be displayed.
![finebi-workbook](./finebi/analysis-chart.webp)

(Binary image changes: the .jpg screenshots referenced in the documents above were replaced with .webp versions, and new FineBI screenshots were added; image content is not shown.)

View File

@ -371,10 +371,14 @@ Specify the configuration parameters for tag and data columns in `super_tables`
### Query Parameters ### Query Parameters
In query scenarios, `filetype` must be set to `query`. `filetype` must be set to `query`.
`query_mode` specifies the connection method used for queries:
- "taosc": native connection.
- "rest": RESTful connection.
`query_times` specifies the number of times to run the query, numeric type. `query_times` specifies the number of times to run the query, numeric type.
In query scenarios, the execution of slow queries can be controlled with the `kill_slow_query_threshold` and `kill_slow_query_interval` parameters: the threshold specifies, in seconds, the execution time (exec_usec) beyond which a query is killed by taosBenchmark, and the interval specifies, in seconds, the sleep time used to avoid continuous slow-query checks consuming CPU.
For other common parameters, see [General Configuration Parameters](#general-configuration-parameters) For other common parameters, see [General Configuration Parameters](#general-configuration-parameters)
@ -387,8 +391,21 @@ Configuration parameters for querying specified tables (can specify supertables,
The total number of queries(`General Query`) = the number of `sqls` * `query_times` * `threads` The total number of queries(`General Query`) = the number of `sqls` * `query_times` * `threads`
`Mixed Query`: `Mixed Query`:
All SQL statements in `sqls` are divided into `threads` groups, with each thread executing one group. Each SQL statement needs to execute `query_times` queries. All SQL statements in `sqls` are divided into `threads` groups, with each thread executing one group. Each SQL statement needs to execute `query_times` queries.
The total number of queries(`Mixed Query`) = the number of `sqls` * `query_times` The total number of queries(`Mixed Query`) = the number of `sqls` * `query_times`.
- **batch_query** : Batch query switch.
"yes": enabled; "no": disabled; any other value reports an error.
Batch query means that all SQL statements in `sqls` are divided into `threads` groups, with each thread executing one group.
Each SQL statement is executed only once before the thread exits; the main thread waits for all threads to complete and then checks whether the `query_interval` parameter is set. If it is, the main thread sleeps for the specified time before restarting the thread groups and repeating the process, until the configured number of query rounds is exhausted.
Functional limitations:
- Only supported when `mixed_query` is set to 'yes'.
- RESTful queries are not supported, meaning `query_mode` cannot be 'rest'.
- **query_interval** : Query interval, in millisecond, default is 0. - **query_interval** : Query interval, in millisecond, default is 0.
When `batch_query` is enabled, this is the interval after each batch of queries completes; when disabled, it is the interval between the completion of individual SQL queries.
If a query's execution time exceeds the interval, no additional wait occurs; if it is shorter than the interval, the remaining time is waited out to make up the interval. A sketch of a complete query configuration is shown after this parameter list.
- **threads** : Number of threads executing the SQL query, default is 1. - **threads** : Number of threads executing the SQL query, default is 1.
- **sqls**: - **sqls**:
- **sql**: The SQL command to execute, required. - **sql**: The SQL command to execute, required.
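To make the query parameters above concrete, the following sketch generates a query configuration file for `taosBenchmark -f query.json`. It is written as a small Python script only so that the fields can carry inline comments; the database, table names, and SQL statements are placeholders, and any field not described above should be adapted to the actual environment.

```python
# Illustrative query configuration for `taosBenchmark -f query.json` (a sketch,
# not a complete reference). Table names and SQL statements are placeholders.
import json

config = {
    "filetype": "query",           # query scenario
    "query_times": 5,              # number of query repetitions (see above)
    "query_mode": "taosc",         # native connection; batch query cannot use "rest"
    "specified_table_query": {
        "mixed_query": "yes",      # batch query requires mixed query to be enabled
        "batch_query": "yes",      # run each SQL once per round, then wait query_interval
        "query_interval": 1000,    # interval in milliseconds between rounds
        "threads": 2,              # SQL statements are split into 2 groups, one per thread
        "sqls": [
            {"sql": "SELECT COUNT(*) FROM test.meters"},
            {"sql": "SELECT AVG(current) FROM test.meters"},
        ],
    },
}

with open("query.json", "w") as f:
    json.dump(config, f, indent=2)
print("wrote query.json")
```

With this configuration, each round executes both SQL statements once across the thread groups and then sleeps for `query_interval` milliseconds before the next round.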

View File

@ -462,8 +462,8 @@ This document details the server error codes that may be encountered when using
| 0x80002688 | Cannot use 'year' or 'month' as true_for duration | Use year or month as true_for_duration | Check and correct the SQL statement | | 0x80002688 | Cannot use 'year' or 'month' as true_for duration | Use year or month as true_for_duration | Check and correct the SQL statement |
| 0x80002689 | Invalid using cols function | Illegal using cols function | Check and correct the SQL statement | | 0x80002689 | Invalid using cols function | Illegal using cols function | Check and correct the SQL statement |
| 0x8000268A | Cols function's first param must be a select function that output a single row | The first parameter of the cols function should be a selection function | Check and correct the SQL statement | | 0x8000268A | Cols function's first param must be a select function that output a single row | The first parameter of the cols function should be a selection function | Check and correct the SQL statement |
| 0x8000268B | Invalid using cols function with multiple output columns | Illegal using the cols function for multiple column output | Check and correct the SQL statement | | 0x8000268B | Invalid using alias for cols function | Illegal cols function alias | Check and correct the SQL statement |
| 0x8000268C | Invalid using alias for cols function | Illegal cols function alias | Check and correct the SQL statement | | 0x8000268C | Join primary key col must be timestmap type | Join primary key data type error | Check and correct the SQL statement |
| 0x800026FF | Parser internal error | Internal error in parser | Preserve the scene and logs, report issue on GitHub | | 0x800026FF | Parser internal error | Internal error in parser | Preserve the scene and logs, report issue on GitHub |
| 0x80002700 | Planner internal error | Internal error in planner | Preserve the scene and logs, report issue on GitHub | | 0x80002700 | Planner internal error | Internal error in planner | Preserve the scene and logs, report issue on GitHub |
| 0x80002701 | Expect ts equal | JOIN condition validation failed | Preserve the scene and logs, report issue on GitHub | | 0x80002701 | Expect ts equal | JOIN condition validation failed | Preserve the scene and logs, report issue on GitHub |

View File

@ -26,18 +26,18 @@ Tableau 是一款知名的商业智能工具,它支持多种数据源,可方
**第 3 步**,点击 `DSN` 单选框,接着选择已配置好的数据源(MyTDengine),然后点击`连接`按钮。待连接成功后,删除字符串附加部分的内容,最后点击`登录`按钮即可。 **第 3 步**,点击 `DSN` 单选框,接着选择已配置好的数据源(MyTDengine),然后点击`连接`按钮。待连接成功后,删除字符串附加部分的内容,最后点击`登录`按钮即可。
![tableau-odbc](./tableau/tableau-odbc.jpg) ![tableau-odbc](./tableau/tableau-odbc.webp)
## 数据分析 ## 数据分析
**第 1 步**,在工作簿页面中,选择已连接的数据源。点击数据库的下拉列表,会显示需要进行数据分析的数据库。在此基础上,点击表选项中的查找按钮,即可将该数据库下的所有表显示出来。然后,拖动需要分析的表到右侧区域,即可显示出表结构。 **第 1 步**,在工作簿页面中,选择已连接的数据源。点击数据库的下拉列表,会显示需要进行数据分析的数据库。在此基础上,点击表选项中的查找按钮,即可将该数据库下的所有表显示出来。然后,拖动需要分析的表到右侧区域,即可显示出表结构。
![tableau-workbook](./tableau/tableau-table.jpg) ![tableau-workbook](./tableau/tableau-table.webp)
**第 2 步**,点击下方的"立即更新"按钮,即可将表中的数据展示出来。 **第 2 步**,点击下方的"立即更新"按钮,即可将表中的数据展示出来。
![tableau-workbook](./tableau/tableau-data.jpg) ![tableau-workbook](./tableau/tableau-data.webp)
**第 3 步**,点击窗口下方的"工作表",弹出数据分析窗口, 并展示分析表的所有字段,将字段拖动到行列即可展示出图表。 **第 3 步**,点击窗口下方的"工作表",弹出数据分析窗口, 并展示分析表的所有字段,将字段拖动到行列即可展示出图表。
![tableau-workbook](./tableau/tableau-analysis.jpg) ![tableau-workbook](./tableau/tableau-analysis.webp)

View File

@ -19,22 +19,22 @@ title: 与 Excel 集成
**第 2 步**,在 Windows 系统环境下启动 Excel之后选择【数据】->【获取数据】->【自其他源】->【从ODBC】。 **第 2 步**,在 Windows 系统环境下启动 Excel之后选择【数据】->【获取数据】->【自其他源】->【从ODBC】。
![excel-odbc](./excel/odbc-menu.jpg) ![excel-odbc](./excel/odbc-menu.webp)
**第 3 步**,在弹出窗口的【数据源名称(DSN)】下拉列表中选择需要连接的数据源后,点击【确定】按钮。 **第 3 步**,在弹出窗口的【数据源名称(DSN)】下拉列表中选择需要连接的数据源后,点击【确定】按钮。
![excel-odbc](./excel/odbc-select.jpg) ![excel-odbc](./excel/odbc-select.webp)
**第 4 步**,输入 TDengine 的用户名密码。 **第 4 步**,输入 TDengine 的用户名密码。
![excel-odbc](./excel/odbc-config.jpg) ![excel-odbc](./excel/odbc-config.webp)
**第 5 步**,在弹出的【导航器】对话框中,选择要加载的库表, 并点击【加载】完成数据加载。 **第 5 步**,在弹出的【导航器】对话框中,选择要加载的库表, 并点击【加载】完成数据加载。
![excel-odbc](./excel/odbc-load.jpg) ![excel-odbc](./excel/odbc-load.webp)
## 数据分析 ## 数据分析
选中导入的数据,在【插入】选项卡中选择柱状图,并且在右侧的【数据透视图】中配置数据字段。 选中导入的数据,在【插入】选项卡中选择柱状图,并且在右侧的【数据透视图】中配置数据字段。
![excel-odbc](./excel/odbc-data.jpg) ![excel-odbc](./excel/odbc-data.webp)

View File

@ -0,0 +1,83 @@
---
sidebar_label: FineBI
title: 与 FineBI 集成
---
帆软是一家专注于商业智能与数据分析领域的科技企业,凭借自主研发的 FineBI 和 FineReport 两款核心产品在行业内占据重要地位。帆软的 BI 工具广泛应用于各类企业,帮助用户实现数据的可视化分析、报表生成和数据决策支持。
通过使用 `TDengine Java connector` 连接器FineBI 可以快速访问 TDengine 的数据。用户可以在 FineBI 中直接连接 TDengine 数据库,获取时序数据进行分析并制作可视化报表,整个过程不需要任何代码编写过程。
## 前置条件
准备以下环境:
- TDengine 3.3.4.0 以上版本集群已部署并正常运行(企业及社区版均可)。
- taosAdapter 能够正常运行,详细参考 [taosAdapter 参考手册](../../../reference/components/taosadapter)。
- FineBI 安装(如未安装,请下载并安装 [FineBI 下载](https://www.finebi.com/product/download))。
- 下载 `fine_conf_entity` 插件,用于支持添加 JDBC 驱动,[下载地址](https://market.fanruan.com/plugin/1052a471-0239-4cd8-b832-045d53182c5d)。
- 安装 JDBC 驱动。从 `maven.org` 下载 `TDengine JDBC` 连接器文件 `taos-jdbcdriver-3.4.0-dist.jar` 或以上版本。
## 配置数据源
**第 1 步**,在 FineBI 服务端 `db.script` 配置文件中,找到 `SystemConfig.driverUpload` 配置项并将其修改为 `true`
- Windows 系统:配置文件路径是安装目录下 `webapps/webroot/WEB-INF/embed/finedb/db.script`
- Linux/Mac 系统:配置文件路径是 `/usr/local/FineBI6.1/webapps/webroot/WEB-INF/embed/finedb/db.script`。
**第 2 步**,启动 FineBI 服务,在浏览器中输入 `http://ip:37799/webroot/decision`, 其中 ip 是 FineBI 服务端 ip 地址。
**第 3 步** 打开 FineBI Web 页面登录后,点击【管理系统】->【插件管理】,在右侧的【应用商城】中点击【从本地安装】选择已下载的 `fine_conf_entity` 插件进行安装。
![finebi-workbook](./finebi/plugin.webp)
**第 4 步**,点击【管理系统】->【数据连接】->【数据连接管理】,在右侧页面中点击【驱动管理】按钮打开配置页面,点击【新建驱动】按钮并在弹出窗口中输入名称(比如 `tdengine-websocket`),进行 JDBC 驱动配置。
![finebi-workbook](./finebi/connect-manage.webp)
**第 5 步**,在驱动配置页面中点击【上传文件】按钮,选择已下载的 `TDengine Java Connector`(比如 `taos-jdbcdriver-3.4.0-dist.jar`)进行上传,上传完成后在【驱动】的下拉列表中选择 `com.taosdata.jdbc.ws.WebSocketDriver`,并点击【保存】。
![finebi-workbook](./finebi/new-driver.webp)
**第 6 步**,在 “数据连接管理” 页面中,点击【新建数据连接】按钮,随后点击 “其他” ,在右侧页面中点击 “其他JDBC” 进行连接配置。
![finebi-workbook](./finebi/jdbc-connect.webp)
**第 7 步**,在配置页面,先输入数据连接名称,接着在【驱动】选项中选择 “自定义”,并从下拉列表里选取已配置的驱动(例如 `com.taosdata.jdbc.ws.WebSocketDriver (tdengine-websocket)`),之后配置 “数据连接 URL”例如 `jdbc:TAOS-WS://localhost:6041/power?user=root&password=taosdata&fineBIDialect=mysql`)。设置完成后,点击右上角的【测试连接】进行连接测试,验证成功后点击【保存】即可完成配置。
:::tip
`fineBIDialect=mysql` 设置的含义是采用 MySQL 数据库的 SQL 方言规则。简单来说,就是告诉 FineBI 按照 MySQL 数据库处理 SQL 语句的特定方式来解析和执行相关的查询与操作。
:::
![finebi-workbook](./finebi/jdbc-config.webp)
## 数据分析
### 数据准备
**第 1 步**,点击【公共数据】在右侧页面中点击【新建文件夹】即可创建一个文件夹(比如 `TDengine` 接着在文件夹的右侧点击【+】按钮,可创建 “数据库表” 数据集或 “SQL数据集”。
![finebi-workbook](./finebi/common.webp)
**第 2 步**,点击 “数据库表”,打开数据库选表页面,在左侧 “数据连接” 中选择已创建的连接,则在右侧会显示当前连接的数据库中的所有表,选择需要加载的表(比如 `meters`),点击【确定】即可显示 `meters` 表中的数据。
![finebi-workbook](./finebi/select-table.webp)
![finebi-workbook](./finebi/table-data.webp)
**第 3 步**,点击 “SQL数据集”打开 SQL 数据集的配置页面,首先输入表名(用于在 FineBI 页面显示),接着在 “数据来自数据连接” 下拉列表中选择已创建的连接, 之后输入 SQL 语句并点击预览即可看到查询结果最后点击【确定】SQL 数据集即可创建成功。
![finebi-workbook](./finebi/sql-data-config.webp)
### 智能电表样例
**第 1 步**,点击【我的分析】在右侧页面中点击【新建文件夹】即可创建一个文件夹(比如 `TDengine` 接着在文件夹的右侧点击【+】按钮,可创建 “分析主题”。
![finebi-workbook](./finebi/analysis-object.webp)
**第 2 步**,在分析主题页面选择数据集(比如 `meters`)后点击【确定】按钮,即可完成数据集关联。
![finebi-workbook](./finebi/load-data.webp)
**第 3 步**,点击分析主题页面下方的【组件】标签,打开图表配置页面, 拖动字段到横轴或纵轴即可展示出图表。
![finebi-workbook](./finebi/analysis-chart.webp)

(Binary image changes for the corresponding Chinese documents: .jpg screenshots were replaced with .webp versions, and new FineBI screenshots were added; image content is not shown.)

View File

@ -280,27 +280,45 @@ taosBenchmark -f <json file>
### 查询配置参数 ### 查询配置参数
查询场景下 `filetype` 必须设置为 `query` 查询场景下 `filetype` 必须设置为 `query`
`query_mode` 查询连接方式,取值为:
- “taosc”: 通过 Native 连接方式查询。
- “rest” : 通过 restful 连接方式查询。
`query_times` 指定运行查询的次数,数值类型。 `query_times` 指定运行查询的次数,数值类型。
查询场景可以通过设置 `kill_slow_query_threshold``kill_slow_query_interval` 参数来控制杀掉慢查询语句的执行threshold 控制如果 exec_usec 超过指定时间的查询将被 taosBenchmark 杀掉,单位为秒。
interval 控制休眠时间,避免持续查询慢查询消耗 CPU单位为秒。
其它通用参数详见 [通用配置参数](#通用配置参数) 其它通用参数详见 [通用配置参数](#通用配置参数)。
#### 执行指定查询语句 #### 执行指定查询语句
查询指定表(可以指定超级表、子表或普通表)的配置参数在 `specified_table_query` 中设置。 查询指定表(可以指定超级表、子表或普通表)的配置参数在 `specified_table_query` 中设置。
- **mixed_query**:查询模式
“yes”`混合查询` - **mixed_query**:混合查询开关。
"no"(默认值)`普通查询` “yes”: 开启 “混合查询”。
`普通查询``sqls` 中每个 sql 启动 `threads` 个线程查询此 sql执行完 `query_times` 次查询后退出,执行此 sql 的所有线程都完成后进入下一个 sql “no” : 关闭 “混合查询” ,即 “普通查询”。
- 普通查询:
`sqls` 中每个 sql 启动 `threads` 个线程查询此 sql, 执行完 `query_times` 次查询后退出,执行此 sql 的所有线程都完成后进入下一个 sql
`查询总次数` = `sqls` 个数 * `query_times` * `threads` `查询总次数` = `sqls` 个数 * `query_times` * `threads`
`混合查询``sqls` 中所有 sql 分成 `threads` 个组,每个线程执行一组,每个 sql 都需执行 `query_times` 次查询 - 混合查询:
`sqls` 中所有 sql 分成 `threads` 个组,每个线程执行一组, 每个 sql 都需执行 `query_times` 次查询
`查询总次数` = `sqls` 个数 * `query_times` `查询总次数` = `sqls` 个数 * `query_times`
- **batch_query**:批查询功能开关。
取值范围 “yes” 表示开启,"no" 不开启,其它值报错。
批查询是指 `sqls` 中所有 sql 分成 `threads` 个组,每个线程执行一组,每个 sql 只执行一次查询后退出,主线程等待所有线程都执行完,再判断是否设置有 `query_interval` 参数,如果有需要 sleep 指定时间,再启动各线程组重复前面的过程,直到查询次数耗尽为止。
功能限制条件:
- 只支持 `mixed_query` 为 "yes" 的场景。
- 不支持 restful 查询,即 `query_mode` 不能为 "rest"。
- **query_interval**查询时间间隔单位millisecond默认值为 0。 - **query_interval**查询时间间隔单位millisecond默认值为 0。
"batch_query" 开关打开时,表示是每批查询完间隔时间;关闭时,表示每个 sql 查询完间隔时间
如果执行查询的时间超过间隔时间,那么将不再等待,如果执行查询的时间不足间隔时间,需等待补足间隔时间
- **threads**:执行查询 SQL 的线程数,默认值为 1。 - **threads**:执行查询 SQL 的线程数,默认值为 1。

View File

@ -479,8 +479,8 @@ description: TDengine 服务端的错误码列表和详细说明
| 0x80002688 | Cannot use 'year' or 'month' as true_for duration | 不能使用 n(月), y(年) 作为 true_for 的时间单位 | 检查并修正 SQL 语句 | | 0x80002688 | Cannot use 'year' or 'month' as true_for duration | 不能使用 n(月), y(年) 作为 true_for 的时间单位 | 检查并修正 SQL 语句 |
| 0x80002689 | Invalid using cols function | cols 函数使用错误 | 检查并修正 SQL 语句 | | 0x80002689 | Invalid using cols function | cols 函数使用错误 | 检查并修正 SQL 语句 |
| 0x8000268A | Cols function's first param must be a select function that output a single row | cols 函数第一个参数应该为选择函数 | 检查并修正 SQL 语句 | | 0x8000268A | Cols function's first param must be a select function that output a single row | cols 函数第一个参数应该为选择函数 | 检查并修正 SQL 语句 |
| 0x8000268B | Invalid using cols function with multiple output columns | 多列输出的 cols 函数使用错误 | 检查并修正 SQL 语句 | | 0x8000268B | Invalid using alias for cols function | cols 函数输出列重命名错误 | 检查并修正 SQL 语句 |
| 0x8000268C | Invalid using alias for cols function | cols 函数输出列重命名错误 | 检查并修正 SQL 语句 | | 0x8000268C | Join primary key col must be timestmap type | 关联查询主键列等值条件类型错误 | 检查并修正 SQL 语句 |
| 0x800026FF | Parser internal error | 解析器内部错误 | 保留现场和日志github上报issue | | 0x800026FF | Parser internal error | 解析器内部错误 | 保留现场和日志github上报issue |
| 0x80002700 | Planner internal error | 计划期内部错误 | 保留现场和日志github上报issue | | 0x80002700 | Planner internal error | 计划期内部错误 | 保留现场和日志github上报issue |
| 0x80002701 | Expect ts equal | JOIN 条件校验失败 | 保留现场和日志github上报issue | | 0x80002701 | Expect ts equal | JOIN 条件校验失败 | 保留现场和日志github上报issue |

View File

@ -156,6 +156,13 @@ typedef struct SJoinLogicNode {
bool hashJoinHint; bool hashJoinHint;
bool batchScanHint; bool batchScanHint;
// FOR CONST JOIN
bool noPrimKeyEqCond;
bool leftConstPrimGot;
bool rightConstPrimGot;
bool leftNoOrderedSubQuery;
bool rightNoOrderedSubQuery;
// FOR HASH JOIN // FOR HASH JOIN
int32_t timeRangeTarget; // table onCond filter int32_t timeRangeTarget; // table onCond filter
STimeWindow timeRange; // table onCond filter STimeWindow timeRange; // table onCond filter

View File

@ -57,10 +57,13 @@ typedef struct SExprNode {
SDataType resType; SDataType resType;
char aliasName[TSDB_COL_NAME_LEN]; char aliasName[TSDB_COL_NAME_LEN];
char userAlias[TSDB_COL_NAME_LEN]; char userAlias[TSDB_COL_NAME_LEN];
char srcTable[TSDB_TABLE_NAME_LEN];
SArray* pAssociation; SArray* pAssociation;
bool asAlias; bool asAlias;
bool asParam; bool asParam;
bool asPosition; bool asPosition;
bool joinSrc;
//bool constValue;
int32_t projIdx; int32_t projIdx;
int32_t relatedTo; int32_t relatedTo;
int32_t bindExprID; int32_t bindExprID;
@ -209,6 +212,7 @@ typedef struct STableNode {
char tableAlias[TSDB_TABLE_NAME_LEN]; char tableAlias[TSDB_TABLE_NAME_LEN];
uint8_t precision; uint8_t precision;
bool singleTable; bool singleTable;
bool inJoin;
} STableNode; } STableNode;
struct STableMeta; struct STableMeta;
@ -291,6 +295,10 @@ typedef struct SJoinTableNode {
SNode* addPrimCond; SNode* addPrimCond;
bool hasSubQuery; bool hasSubQuery;
bool isLowLevelJoin; bool isLowLevelJoin;
bool leftNoOrderedSubQuery;
bool rightNoOrderedSubQuery;
//bool condAlwaysTrue;
//bool condAlwaysFalse;
SNode* pLeft; SNode* pLeft;
SNode* pRight; SNode* pRight;
SNode* pOnCond; SNode* pOnCond;
@ -706,6 +714,8 @@ int32_t mergeJoinConds(SNode** ppDst, SNode** ppSrc);
void rewriteExprAliasName(SExprNode* pNode, int64_t num); void rewriteExprAliasName(SExprNode* pNode, int64_t num);
bool isRelatedToOtherExpr(SExprNode* pExpr); bool isRelatedToOtherExpr(SExprNode* pExpr);
bool nodesContainsColumn(SNode* pNode);
int32_t nodesMergeNode(SNode** pCond, SNode** pAdditionalCond);
#ifdef __cplusplus #ifdef __cplusplus
} }

View File

@ -33,6 +33,7 @@ pNode will be freed in API;
*/ */
int32_t scalarCalculateConstants(SNode *pNode, SNode **pRes); int32_t scalarCalculateConstants(SNode *pNode, SNode **pRes);
int32_t scalarCalculateConstantsFromDual(SNode *pNode, SNode **pRes); int32_t scalarCalculateConstantsFromDual(SNode *pNode, SNode **pRes);
int32_t scalarConvertOpValueNodeTs(SOperatorNode *node);
/* /*
pDst need to freed in caller pDst need to freed in caller

View File

@ -913,8 +913,8 @@ int32_t taosGetErrSize();
#define TSDB_CODE_PAR_TRUE_FOR_UNIT TAOS_DEF_ERROR_CODE(0, 0x2688) #define TSDB_CODE_PAR_TRUE_FOR_UNIT TAOS_DEF_ERROR_CODE(0, 0x2688)
#define TSDB_CODE_PAR_INVALID_COLS_FUNCTION TAOS_DEF_ERROR_CODE(0, 0x2689) #define TSDB_CODE_PAR_INVALID_COLS_FUNCTION TAOS_DEF_ERROR_CODE(0, 0x2689)
#define TSDB_CODE_PAR_INVALID_COLS_SELECTFUNC TAOS_DEF_ERROR_CODE(0, 0x268A) #define TSDB_CODE_PAR_INVALID_COLS_SELECTFUNC TAOS_DEF_ERROR_CODE(0, 0x268A)
#define TSDB_CODE_INVALID_MULITI_COLS_FUNC TAOS_DEF_ERROR_CODE(0, 0x268B) #define TSDB_CODE_PAR_INVALID_COLS_ALIAS TAOS_DEF_ERROR_CODE(0, 0x268B)
#define TSDB_CODE_INVALID_COLS_ALIAS TAOS_DEF_ERROR_CODE(0, 0x268C) #define TSDB_CODE_PAR_PRIM_KEY_MUST_BE_TS TAOS_DEF_ERROR_CODE(0, 0x268C)
#define TSDB_CODE_PAR_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x26FF) #define TSDB_CODE_PAR_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x26FF)
//planner //planner

View File

@ -170,6 +170,8 @@ static void dmSetSignalHandle() {
#endif #endif
} }
extern bool generateNewMeta;
static int32_t dmParseArgs(int32_t argc, char const *argv[]) { static int32_t dmParseArgs(int32_t argc, char const *argv[]) {
global.startTime = taosGetTimestampMs(); global.startTime = taosGetTimestampMs();
@ -208,6 +210,8 @@ static int32_t dmParseArgs(int32_t argc, char const *argv[]) {
global.dumpSdb = true; global.dumpSdb = true;
} else if (strcmp(argv[i], "-dTxn") == 0) { } else if (strcmp(argv[i], "-dTxn") == 0) {
global.deleteTrans = true; global.deleteTrans = true;
} else if (strcmp(argv[i], "-r") == 0) {
generateNewMeta = true;
} else if (strcmp(argv[i], "-E") == 0) { } else if (strcmp(argv[i], "-E") == 0) {
if (i < argc - 1) { if (i < argc - 1) {
if (strlen(argv[++i]) >= PATH_MAX) { if (strlen(argv[++i]) >= PATH_MAX) {

View File

@ -257,22 +257,185 @@ void vnodeGetMetaPath(SVnode *pVnode, const char *metaDir, char *fname) {
snprintf(fname + offset, TSDB_FILENAME_LEN - offset - 1, "%s%s", TD_DIRSEP, metaDir); snprintf(fname + offset, TSDB_FILENAME_LEN - offset - 1, "%s%s", TD_DIRSEP, metaDir);
} }
bool generateNewMeta = false;
static int32_t metaGenerateNewMeta(SMeta **ppMeta) {
SMeta *pNewMeta = NULL;
SMeta *pMeta = *ppMeta;
SVnode *pVnode = pMeta->pVnode;
metaInfo("vgId:%d start to generate new meta", TD_VID(pMeta->pVnode));
// Open a new meta for organization
int32_t code = metaOpenImpl(pMeta->pVnode, &pNewMeta, VNODE_META_TMP_DIR, false);
if (code) {
return code;
}
code = metaBegin(pNewMeta, META_BEGIN_HEAP_NIL);
if (code) {
return code;
}
// i == 0, scan super table
// i == 1, scan normal table and child table
for (int i = 0; i < 2; i++) {
TBC *uidCursor = NULL;
int32_t counter = 0;
code = tdbTbcOpen(pMeta->pUidIdx, &uidCursor, NULL);
if (code) {
metaError("vgId:%d failed to open uid index cursor, reason:%s", TD_VID(pVnode), tstrerror(code));
return code;
}
code = tdbTbcMoveToFirst(uidCursor);
if (code) {
metaError("vgId:%d failed to move to first, reason:%s", TD_VID(pVnode), tstrerror(code));
tdbTbcClose(uidCursor);
return code;
}
for (;;) {
const void *pKey;
int kLen;
const void *pVal;
int vLen;
if (tdbTbcGet(uidCursor, &pKey, &kLen, &pVal, &vLen) < 0) {
break;
}
tb_uid_t uid = *(tb_uid_t *)pKey;
SUidIdxVal *pUidIdxVal = (SUidIdxVal *)pVal;
if ((i == 0 && (pUidIdxVal->suid && pUidIdxVal->suid == uid)) // super table
|| (i == 1 && (pUidIdxVal->suid == 0 || pUidIdxVal->suid != uid)) // normal table and child table
) {
counter++;
if (i == 0) {
metaInfo("vgId:%d counter:%d new meta handle %s table uid:%" PRId64, TD_VID(pVnode), counter, "super", uid);
} else {
metaInfo("vgId:%d counter:%d new meta handle %s table uid:%" PRId64, TD_VID(pVnode), counter,
pUidIdxVal->suid == 0 ? "normal" : "child", uid);
}
// fetch table entry
void *value = NULL;
int valueSize = 0;
if (tdbTbGet(pMeta->pTbDb,
&(STbDbKey){
.version = pUidIdxVal->version,
.uid = uid,
},
sizeof(uid), &value, &valueSize) == 0) {
SDecoder dc = {0};
SMetaEntry me = {0};
tDecoderInit(&dc, value, valueSize);
if (metaDecodeEntry(&dc, &me) == 0) {
if (me.type == TSDB_CHILD_TABLE &&
tdbTbGet(pMeta->pUidIdx, &me.ctbEntry.suid, sizeof(me.ctbEntry.suid), NULL, NULL) != 0) {
metaError("vgId:%d failed to get super table uid:%" PRId64 " for child table uid:%" PRId64,
TD_VID(pVnode), me.ctbEntry.suid, uid);
} else if (metaHandleEntry2(pNewMeta, &me) != 0) {
metaError("vgId:%d failed to handle entry, uid:%" PRId64, TD_VID(pVnode), uid);
}
}
tDecoderClear(&dc);
}
tdbFree(value);
}
code = tdbTbcMoveToNext(uidCursor);
if (code) {
metaError("vgId:%d failed to move to next, reason:%s", TD_VID(pVnode), tstrerror(code));
return code;
}
}
tdbTbcClose(uidCursor);
}
code = metaCommit(pNewMeta, pNewMeta->txn);
if (code) {
metaError("vgId:%d failed to commit, reason:%s", TD_VID(pVnode), tstrerror(code));
return code;
}
code = metaFinishCommit(pNewMeta, pNewMeta->txn);
if (code) {
metaError("vgId:%d failed to finish commit, reason:%s", TD_VID(pVnode), tstrerror(code));
return code;
}
if ((code = metaBegin(pNewMeta, META_BEGIN_HEAP_NIL)) != 0) {
metaError("vgId:%d failed to begin new meta, reason:%s", TD_VID(pVnode), tstrerror(code));
}
metaClose(&pNewMeta);
metaInfo("vgId:%d finish to generate new meta", TD_VID(pVnode));
// Commit the new metadata
char metaDir[TSDB_FILENAME_LEN] = {0};
char metaTempDir[TSDB_FILENAME_LEN] = {0};
char metaBackupDir[TSDB_FILENAME_LEN] = {0};
vnodeGetMetaPath(pVnode, metaDir, VNODE_META_DIR);
vnodeGetMetaPath(pVnode, metaTempDir, VNODE_META_TMP_DIR);
vnodeGetMetaPath(pVnode, metaBackupDir, VNODE_META_BACKUP_DIR);
metaClose(ppMeta);
if (taosRenameFile(metaDir, metaBackupDir) != 0) {
metaError("vgId:%d failed to rename old meta to backup, reason:%s", TD_VID(pVnode), tstrerror(terrno));
return terrno;
}
// rename the new meta to old meta
if (taosRenameFile(metaTempDir, metaDir) != 0) {
metaError("vgId:%d failed to rename new meta to old meta, reason:%s", TD_VID(pVnode), tstrerror(terrno));
return terrno;
}
code = metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, false);
if (code) {
metaError("vgId:%d failed to open new meta, reason:%s", TD_VID(pVnode), tstrerror(code));
return code;
}
metaInfo("vgId:%d successfully opened new meta", TD_VID(pVnode));
return 0;
}
int32_t metaOpen(SVnode *pVnode, SMeta **ppMeta, int8_t rollback) { int32_t metaOpen(SVnode *pVnode, SMeta **ppMeta, int8_t rollback) {
int32_t code = TSDB_CODE_SUCCESS; int32_t code = TSDB_CODE_SUCCESS;
char metaDir[TSDB_FILENAME_LEN] = {0}; char metaDir[TSDB_FILENAME_LEN] = {0};
char metaBackupDir[TSDB_FILENAME_LEN] = {0};
char metaTempDir[TSDB_FILENAME_LEN] = {0}; char metaTempDir[TSDB_FILENAME_LEN] = {0};
vnodeGetMetaPath(pVnode, VNODE_META_DIR, metaDir); vnodeGetMetaPath(pVnode, VNODE_META_DIR, metaDir);
vnodeGetMetaPath(pVnode, VNODE_META_BACKUP_DIR, metaBackupDir);
vnodeGetMetaPath(pVnode, VNODE_META_TMP_DIR, metaTempDir); vnodeGetMetaPath(pVnode, VNODE_META_TMP_DIR, metaTempDir);
// Check file states bool metaExists = taosCheckExistFile(metaDir);
if (!taosCheckExistFile(metaDir) && taosCheckExistFile(metaTempDir)) { bool metaBackupExists = taosCheckExistFile(metaBackupDir);
bool metaTempExists = taosCheckExistFile(metaTempDir);
if ((!metaBackupExists && !metaExists && metaTempExists) //
|| (metaBackupExists && !metaExists && !metaTempExists) //
|| (metaBackupExists && metaExists && metaTempExists) //
) {
metaError("vgId:%d, invalid meta state, please check!", TD_VID(pVnode));
return TSDB_CODE_FAILED;
} else if (!metaBackupExists && metaExists && metaTempExists) {
taosRemoveDir(metaTempDir);
} else if (metaBackupExists && !metaExists && metaTempExists) {
code = taosRenameFile(metaTempDir, metaDir); code = taosRenameFile(metaTempDir, metaDir);
if (code) { if (code) {
metaError("vgId:%d, %s failed at %s:%d since %s: rename %s to %s failed", TD_VID(pVnode), __func__, __FILE__, metaError("vgId:%d, %s failed at %s:%d since %s", TD_VID(pVnode), __func__, __FILE__, __LINE__, tstrerror(code));
__LINE__, tstrerror(code), metaTempDir, metaDir);
return code; return code;
} }
taosRemoveDir(metaBackupDir);
} else if (metaBackupExists && metaExists && !metaTempExists) {
taosRemoveDir(metaBackupDir);
} }
// Do open meta // Do open meta
@ -282,6 +445,14 @@ int32_t metaOpen(SVnode *pVnode, SMeta **ppMeta, int8_t rollback) {
return code; return code;
} }
if (generateNewMeta) {
code = metaGenerateNewMeta(ppMeta);
if (code) {
metaError("vgId:%d, %s failed at %s:%d since %s", TD_VID(pVnode), __func__, __FILE__, __LINE__, tstrerror(code));
return code;
}
}
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }

View File

@ -40,6 +40,11 @@ typedef enum EJoinTableType {
E_JOIN_TB_PROBE E_JOIN_TB_PROBE
} EJoinTableType; } EJoinTableType;
typedef enum EPrimExprType {
E_PRIM_TIMETRUNCATE = 1,
E_PRIM_VALUE
} EPrimExprType;
#define MJOIN_TBTYPE(_type) (E_JOIN_TB_BUILD == (_type) ? "BUILD" : "PROBE") #define MJOIN_TBTYPE(_type) (E_JOIN_TB_BUILD == (_type) ? "BUILD" : "PROBE")
#define IS_FULL_OUTER_JOIN(_jtype, _stype) ((_jtype) == JOIN_TYPE_FULL && (_stype) == JOIN_STYPE_OUTER) #define IS_FULL_OUTER_JOIN(_jtype, _stype) ((_jtype) == JOIN_TYPE_FULL && (_stype) == JOIN_STYPE_OUTER)
@ -87,9 +92,15 @@ typedef struct SMJoinNMatchCtx {
// for now timetruncate only // for now timetruncate only
typedef struct SMJoinPrimExprCtx { typedef struct SMJoinPrimExprCtx {
int64_t truncateUnit; EPrimExprType type;
int64_t timezoneUnit;
int32_t targetSlotId; // FOR TIMETRUNCATE
int64_t truncateUnit;
int64_t timezoneUnit;
int32_t targetSlotId;
// FOR VALUE
int64_t constTs;
} SMJoinPrimExprCtx; } SMJoinPrimExprCtx;
typedef struct SMJoinTableCtx { typedef struct SMJoinTableCtx {
@ -337,6 +348,8 @@ typedef struct SMJoinOperatorInfo {
#define PROBE_TS_NREACH(_asc, _pts, _bts) (((_asc) && (_pts) > (_bts)) || (!(_asc) && (_pts) < (_bts))) #define PROBE_TS_NREACH(_asc, _pts, _bts) (((_asc) && (_pts) > (_bts)) || (!(_asc) && (_pts) < (_bts)))
#define MJOIN_BUILD_BLK_OOR(_asc, _pts, _pidx, _bts, _bnum) (((_asc) && (*((int64_t*)(_pts) + (_pidx)) > *((int64_t*)(_bts) + (_bnum) - 1))) || ((!(_asc)) && (*((int64_t*)(_pts) + (_pidx)) < *((int64_t*)(_bts) + (_bnum) - 1)))) #define MJOIN_BUILD_BLK_OOR(_asc, _pts, _pidx, _bts, _bnum) (((_asc) && (*((int64_t*)(_pts) + (_pidx)) > *((int64_t*)(_bts) + (_bnum) - 1))) || ((!(_asc)) && (*((int64_t*)(_pts) + (_pidx)) < *((int64_t*)(_bts) + (_bnum) - 1))))
#define MJOIN_PRIM_EXPR_GOT(_pJoin) ((_pJoin)->probe->primCtx.type > 0 || (_pJoin)->build->primCtx.type > 0)
#define GRP_REMAIN_ROWS(_grp) ((_grp)->endIdx - (_grp)->readIdx + 1) #define GRP_REMAIN_ROWS(_grp) ((_grp)->endIdx - (_grp)->readIdx + 1)
#define GRP_DONE(_grp) ((_grp)->readIdx > (_grp)->endIdx) #define GRP_DONE(_grp) ((_grp)->readIdx > (_grp)->endIdx)

View File

@ -894,21 +894,7 @@ static int32_t mJoinInitFinColsInfo(SMJoinTableCtx* pTable, SNodeList* pList) {
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
static int32_t mJoinInitPrimExprCtx(SNode* pNode, SMJoinPrimExprCtx* pCtx, SMJoinTableCtx* pTable) { static int32_t mJoinInitFuncPrimExprCtx(SMJoinPrimExprCtx* pCtx, STargetNode* pTarget) {
if (NULL == pNode) {
pCtx->targetSlotId = pTable->primCol->srcSlot;
return TSDB_CODE_SUCCESS;
}
if (QUERY_NODE_TARGET != nodeType(pNode)) {
return TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR;
}
STargetNode* pTarget = (STargetNode*)pNode;
if (QUERY_NODE_FUNCTION != nodeType(pTarget->pExpr)) {
return TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR;
}
SFunctionNode* pFunc = (SFunctionNode*)pTarget->pExpr; SFunctionNode* pFunc = (SFunctionNode*)pTarget->pExpr;
if (FUNCTION_TYPE_TIMETRUNCATE != pFunc->funcType) { if (FUNCTION_TYPE_TIMETRUNCATE != pFunc->funcType) {
return TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR; return TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR;
@ -939,6 +925,47 @@ static int32_t mJoinInitPrimExprCtx(SNode* pNode, SMJoinPrimExprCtx* pCtx, SMJoi
pCtx->timezoneUnit = offsetFromTz(varDataVal(pTimeZone->datum.p), TSDB_TICK_PER_SECOND(pFunc->node.resType.precision)); pCtx->timezoneUnit = offsetFromTz(varDataVal(pTimeZone->datum.p), TSDB_TICK_PER_SECOND(pFunc->node.resType.precision));
} }
pCtx->type = E_PRIM_TIMETRUNCATE;
return TSDB_CODE_SUCCESS;
}
static int32_t mJoinInitValPrimExprCtx(SMJoinPrimExprCtx* pCtx, STargetNode* pTarget) {
SValueNode* pVal = (SValueNode*)pTarget->pExpr;
if (TSDB_DATA_TYPE_TIMESTAMP != pVal->node.resType.type) {
return TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR;
}
pCtx->constTs = pVal->datum.i;
pCtx->type = E_PRIM_VALUE;
return TSDB_CODE_SUCCESS;
}
static int32_t mJoinInitPrimExprCtx(SNode* pNode, SMJoinPrimExprCtx* pCtx, SMJoinTableCtx* pTable) {
if (NULL == pNode) {
pCtx->targetSlotId = pTable->primCol->srcSlot;
return TSDB_CODE_SUCCESS;
}
if (QUERY_NODE_TARGET != nodeType(pNode)) {
qError("primary expr node is not target, type:%d", nodeType(pNode));
return TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR;
}
STargetNode* pTarget = (STargetNode*)pNode;
if (QUERY_NODE_FUNCTION != nodeType(pTarget->pExpr) && QUERY_NODE_VALUE != nodeType(pTarget->pExpr)) {
qError("Invalid primary expr node type:%d", nodeType(pTarget->pExpr));
return TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR;
}
if (QUERY_NODE_FUNCTION == nodeType(pTarget->pExpr)) {
MJ_ERR_RET(mJoinInitFuncPrimExprCtx(pCtx, pTarget));
} else if (QUERY_NODE_VALUE == nodeType(pTarget->pExpr)) {
MJ_ERR_RET(mJoinInitValPrimExprCtx(pCtx, pTarget));
}
pCtx->targetSlotId = pTarget->slotId; pCtx->targetSlotId = pTarget->slotId;
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
@ -1045,25 +1072,36 @@ int32_t mJoinLaunchPrimExpr(SSDataBlock* pBlock, SMJoinTableCtx* pTable) {
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
SMJoinPrimExprCtx* pCtx = &pTable->primCtx;
SColumnInfoData* pPrimIn = taosArrayGet(pBlock->pDataBlock, pTable->primCol->srcSlot);
if (NULL == pPrimIn) {
return TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR;
}
SColumnInfoData* pPrimOut = taosArrayGet(pBlock->pDataBlock, pTable->primCtx.targetSlotId); SColumnInfoData* pPrimOut = taosArrayGet(pBlock->pDataBlock, pTable->primCtx.targetSlotId);
if (NULL == pPrimOut) { if (NULL == pPrimOut) {
return TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR; return TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR;
} }
if (0 != pCtx->timezoneUnit) { SMJoinPrimExprCtx* pCtx = &pTable->primCtx;
for (int32_t i = 0; i < pBlock->info.rows; ++i) { switch (pCtx->type) {
((int64_t*)pPrimOut->pData)[i] = ((int64_t*)pPrimIn->pData)[i] - (((int64_t*)pPrimIn->pData)[i] + pCtx->timezoneUnit) % pCtx->truncateUnit; case E_PRIM_TIMETRUNCATE: {
SColumnInfoData* pPrimIn = taosArrayGet(pBlock->pDataBlock, pTable->primCol->srcSlot);
if (NULL == pPrimIn) {
return TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR;
}
if (0 != pCtx->timezoneUnit) {
for (int32_t i = 0; i < pBlock->info.rows; ++i) {
((int64_t*)pPrimOut->pData)[i] = ((int64_t*)pPrimIn->pData)[i] - (((int64_t*)pPrimIn->pData)[i] + pCtx->timezoneUnit) % pCtx->truncateUnit;
}
} else {
for (int32_t i = 0; i < pBlock->info.rows; ++i) {
((int64_t*)pPrimOut->pData)[i] = ((int64_t*)pPrimIn->pData)[i] / pCtx->truncateUnit * pCtx->truncateUnit;
}
}
break;
} }
} else { case E_PRIM_VALUE: {
for (int32_t i = 0; i < pBlock->info.rows; ++i) { MJ_ERR_RET(colDataSetNItems(pPrimOut, 0, (char*)&pCtx->constTs, pBlock->info.rows, false));
((int64_t*)pPrimOut->pData)[i] = ((int64_t*)pPrimIn->pData)[i] / pCtx->truncateUnit * pCtx->truncateUnit; break;
} }
default:
break;
} }
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
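
The rewritten mJoinLaunchPrimExpr above fills the output timestamp column in one of two ways: E_PRIM_TIMETRUNCATE floors every input timestamp to a unit boundary (optionally shifted by a timezone offset), while E_PRIM_VALUE broadcasts a single constant timestamp across all rows. The standalone sketch below only illustrates the truncation arithmetic; truncate_ts, the unit sizes, and the sample values are made up for the example and are not engine code.

#include <stdint.h>
#include <stdio.h>

/* Floor ts to a unit boundary; a non-zero tzOffset shifts the boundary so the
 * floor lands on a local-time unit instead of a UTC one. */
static int64_t truncate_ts(int64_t ts, int64_t unit, int64_t tzOffset) {
  if (tzOffset != 0) {
    return ts - (ts + tzOffset) % unit;   /* timezone-aligned floor */
  }
  return ts / unit * unit;                /* plain floor */
}

int main(void) {
  int64_t day = 86400000LL;               /* one day in milliseconds */
  int64_t tz  = 8 * 3600000LL;            /* example offset in milliseconds */
  int64_t ts  = 1700000000000LL;          /* sample input timestamp */
  printf("plain floor:   %lld\n", (long long)truncate_ts(ts, day, 0));
  printf("shifted floor: %lld\n", (long long)truncate_ts(ts, day, tz));
  /* The E_PRIM_VALUE branch would instead write the same constant timestamp
   * into every row of the output column. */
  return 0;
}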

View File

@ -57,7 +57,6 @@ void destroyStreamCountAggOperatorInfo(void* param) {
} }
destroyStreamBasicInfo(&pInfo->basic); destroyStreamBasicInfo(&pInfo->basic);
cleanupExprSupp(&pInfo->scalarSupp); cleanupExprSupp(&pInfo->scalarSupp);
clearGroupResInfo(&pInfo->groupResInfo); clearGroupResInfo(&pInfo->groupResInfo);
taosArrayDestroyP(pInfo->pUpdated, destroyFlusedPos); taosArrayDestroyP(pInfo->pUpdated, destroyFlusedPos);

View File

@ -56,7 +56,6 @@ void destroyStreamEventOperatorInfo(void* param) {
} }
destroyStreamBasicInfo(&pInfo->basic); destroyStreamBasicInfo(&pInfo->basic);
clearGroupResInfo(&pInfo->groupResInfo); clearGroupResInfo(&pInfo->groupResInfo);
taosArrayDestroyP(pInfo->pUpdated, destroyFlusedPos); taosArrayDestroyP(pInfo->pUpdated, destroyFlusedPos);
pInfo->pUpdated = NULL; pInfo->pUpdated = NULL;

View File

@ -2206,7 +2206,6 @@ void destroyStreamSessionAggOperatorInfo(void* param) {
} }
destroyStreamBasicInfo(&pInfo->basic); destroyStreamBasicInfo(&pInfo->basic);
cleanupExprSupp(&pInfo->scalarSupp); cleanupExprSupp(&pInfo->scalarSupp);
clearGroupResInfo(&pInfo->groupResInfo); clearGroupResInfo(&pInfo->groupResInfo);
taosArrayDestroyP(pInfo->pUpdated, destroyFlusedPos); taosArrayDestroyP(pInfo->pUpdated, destroyFlusedPos);
@ -4445,7 +4444,6 @@ void destroyStreamStateOperatorInfo(void* param) {
} }
destroyStreamBasicInfo(&pInfo->basic); destroyStreamBasicInfo(&pInfo->basic);
clearGroupResInfo(&pInfo->groupResInfo); clearGroupResInfo(&pInfo->groupResInfo);
taosArrayDestroyP(pInfo->pUpdated, destroyFlusedPos); taosArrayDestroyP(pInfo->pUpdated, destroyFlusedPos);
pInfo->pUpdated = NULL; pInfo->pUpdated = NULL;

View File

@ -102,9 +102,11 @@ static int32_t exprNodeCopy(const SExprNode* pSrc, SExprNode* pDst) {
COPY_OBJECT_FIELD(resType, sizeof(SDataType)); COPY_OBJECT_FIELD(resType, sizeof(SDataType));
COPY_CHAR_ARRAY_FIELD(aliasName); COPY_CHAR_ARRAY_FIELD(aliasName);
COPY_CHAR_ARRAY_FIELD(userAlias); COPY_CHAR_ARRAY_FIELD(userAlias);
COPY_CHAR_ARRAY_FIELD(srcTable);
COPY_SCALAR_FIELD(asAlias); COPY_SCALAR_FIELD(asAlias);
COPY_SCALAR_FIELD(asParam); COPY_SCALAR_FIELD(asParam);
COPY_SCALAR_FIELD(asPosition); COPY_SCALAR_FIELD(asPosition);
COPY_SCALAR_FIELD(joinSrc);
COPY_SCALAR_FIELD(projIdx); COPY_SCALAR_FIELD(projIdx);
COPY_SCALAR_FIELD(relatedTo); COPY_SCALAR_FIELD(relatedTo);
COPY_SCALAR_FIELD(bindExprID); COPY_SCALAR_FIELD(bindExprID);
@ -242,6 +244,7 @@ static int32_t tableNodeCopy(const STableNode* pSrc, STableNode* pDst) {
COPY_CHAR_ARRAY_FIELD(tableAlias); COPY_CHAR_ARRAY_FIELD(tableAlias);
COPY_SCALAR_FIELD(precision); COPY_SCALAR_FIELD(precision);
COPY_SCALAR_FIELD(singleTable); COPY_SCALAR_FIELD(singleTable);
COPY_SCALAR_FIELD(inJoin);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
@ -321,6 +324,10 @@ static int32_t joinTableNodeCopy(const SJoinTableNode* pSrc, SJoinTableNode* pDs
CLONE_NODE_FIELD(addPrimCond); CLONE_NODE_FIELD(addPrimCond);
COPY_SCALAR_FIELD(hasSubQuery); COPY_SCALAR_FIELD(hasSubQuery);
COPY_SCALAR_FIELD(isLowLevelJoin); COPY_SCALAR_FIELD(isLowLevelJoin);
COPY_SCALAR_FIELD(leftNoOrderedSubQuery);
COPY_SCALAR_FIELD(rightNoOrderedSubQuery);
//COPY_SCALAR_FIELD(condAlwaysTrue);
//COPY_SCALAR_FIELD(condAlwaysFalse);
CLONE_NODE_FIELD(pLeft); CLONE_NODE_FIELD(pLeft);
CLONE_NODE_FIELD(pRight); CLONE_NODE_FIELD(pRight);
CLONE_NODE_FIELD(pOnCond); CLONE_NODE_FIELD(pOnCond);
@ -542,6 +549,11 @@ static int32_t logicJoinCopy(const SJoinLogicNode* pSrc, SJoinLogicNode* pDst) {
COPY_SCALAR_FIELD(grpJoin); COPY_SCALAR_FIELD(grpJoin);
COPY_SCALAR_FIELD(hashJoinHint); COPY_SCALAR_FIELD(hashJoinHint);
COPY_SCALAR_FIELD(batchScanHint); COPY_SCALAR_FIELD(batchScanHint);
COPY_SCALAR_FIELD(noPrimKeyEqCond);
COPY_SCALAR_FIELD(leftConstPrimGot);
COPY_SCALAR_FIELD(rightConstPrimGot);
COPY_SCALAR_FIELD(leftNoOrderedSubQuery);
COPY_SCALAR_FIELD(rightNoOrderedSubQuery);
CLONE_NODE_FIELD(pLeftOnCond); CLONE_NODE_FIELD(pLeftOnCond);
CLONE_NODE_FIELD(pRightOnCond); CLONE_NODE_FIELD(pRightOnCond);
COPY_SCALAR_FIELD(timeRangeTarget); COPY_SCALAR_FIELD(timeRangeTarget);

View File

@ -3280,3 +3280,76 @@ void rewriteExprAliasName(SExprNode* pNode, int64_t num) {
bool isRelatedToOtherExpr(SExprNode* pExpr) { bool isRelatedToOtherExpr(SExprNode* pExpr) {
return pExpr->relatedTo != 0; return pExpr->relatedTo != 0;
} }
typedef struct SContainsColCxt {
bool containsCol;
} SContainsColCxt;
static EDealRes nodeContainsCol(SNode* pNode, void* pContext) {
SContainsColCxt* pCxt = pContext;
if (QUERY_NODE_COLUMN == nodeType(pNode)) {
pCxt->containsCol = true;
return DEAL_RES_END;
}
return DEAL_RES_CONTINUE;
}
bool nodesContainsColumn(SNode* pNode) {
if (NULL == pNode) {
return false;
}
SContainsColCxt cxt = {0};
nodesWalkExpr(pNode, nodeContainsCol, &cxt);
return cxt.containsCol;
}
int32_t mergeNodeToLogic(SNode** pDst, SNode** pSrc) {
SLogicConditionNode* pLogicCond = NULL;
int32_t code = nodesMakeNode(QUERY_NODE_LOGIC_CONDITION, (SNode**)&pLogicCond);
if (NULL == pLogicCond) {
return code;
}
pLogicCond->node.resType.type = TSDB_DATA_TYPE_BOOL;
pLogicCond->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_BOOL].bytes;
pLogicCond->condType = LOGIC_COND_TYPE_AND;
code = nodesListMakeAppend(&pLogicCond->pParameterList, *pSrc);
if (TSDB_CODE_SUCCESS == code) {
*pSrc = NULL;
code = nodesListMakeAppend(&pLogicCond->pParameterList, *pDst);
}
if (TSDB_CODE_SUCCESS == code) {
*pDst = (SNode*)pLogicCond;
} else {
nodesDestroyNode((SNode*)pLogicCond);
}
return code;
}
int32_t nodesMergeNode(SNode** pCond, SNode** pAdditionalCond) {
if (NULL == *pCond) {
TSWAP(*pCond, *pAdditionalCond);
return TSDB_CODE_SUCCESS;
}
int32_t code = TSDB_CODE_SUCCESS;
if (QUERY_NODE_LOGIC_CONDITION == nodeType(*pCond) &&
LOGIC_COND_TYPE_AND == ((SLogicConditionNode*)*pCond)->condType) {
code = nodesListAppend(((SLogicConditionNode*)*pCond)->pParameterList, *pAdditionalCond);
if (TSDB_CODE_SUCCESS == code) {
*pAdditionalCond = NULL;
}
} else {
code = mergeNodeToLogic(pCond, pAdditionalCond);
}
return code;
}
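
nodesMergeNode above folds an extra predicate into an existing condition: take it over when the destination is empty, append when the destination is already an AND logic node, and otherwise wrap both predicates in a new AND node. A minimal self-contained sketch of that pattern follows; the Node type and the merge_and/leaf helpers are invented for illustration and are not the engine's node API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct Node {
  int isAnd;                 /* 1 if this node is an AND list */
  char expr[32];             /* leaf predicate text, for printing only */
  struct Node *kids[8];
  int n;
} Node;

static Node *leaf(const char *e) {
  Node *p = calloc(1, sizeof(Node));
  strncpy(p->expr, e, sizeof(p->expr) - 1);
  return p;
}

/* Fold *src into *dst; ownership of *src moves to *dst. */
static void merge_and(Node **dst, Node **src) {
  if (*dst == NULL) { *dst = *src; *src = NULL; return; }                        /* take over */
  if ((*dst)->isAnd) { (*dst)->kids[(*dst)->n++] = *src; *src = NULL; return; }  /* append */
  Node *andNode = calloc(1, sizeof(Node));                                       /* wrap both */
  andNode->isAnd = 1;
  andNode->kids[andNode->n++] = *dst;
  andNode->kids[andNode->n++] = *src;
  *dst = andNode;
  *src = NULL;
}

int main(void) {
  Node *cond = NULL;
  Node *a = leaf("t1.ts = t2.ts"), *b = leaf("t1.c1 > 5");
  merge_and(&cond, &a);      /* cond is now the single predicate */
  merge_and(&cond, &b);      /* cond becomes AND(t1.ts = t2.ts, t1.c1 > 5) */
  printf("isAnd=%d children=%d\n", cond->isAnd, cond->n);
  return 0;
}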

View File

@ -42,12 +42,12 @@ int32_t translatePostCreateStream(SParseContext* pParseCxt, SQuery* pQuery, SSDa
int32_t translatePostCreateSmaIndex(SParseContext* pParseCxt, SQuery* pQuery, SSDataBlock* pBlock); int32_t translatePostCreateSmaIndex(SParseContext* pParseCxt, SQuery* pQuery, SSDataBlock* pBlock);
int32_t translatePostCreateTSMA(SParseContext* pParseCxt, SQuery* pQuery, SSDataBlock* pBlock); int32_t translatePostCreateTSMA(SParseContext* pParseCxt, SQuery* pQuery, SSDataBlock* pBlock);
int32_t buildQueryAfterParse(SQuery** pQuery, SNode* pRootNode, int16_t placeholderNo, SArray** pPlaceholderValues); int32_t buildQueryAfterParse(SQuery** pQuery, SNode* pRootNode, int16_t placeholderNo, SArray** pPlaceholderValues);
int32_t translateTable(STranslateContext* pCxt, SNode** pTable, SNode* pJoinParent); int32_t translateTable(STranslateContext* pCxt, SNode** pTable, bool inJoin);
int32_t getMetaDataFromHash(const char* pKey, int32_t len, SHashObj* pHash, void** pOutput); int32_t getMetaDataFromHash(const char* pKey, int32_t len, SHashObj* pHash, void** pOutput);
void tfreeSParseQueryRes(void* p); void tfreeSParseQueryRes(void* p);
#ifdef TD_ENTERPRISE #ifdef TD_ENTERPRISE
int32_t translateView(STranslateContext* pCxt, SNode** pTable, SName* pName); int32_t translateView(STranslateContext* pCxt, SNode** pTable, SName* pName, bool inJoin);
int32_t getViewMetaFromMetaCache(STranslateContext* pCxt, SName* pName, SViewMeta** ppViewMeta); int32_t getViewMetaFromMetaCache(STranslateContext* pCxt, SName* pName, SViewMeta** ppViewMeta);
#endif #endif
#ifdef __cplusplus #ifdef __cplusplus

View File

@ -118,6 +118,39 @@ static int32_t calcConstCondition(SCalcConstContext* pCxt, SNode** pNode) {
return code; return code;
} }
static EDealRes rewriteCalcConstValue(SNode** pNode, void* pContext) {
int32_t code = TSDB_CODE_SUCCESS;
int32_t lino = 0;
SCalcConstContext* pCtx = (SCalcConstContext*)pContext;
if (QUERY_NODE_LOGIC_CONDITION == nodeType(*pNode)) {
return DEAL_RES_CONTINUE;
} else if (QUERY_NODE_OPERATOR == nodeType(*pNode)) {
SOperatorNode* pOp = (SOperatorNode*)*pNode;
if (OP_TYPE_EQUAL == pOp->opType && (TSDB_DATA_TYPE_TIMESTAMP == ((SExprNode*)pOp->pLeft)->resType.type || TSDB_DATA_TYPE_TIMESTAMP == ((SExprNode*)pOp->pRight)->resType.type)) {
code = calcConstNode(&pOp->pLeft);
if (TSDB_CODE_SUCCESS == code) {
code = calcConstNode(&pOp->pRight);
}
goto _end;
}
}
if (TSDB_CODE_SUCCESS == code) {
code = calcConstCondition(pCtx, pNode);
}
_end:
if (code != TSDB_CODE_SUCCESS) {
qError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
return DEAL_RES_ERROR;
}
return DEAL_RES_IGNORE_CHILD;
}
static int32_t rewriteConditionForFromTable(SCalcConstContext* pCxt, SNode* pTable) { static int32_t rewriteConditionForFromTable(SCalcConstContext* pCxt, SNode* pTable) {
int32_t code = TSDB_CODE_SUCCESS; int32_t code = TSDB_CODE_SUCCESS;
switch (nodeType(pTable)) { switch (nodeType(pTable)) {
@ -127,13 +160,32 @@ static int32_t rewriteConditionForFromTable(SCalcConstContext* pCxt, SNode* pTab
} }
case QUERY_NODE_JOIN_TABLE: { case QUERY_NODE_JOIN_TABLE: {
SJoinTableNode* pJoin = (SJoinTableNode*)pTable; SJoinTableNode* pJoin = (SJoinTableNode*)pTable;
SNode* pCond = NULL;
code = rewriteConditionForFromTable(pCxt, pJoin->pLeft); code = rewriteConditionForFromTable(pCxt, pJoin->pLeft);
if (TSDB_CODE_SUCCESS == code) { if (TSDB_CODE_SUCCESS == code) {
code = rewriteConditionForFromTable(pCxt, pJoin->pRight); code = rewriteConditionForFromTable(pCxt, pJoin->pRight);
} }
if (TSDB_CODE_SUCCESS == code && NULL != pJoin->pOnCond) {
code = rewriteCondition(pCxt, &pJoin->pOnCond);
}
if (TSDB_CODE_SUCCESS == code && NULL != pJoin->pOnCond) {
nodesRewriteExpr(&pJoin->pOnCond, rewriteCalcConstValue, pCxt);
}
/*
if (TSDB_CODE_SUCCESS == code && NULL != pJoin->pOnCond) {
code = nodesCloneNode(pJoin->pOnCond, &pCond);
}
if (TSDB_CODE_SUCCESS == code && NULL != pJoin->pOnCond) { if (TSDB_CODE_SUCCESS == code && NULL != pJoin->pOnCond) {
code = calcConstCondition(pCxt, &pJoin->pOnCond); code = calcConstCondition(pCxt, &pJoin->pOnCond);
} }
if (TSDB_CODE_SUCCESS == code && pJoin->pOnCond && QUERY_NODE_VALUE == nodeType(pJoin->pOnCond)) {
nodesDestroyNode(pJoin->pOnCond);
pJoin->pOnCond = pCond;
pCond = NULL;
}
nodesDestroyNode(pCond);
*/
// todo empty table // todo empty table
break; break;
} }
@ -207,6 +259,7 @@ static int32_t findAndReplaceNode(SCalcConstContext* pCxt, SNode** pRoot, SNode*
static int32_t calcConstProject(SCalcConstContext* pCxt, SNode* pProject, bool dual, SNode** pNew) { static int32_t calcConstProject(SCalcConstContext* pCxt, SNode* pProject, bool dual, SNode** pNew) {
SArray* pAssociation = NULL; SArray* pAssociation = NULL;
if (NULL != ((SExprNode*)pProject)->pAssociation) { if (NULL != ((SExprNode*)pProject)->pAssociation) {
pAssociation = taosArrayDup(((SExprNode*)pProject)->pAssociation, NULL); pAssociation = taosArrayDup(((SExprNode*)pProject)->pAssociation, NULL);
if (NULL == pAssociation) { if (NULL == pAssociation) {
@ -214,7 +267,7 @@ static int32_t calcConstProject(SCalcConstContext* pCxt, SNode* pProject, bool d
} }
} }
char aliasName[TSDB_COL_NAME_LEN] = {0}; char aliasName[TSDB_COL_NAME_LEN] = {0}, srcTable[TSDB_TABLE_NAME_LEN] = {0};
int32_t code = TSDB_CODE_SUCCESS; int32_t code = TSDB_CODE_SUCCESS;
if (dual) { if (dual) {
code = scalarCalculateConstantsFromDual(pProject, pNew); code = scalarCalculateConstantsFromDual(pProject, pNew);
@ -227,8 +280,16 @@ static int32_t calcConstProject(SCalcConstContext* pCxt, SNode* pProject, bool d
for (int32_t i = 0; i < size; ++i) { for (int32_t i = 0; i < size; ++i) {
SAssociationNode* pAssNode = taosArrayGet(pAssociation, i); SAssociationNode* pAssNode = taosArrayGet(pAssociation, i);
SNode** pCol = pAssNode->pPlace; SNode** pCol = pAssNode->pPlace;
if (((SExprNode*)pAssNode->pAssociationNode)->joinSrc) {
//((SExprNode*)pAssNode->pAssociationNode)->constValue = true;
continue;
}
if (*pCol == pAssNode->pAssociationNode) { if (*pCol == pAssNode->pAssociationNode) {
tstrncpy(aliasName, ((SExprNode*)*pCol)->aliasName, TSDB_COL_NAME_LEN); tstrncpy(aliasName, ((SExprNode*)*pCol)->aliasName, TSDB_COL_NAME_LEN);
if (QUERY_NODE_COLUMN == nodeType(*pCol)) {
tstrncpy(srcTable, ((SColumnNode*)*pCol)->tableAlias, TSDB_TABLE_NAME_LEN);
}
SArray* pOrigAss = NULL; SArray* pOrigAss = NULL;
TSWAP(((SExprNode*)*pCol)->pAssociation, pOrigAss); TSWAP(((SExprNode*)*pCol)->pAssociation, pOrigAss);
nodesDestroyNode(*pCol); nodesDestroyNode(*pCol);
@ -236,6 +297,9 @@ static int32_t calcConstProject(SCalcConstContext* pCxt, SNode* pProject, bool d
code = nodesCloneNode(*pNew, pCol); code = nodesCloneNode(*pNew, pCol);
if (TSDB_CODE_SUCCESS == code) { if (TSDB_CODE_SUCCESS == code) {
tstrncpy(((SExprNode*)*pCol)->aliasName, aliasName, TSDB_COL_NAME_LEN); tstrncpy(((SExprNode*)*pCol)->aliasName, aliasName, TSDB_COL_NAME_LEN);
if (srcTable[0]) {
tstrncpy(((SExprNode*)*pCol)->srcTable, srcTable, TSDB_TABLE_NAME_LEN);
}
TSWAP(pOrigAss, ((SExprNode*)*pCol)->pAssociation); TSWAP(pOrigAss, ((SExprNode*)*pCol)->pAssociation);
} }
taosArrayDestroy(pOrigAss); taosArrayDestroy(pOrigAss);

View File

@ -1383,7 +1383,7 @@ static void setColumnInfoBySchema(const SRealTableNode* pTable, const SSchema* p
pCol->numOfPKs = pTable->pMeta->tableInfo.numOfPKs; pCol->numOfPKs = pTable->pMeta->tableInfo.numOfPKs;
} }
static int32_t setColumnInfoByExpr(STempTableNode* pTable, SExprNode* pExpr, SColumnNode** pColRef) { static int32_t setColumnInfoByExpr(STempTableNode* pTable, SExprNode* pExpr, SColumnNode** pColRef, bool joinSrc) {
SColumnNode* pCol = *pColRef; SColumnNode* pCol = *pColRef;
if (NULL == pExpr->pAssociation) { if (NULL == pExpr->pAssociation) {
@ -1413,6 +1413,7 @@ static int32_t setColumnInfoByExpr(STempTableNode* pTable, SExprNode* pExpr, SCo
tstrncpy(pCol->node.userAlias, pExpr->userAlias, TSDB_COL_NAME_LEN); tstrncpy(pCol->node.userAlias, pExpr->userAlias, TSDB_COL_NAME_LEN);
} }
pCol->node.resType = pExpr->resType; pCol->node.resType = pExpr->resType;
pCol->node.joinSrc = pTable->table.inJoin && joinSrc;
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
@ -1494,7 +1495,7 @@ static int32_t createColumnsByTable(STranslateContext* pCxt, const STableNode* p
code = nodesListStrictAppend(pList, (SNode*)pCol); code = nodesListStrictAppend(pList, (SNode*)pCol);
if (TSDB_CODE_SUCCESS == code) { if (TSDB_CODE_SUCCESS == code) {
SListCell* pCell = nodesListGetCell(pList, LIST_LENGTH(pList) - 1); SListCell* pCell = nodesListGetCell(pList, LIST_LENGTH(pList) - 1);
code = setColumnInfoByExpr(pTempTable, (SExprNode*)pNode, (SColumnNode**)&pCell->pNode); code = setColumnInfoByExpr(pTempTable, (SExprNode*)pNode, (SColumnNode**)&pCell->pNode, true);
} }
if (TSDB_CODE_SUCCESS == code) { if (TSDB_CODE_SUCCESS == code) {
if (!skipProjRef) if (!skipProjRef)
@ -1591,7 +1592,7 @@ static int32_t findAndSetColumn(STranslateContext* pCxt, SColumnNode** pColRef,
} }
} }
if (pFoundExpr) { if (pFoundExpr) {
code = setColumnInfoByExpr(pTempTable, pFoundExpr, pColRef); code = setColumnInfoByExpr(pTempTable, pFoundExpr, pColRef, SQL_CLAUSE_FROM != pCxt->currClause);
if (TSDB_CODE_SUCCESS != code) { if (TSDB_CODE_SUCCESS != code) {
return code; return code;
} }
@ -4753,14 +4754,25 @@ static int32_t checkJoinTable(STranslateContext* pCxt, SJoinTableNode* pJoinTabl
} }
} }
if ((QUERY_NODE_TEMP_TABLE == nodeType(pJoinTable->pLeft) && if (QUERY_NODE_TEMP_TABLE == nodeType(pJoinTable->pLeft) &&
!isGlobalTimeLineQuery(((STempTableNode*)pJoinTable->pLeft)->pSubquery)) || !isGlobalTimeLineQuery(((STempTableNode*)pJoinTable->pLeft)->pSubquery)) {
(QUERY_NODE_TEMP_TABLE == nodeType(pJoinTable->pRight) && if (IS_ASOF_JOIN(pJoinTable->subType) || IS_WINDOW_JOIN(pJoinTable->subType)) {
!isGlobalTimeLineQuery(((STempTableNode*)pJoinTable->pRight)->pSubquery))) { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_SUPPORT_JOIN,
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_SUPPORT_JOIN, "Join requires valid time series input");
"Join requires valid time series input"); }
pJoinTable->leftNoOrderedSubQuery = true;
} }
if (QUERY_NODE_TEMP_TABLE == nodeType(pJoinTable->pRight) &&
!isGlobalTimeLineQuery(((STempTableNode*)pJoinTable->pRight)->pSubquery)) {
if (IS_ASOF_JOIN(pJoinTable->subType) || IS_WINDOW_JOIN(pJoinTable->subType)) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_SUPPORT_JOIN,
"Join requires valid time series input");
}
pJoinTable->rightNoOrderedSubQuery = true;
}
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
@ -5088,9 +5100,141 @@ static int32_t setJoinTimeLineResMode(STranslateContext* pCxt) {
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
int32_t translateTable(STranslateContext* pCxt, SNode** pTable, SNode* pJoinParent) { int32_t mergeInnerJoinConds(SNode** ppDst, SNode** ppSrc) {
SNode* pNew = NULL;
int32_t code = TSDB_CODE_SUCCESS;
while (true) {
if (QUERY_NODE_LOGIC_CONDITION == nodeType(*ppDst) && ((SLogicConditionNode*)*ppDst)->condType == LOGIC_COND_TYPE_AND) {
SLogicConditionNode* pLogic = (SLogicConditionNode*)*ppDst;
if (QUERY_NODE_LOGIC_CONDITION == nodeType(*ppSrc) && ((SLogicConditionNode*)*ppSrc)->condType == LOGIC_COND_TYPE_AND) {
SLogicConditionNode* pSrcLogic = (SLogicConditionNode*)*ppSrc;
code = nodesListMakeStrictAppendList(&pLogic->pParameterList, pSrcLogic->pParameterList);
if (TSDB_CODE_SUCCESS == code) {
pSrcLogic->pParameterList = NULL;
nodesDestroyNode(*ppSrc);
*ppSrc = NULL;
}
} else {
code = nodesListMakeStrictAppend(&pLogic->pParameterList, *ppSrc);
if (TSDB_CODE_SUCCESS == code) {
*ppSrc = NULL;
}
}
return code;
}
if (TSDB_CODE_SUCCESS == code && QUERY_NODE_LOGIC_CONDITION == nodeType(*ppSrc) && ((SLogicConditionNode*)*ppSrc)->condType == LOGIC_COND_TYPE_AND) {
SNode* pTmp = *ppDst;
*ppDst = *ppSrc;
*ppSrc = pTmp;
continue;
}
if (TSDB_CODE_SUCCESS == code) {
code = nodesMakeNode(QUERY_NODE_LOGIC_CONDITION, &pNew);
}
if (TSDB_CODE_SUCCESS == code) {
SLogicConditionNode* pLogic = (SLogicConditionNode*)pNew;
pLogic->condType = LOGIC_COND_TYPE_AND;
pLogic->node.resType.type = TSDB_DATA_TYPE_BOOL;
pLogic->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_BOOL].bytes;
code = nodesListMakeStrictAppend(&pLogic->pParameterList, *ppSrc);
if (TSDB_CODE_SUCCESS == code) {
*ppSrc = *ppDst;
*ppDst = pNew;
continue;
}
}
if (code) {
break;
}
}
return code;
}
bool isColumnExpr(SNode* pNode) {
SExprNode* pExpr = (SExprNode*)pNode;
if (QUERY_NODE_COLUMN != nodeType(pNode) && QUERY_NODE_FUNCTION != nodeType(pNode)) {
return false;
}
if (QUERY_NODE_FUNCTION == nodeType(pNode)) {
SFunctionNode* pFunc = (SFunctionNode*)pNode;
if (FUNCTION_TYPE_TIMETRUNCATE != pFunc->funcType && strcasecmp(((SFunctionNode*)pNode)->functionName, "timetruncate")) {
return false;
}
if (!nodesContainsColumn(nodesListGetNode(pFunc->pParameterList, 0))) {
return false;
}
}
return true;
}
int32_t splitJoinColPrimaryCond(SNode** ppSrc, SNode** ppDst) {
if (NULL == *ppSrc) {
return TSDB_CODE_SUCCESS;
}
int32_t code = 0;
switch (nodeType(*ppSrc)) {
case QUERY_NODE_OPERATOR: {
SOperatorNode* pOp = (SOperatorNode*)*ppSrc;
if (OP_TYPE_EQUAL != pOp->opType) {
break;
}
if (isColumnExpr(pOp->pLeft) && isColumnExpr(pOp->pRight)) {
TSWAP(*ppSrc, *ppDst);
}
break;
}
case QUERY_NODE_LOGIC_CONDITION: {
SLogicConditionNode* pLogic = (SLogicConditionNode*)*ppSrc;
if (LOGIC_COND_TYPE_AND != pLogic->condType) {
break;
}
SNode* pTmp = NULL;
SNode* pTmpRes = NULL;
WHERE_EACH(pTmp, pLogic->pParameterList) {
code = splitJoinColPrimaryCond(&pTmp, &pTmpRes);
if (code) {
break;
}
if (NULL == pTmp && NULL != pTmpRes) {
cell->pNode = NULL;
ERASE_NODE(pLogic->pParameterList);
code = nodesMergeNode(ppDst, &pTmpRes);
if (code) {
break;
}
continue;
}
WHERE_NEXT;
}
if (pLogic->pParameterList->length <= 0) {
nodesDestroyNode(*ppSrc);
*ppSrc = NULL;
}
break;
}
default:
break;
}
return code;
}
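
splitJoinColPrimaryCond above walks a WHERE condition and pulls out the equality predicates whose two sides are both column expressions (or TIMETRUNCATE over a column), so that translateTable can later move them into the inner join's ON condition. A toy version of that partitioning, with a deliberately simplified predicate model, is sketched below; Pred and is_col_equal are illustrative only.

#include <stdbool.h>
#include <stdio.h>

typedef struct {
  const char *lhs, *op, *rhs;
  bool lhsIsCol, rhsIsCol;     /* whether each side is a column expression */
} Pred;

static bool is_col_equal(const Pred *p) {
  return p->lhsIsCol && p->rhsIsCol && p->op[0] == '=';
}

int main(void) {
  Pred where[] = {
    {"t1.ts", "=", "t2.ts", true, true},    /* candidate for the ON condition */
    {"t1.v",  ">", "10",    true, false},   /* stays in the WHERE condition   */
  };
  for (int i = 0; i < 2; ++i) {
    printf("%s %s %s -> %s\n", where[i].lhs, where[i].op, where[i].rhs,
           is_col_equal(&where[i]) ? "move to ON" : "keep in WHERE");
  }
  return 0;
}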
int32_t translateTable(STranslateContext* pCxt, SNode** pTable, bool inJoin) {
SSelectStmt* pCurrSmt = (SSelectStmt*)(pCxt->pCurrStmt); SSelectStmt* pCurrSmt = (SSelectStmt*)(pCxt->pCurrStmt);
int32_t code = TSDB_CODE_SUCCESS; int32_t code = TSDB_CODE_SUCCESS;
((STableNode*)*pTable)->inJoin = inJoin;
switch (nodeType(*pTable)) { switch (nodeType(*pTable)) {
case QUERY_NODE_REAL_TABLE: { case QUERY_NODE_REAL_TABLE: {
SRealTableNode* pRealTable = (SRealTableNode*)*pTable; SRealTableNode* pRealTable = (SRealTableNode*)*pTable;
@ -5106,7 +5250,7 @@ int32_t translateTable(STranslateContext* pCxt, SNode** pTable, SNode* pJoinPare
} }
#ifdef TD_ENTERPRISE #ifdef TD_ENTERPRISE
if (TSDB_VIEW_TABLE == pRealTable->pMeta->tableType && (!pCurrSmt->tagScan || pCxt->pParseCxt->biMode)) { if (TSDB_VIEW_TABLE == pRealTable->pMeta->tableType && (!pCurrSmt->tagScan || pCxt->pParseCxt->biMode)) {
return translateView(pCxt, pTable, &name); return translateView(pCxt, pTable, &name, inJoin);
} }
code = translateAudit(pCxt, pRealTable, &name); code = translateAudit(pCxt, pRealTable, &name);
#endif #endif
@ -5163,14 +5307,27 @@ int32_t translateTable(STranslateContext* pCxt, SNode** pTable, SNode* pJoinPare
SJoinTableNode* pJoinTable = (SJoinTableNode*)*pTable; SJoinTableNode* pJoinTable = (SJoinTableNode*)*pTable;
code = translateJoinTable(pCxt, pJoinTable); code = translateJoinTable(pCxt, pJoinTable);
if (TSDB_CODE_SUCCESS == code) { if (TSDB_CODE_SUCCESS == code) {
code = translateTable(pCxt, &pJoinTable->pLeft, (SNode*)pJoinTable); code = translateTable(pCxt, &pJoinTable->pLeft, true);
} }
if (TSDB_CODE_SUCCESS == code) { if (TSDB_CODE_SUCCESS == code) {
code = translateTable(pCxt, &pJoinTable->pRight, (SNode*)pJoinTable); code = translateTable(pCxt, &pJoinTable->pRight, true);
} }
if (TSDB_CODE_SUCCESS == code) { if (TSDB_CODE_SUCCESS == code) {
code = checkJoinTable(pCxt, pJoinTable); code = checkJoinTable(pCxt, pJoinTable);
} }
if (TSDB_CODE_SUCCESS == code && !inJoin && pCurrSmt->pWhere && JOIN_TYPE_INNER == pJoinTable->joinType) {
SNode* pPrimCond = NULL;
code = splitJoinColPrimaryCond(&pCurrSmt->pWhere, &pPrimCond);
if (TSDB_CODE_SUCCESS == code && pPrimCond) {
if (pJoinTable->pOnCond) {
code = mergeInnerJoinConds(&pJoinTable->pOnCond, &pPrimCond);
} else {
pJoinTable->pOnCond = pPrimCond;
pPrimCond = NULL;
}
}
nodesDestroyNode(pPrimCond);
}
if (TSDB_CODE_SUCCESS == code) { if (TSDB_CODE_SUCCESS == code) {
pJoinTable->table.precision = calcJoinTablePrecision(pJoinTable); pJoinTable->table.precision = calcJoinTablePrecision(pJoinTable);
pJoinTable->table.singleTable = joinTableIsSingleTable(pJoinTable); pJoinTable->table.singleTable = joinTableIsSingleTable(pJoinTable);
@ -7123,7 +7280,7 @@ static int32_t translateWhere(STranslateContext* pCxt, SSelectStmt* pSelect) {
static int32_t translateFrom(STranslateContext* pCxt, SNode** pTable) { static int32_t translateFrom(STranslateContext* pCxt, SNode** pTable) {
pCxt->currClause = SQL_CLAUSE_FROM; pCxt->currClause = SQL_CLAUSE_FROM;
return translateTable(pCxt, pTable, NULL); return translateTable(pCxt, pTable, false);
} }
static int32_t checkLimit(STranslateContext* pCxt, SSelectStmt* pSelect) { static int32_t checkLimit(STranslateContext* pCxt, SSelectStmt* pSelect) {
@ -7555,7 +7712,7 @@ static EDealRes rewriteSingleColsFunc(SNode** pNode, void* pContext) {
} }
if (pFunc->node.asAlias) { if (pFunc->node.asAlias) {
if (((SExprNode*)pExpr)->asAlias) { if (((SExprNode*)pExpr)->asAlias) {
pCxt->status = TSDB_CODE_INVALID_COLS_ALIAS; pCxt->status = TSDB_CODE_PAR_INVALID_COLS_ALIAS;
parserError("%s Invalid using alias for cols function", __func__); parserError("%s Invalid using alias for cols function", __func__);
return DEAL_RES_ERROR; return DEAL_RES_ERROR;
} else { } else {
@ -7664,7 +7821,7 @@ static int32_t rewriteColsFunction(STranslateContext* pCxt, SNodeList** nodeList
if (isMultiColsFuncNode(pTmpNode)) { if (isMultiColsFuncNode(pTmpNode)) {
SFunctionNode* pFunc = (SFunctionNode*)pTmpNode; SFunctionNode* pFunc = (SFunctionNode*)pTmpNode;
if(pFunc->node.asAlias) { if(pFunc->node.asAlias) {
code = TSDB_CODE_INVALID_COLS_ALIAS; code = TSDB_CODE_PAR_INVALID_COLS_ALIAS;
parserError("%s Invalid using alias for cols function", __func__); parserError("%s Invalid using alias for cols function", __func__);
goto _end; goto _end;
} }

View File

@ -600,6 +600,9 @@ static int32_t createJoinLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect
pJoin->node.requireDataOrder = pJoin->hashJoinHint ? DATA_ORDER_LEVEL_NONE : DATA_ORDER_LEVEL_GLOBAL; pJoin->node.requireDataOrder = pJoin->hashJoinHint ? DATA_ORDER_LEVEL_NONE : DATA_ORDER_LEVEL_GLOBAL;
pJoin->node.resultDataOrder = DATA_ORDER_LEVEL_NONE; pJoin->node.resultDataOrder = DATA_ORDER_LEVEL_NONE;
pJoin->isLowLevelJoin = pJoinTable->isLowLevelJoin; pJoin->isLowLevelJoin = pJoinTable->isLowLevelJoin;
pJoin->leftNoOrderedSubQuery = pJoinTable->leftNoOrderedSubQuery;
pJoin->rightNoOrderedSubQuery = pJoinTable->rightNoOrderedSubQuery;
code = nodesCloneNode(pJoinTable->pWindowOffset, &pJoin->pWindowOffset); code = nodesCloneNode(pJoinTable->pWindowOffset, &pJoin->pWindowOffset);
if (TSDB_CODE_SUCCESS == code) { if (TSDB_CODE_SUCCESS == code) {
code = nodesCloneNode(pJoinTable->pJLimit, &pJoin->pJLimit); code = nodesCloneNode(pJoinTable->pJLimit, &pJoin->pJLimit);

View File

@ -20,6 +20,7 @@
#include "systable.h" #include "systable.h"
#include "tglobal.h" #include "tglobal.h"
#include "ttime.h" #include "ttime.h"
#include "scalar.h"
#define OPTIMIZE_FLAG_MASK(n) (1 << n) #define OPTIMIZE_FLAG_MASK(n) (1 << n)
@ -502,52 +503,12 @@ static int32_t scanPathOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLogicSub
return code; return code;
} }
static int32_t pdcMergeCondsToLogic(SNode** pDst, SNode** pSrc) {
SLogicConditionNode* pLogicCond = NULL;
int32_t code = nodesMakeNode(QUERY_NODE_LOGIC_CONDITION, (SNode**)&pLogicCond);
if (NULL == pLogicCond) {
return code;
}
pLogicCond->node.resType.type = TSDB_DATA_TYPE_BOOL;
pLogicCond->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_BOOL].bytes;
pLogicCond->condType = LOGIC_COND_TYPE_AND;
code = nodesListMakeAppend(&pLogicCond->pParameterList, *pSrc);
if (TSDB_CODE_SUCCESS == code) {
*pSrc = NULL;
code = nodesListMakeAppend(&pLogicCond->pParameterList, *pDst);
}
if (TSDB_CODE_SUCCESS == code) {
*pDst = (SNode*)pLogicCond;
} else {
nodesDestroyNode((SNode*)pLogicCond);
}
return code;
}
static int32_t pdcMergeConds(SNode** pCond, SNode** pAdditionalCond) {
if (NULL == *pCond) {
TSWAP(*pCond, *pAdditionalCond);
return TSDB_CODE_SUCCESS;
}
int32_t code = TSDB_CODE_SUCCESS;
if (QUERY_NODE_LOGIC_CONDITION == nodeType(*pCond) &&
LOGIC_COND_TYPE_AND == ((SLogicConditionNode*)*pCond)->condType) {
code = nodesListAppend(((SLogicConditionNode*)*pCond)->pParameterList, *pAdditionalCond);
if (TSDB_CODE_SUCCESS == code) {
*pAdditionalCond = NULL;
}
} else {
code = pdcMergeCondsToLogic(pCond, pAdditionalCond);
}
return code;
}
static int32_t pushDownCondOptCalcTimeRange(SOptimizeContext* pCxt, SScanLogicNode* pScan, SNode** pPrimaryKeyCond, static int32_t pushDownCondOptCalcTimeRange(SOptimizeContext* pCxt, SScanLogicNode* pScan, SNode** pPrimaryKeyCond,
SNode** pOtherCond) { SNode** pOtherCond) {
int32_t code = TSDB_CODE_SUCCESS; int32_t code = TSDB_CODE_SUCCESS;
if (pCxt->pPlanCxt->topicQuery || pCxt->pPlanCxt->streamQuery) { if (pCxt->pPlanCxt->topicQuery || pCxt->pPlanCxt->streamQuery) {
code = pdcMergeConds(pOtherCond, pPrimaryKeyCond); code = nodesMergeNode(pOtherCond, pPrimaryKeyCond);
} else { } else {
bool isStrict = false; bool isStrict = false;
code = filterGetTimeRange(*pPrimaryKeyCond, &pScan->scanRange, &isStrict); code = filterGetTimeRange(*pPrimaryKeyCond, &pScan->scanRange, &isStrict);
@ -555,7 +516,7 @@ static int32_t pushDownCondOptCalcTimeRange(SOptimizeContext* pCxt, SScanLogicNo
if (isStrict) { if (isStrict) {
nodesDestroyNode(*pPrimaryKeyCond); nodesDestroyNode(*pPrimaryKeyCond);
} else { } else {
code = pdcMergeConds(pOtherCond, pPrimaryKeyCond); code = nodesMergeNode(pOtherCond, pPrimaryKeyCond);
} }
*pPrimaryKeyCond = NULL; *pPrimaryKeyCond = NULL;
} }
@ -629,8 +590,16 @@ static bool pdcJoinColInTableColList(SNode* pNode, SNodeList* pTableCols) {
} }
static bool pdcJoinColInTableList(SNode* pCondCol, SSHashObj* pTables) { static bool pdcJoinColInTableList(SNode* pCondCol, SSHashObj* pTables) {
SColumnNode* pTableCol = (SColumnNode*)pCondCol; char* pTableAlias = NULL;
if (NULL == tSimpleHashGet(pTables, pTableCol->tableAlias, strlen(pTableCol->tableAlias))) { if (QUERY_NODE_COLUMN == nodeType(pCondCol)) {
SColumnNode* pTableCol = (SColumnNode*)pCondCol;
pTableAlias = pTableCol->tableAlias;
} else if (QUERY_NODE_VALUE == nodeType(pCondCol)) {
SValueNode* pVal = (SValueNode*)pCondCol;
pTableAlias = pVal->node.srcTable;
}
if (NULL == tSimpleHashGet(pTables, pTableAlias, strlen(pTableAlias))) {
return false; return false;
} }
return true; return true;
@ -819,15 +788,27 @@ static int32_t pdcJoinSplitCond(SJoinLogicNode* pJoin, SNode** pSrcCond, SNode**
} }
static int32_t pdcJoinPushDownOnCond(SOptimizeContext* pCxt, SJoinLogicNode* pJoin, SNode** pCond) { static int32_t pdcJoinPushDownOnCond(SOptimizeContext* pCxt, SJoinLogicNode* pJoin, SNode** pCond) {
return pdcMergeConds(&pJoin->pFullOnCond, pCond); return nodesMergeNode(&pJoin->pFullOnCond, pCond);
} }
static int32_t pdcPushDownCondToChild(SOptimizeContext* pCxt, SLogicNode* pChild, SNode** pCond) { static int32_t pdcPushDownCondToChild(SOptimizeContext* pCxt, SLogicNode* pChild, SNode** pCond) {
return pdcMergeConds(&pChild->pConditions, pCond); return nodesMergeNode(&pChild->pConditions, pCond);
} }
static bool pdcJoinIsPrim(SNode* pNode, SSHashObj* pTables) { static bool pdcJoinIsPrim(SNode* pNode, SSHashObj* pTables, bool constAsPrim, bool* constPrimGot) {
if (QUERY_NODE_COLUMN != nodeType(pNode) && QUERY_NODE_FUNCTION != nodeType(pNode)) { if (QUERY_NODE_COLUMN != nodeType(pNode) && QUERY_NODE_FUNCTION != nodeType(pNode) && (!constAsPrim || QUERY_NODE_VALUE != nodeType(pNode))) {
return false;
}
if (QUERY_NODE_VALUE == nodeType(pNode)) {
SValueNode* pVal = (SValueNode*)pNode;
if (TSDB_DATA_TYPE_NULL != pVal->node.resType.type && !pVal->isNull) {
if (pdcJoinColInTableList(pNode, pTables)) {
*constPrimGot = true;
return true;
}
}
return false; return false;
} }
@ -850,7 +831,7 @@ static bool pdcJoinIsPrim(SNode* pNode, SSHashObj* pTables) {
return pdcJoinColInTableList(pNode, pTables); return pdcJoinColInTableList(pNode, pTables);
} }
static bool pdcJoinIsPrimEqualCond(SJoinLogicNode* pJoin, SNode* pCond) { static bool pdcJoinIsPrimEqualCond(SJoinLogicNode* pJoin, SNode* pCond, bool constAsPrim) {
if (QUERY_NODE_OPERATOR != nodeType(pCond)) { if (QUERY_NODE_OPERATOR != nodeType(pCond)) {
return false; return false;
} }
@ -878,11 +859,14 @@ static bool pdcJoinIsPrimEqualCond(SJoinLogicNode* pJoin, SNode* pCond) {
return code; return code;
} }
bool res = false; bool res = false, constGot = false;
if (pdcJoinIsPrim(pOper->pLeft, pLeftTables)) { if (pdcJoinIsPrim(pOper->pLeft, pLeftTables, constAsPrim, &pJoin->leftConstPrimGot)) {
res = pdcJoinIsPrim(pOper->pRight, pRightTables); res = pdcJoinIsPrim(pOper->pRight, pRightTables, constAsPrim, &pJoin->rightConstPrimGot);
} else if (pdcJoinIsPrim(pOper->pLeft, pRightTables)) { } else if (pdcJoinIsPrim(pOper->pLeft, pRightTables, constAsPrim, &pJoin->rightConstPrimGot)) {
res = pdcJoinIsPrim(pOper->pRight, pLeftTables); res = pdcJoinIsPrim(pOper->pRight, pLeftTables, constAsPrim, &pJoin->leftConstPrimGot);
if (pJoin->rightConstPrimGot || pJoin->leftConstPrimGot) {
TSWAP(pOper->pLeft, pOper->pRight);
}
} }
tSimpleHashCleanup(pLeftTables); tSimpleHashCleanup(pLeftTables);
@ -910,24 +894,26 @@ static bool pdcJoinHasPrimEqualCond(SJoinLogicNode* pJoin, SNode* pCond, bool* e
} }
return hasPrimaryKeyEqualCond; return hasPrimaryKeyEqualCond;
} else { } else {
return pdcJoinIsPrimEqualCond(pJoin, pCond); return pdcJoinIsPrimEqualCond(pJoin, pCond, false);
} }
} }
static int32_t pdcJoinSplitPrimInLogicCond(SJoinLogicNode* pJoin, SNode** ppPrimEqCond, SNode** ppOnCond) { static int32_t pdcJoinSplitPrimInLogicCond(SJoinLogicNode* pJoin, SNode** ppInput, SNode** ppPrimEqCond, SNode** ppOnCond, bool constAsPrim) {
SLogicConditionNode* pLogicCond = (SLogicConditionNode*)(pJoin->pFullOnCond); SLogicConditionNode* pLogicCond = (SLogicConditionNode*)(*ppInput);
int32_t code = TSDB_CODE_SUCCESS; int32_t code = TSDB_CODE_SUCCESS;
SNodeList* pOnConds = NULL; SNodeList* pOnConds = NULL;
SNode* pCond = NULL; SNode* pCond = NULL;
WHERE_EACH(pCond, pLogicCond->pParameterList) { WHERE_EACH(pCond, pLogicCond->pParameterList) {
SNode* pNew = NULL; SNode* pNew = NULL;
code = nodesCloneNode(pCond, &pNew); if (pdcJoinIsPrimEqualCond(pJoin, pCond, constAsPrim) && (NULL == *ppPrimEqCond) && (!constAsPrim || pJoin->leftConstPrimGot || pJoin->rightConstPrimGot)) {
if (TSDB_CODE_SUCCESS != code) break; code = nodesCloneNode(pCond, &pNew);
if (pdcJoinIsPrimEqualCond(pJoin, pCond) && (NULL == *ppPrimEqCond)) { if (TSDB_CODE_SUCCESS != code) break;
*ppPrimEqCond = pNew; *ppPrimEqCond = pNew;
ERASE_NODE(pLogicCond->pParameterList); ERASE_NODE(pLogicCond->pParameterList);
} else { } else {
code = nodesCloneNode(pCond, &pNew);
if (TSDB_CODE_SUCCESS != code) break;
code = nodesListMakeAppend(&pOnConds, pNew); code = nodesListMakeAppend(&pOnConds, pNew);
if (TSDB_CODE_SUCCESS != code) break; if (TSDB_CODE_SUCCESS != code) break;
WHERE_NEXT; WHERE_NEXT;
@ -942,10 +928,11 @@ static int32_t pdcJoinSplitPrimInLogicCond(SJoinLogicNode* pJoin, SNode** ppPrim
if (TSDB_CODE_SUCCESS == code) { if (TSDB_CODE_SUCCESS == code) {
if (NULL != *ppPrimEqCond) { if (NULL != *ppPrimEqCond) {
*ppOnCond = pTempOnCond; *ppOnCond = pTempOnCond;
nodesDestroyNode(pJoin->pFullOnCond); nodesDestroyNode(*ppInput);
pJoin->pFullOnCond = NULL; *ppInput = NULL;
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
nodesDestroyNode(pTempOnCond);
planError("no primary key equal cond found, condListNum:%d", pLogicCond->pParameterList->length); planError("no primary key equal cond found, condListNum:%d", pLogicCond->pParameterList->length);
return TSDB_CODE_PLAN_INTERNAL_ERROR; return TSDB_CODE_PLAN_INTERNAL_ERROR;
} else { } else {
@ -962,8 +949,8 @@ static int32_t pdcJoinSplitPrimEqCond(SOptimizeContext* pCxt, SJoinLogicNode* pJ
if (QUERY_NODE_LOGIC_CONDITION == nodeType(pJoin->pFullOnCond) && if (QUERY_NODE_LOGIC_CONDITION == nodeType(pJoin->pFullOnCond) &&
LOGIC_COND_TYPE_AND == ((SLogicConditionNode*)(pJoin->pFullOnCond))->condType) { LOGIC_COND_TYPE_AND == ((SLogicConditionNode*)(pJoin->pFullOnCond))->condType) {
code = pdcJoinSplitPrimInLogicCond(pJoin, &pPrimKeyEqCond, &pJoinOnCond); code = pdcJoinSplitPrimInLogicCond(pJoin, &pJoin->pFullOnCond, &pPrimKeyEqCond, &pJoinOnCond, false);
} else if (pdcJoinIsPrimEqualCond(pJoin, pJoin->pFullOnCond)) { } else if (pdcJoinIsPrimEqualCond(pJoin, pJoin->pFullOnCond, false)) {
pPrimKeyEqCond = pJoin->pFullOnCond; pPrimKeyEqCond = pJoin->pFullOnCond;
pJoinOnCond = NULL; pJoinOnCond = NULL;
} else { } else {
@ -1412,6 +1399,37 @@ static int32_t pdcJoinAddFilterColsToTarget(SOptimizeContext* pCxt, SJoinLogicNo
return code; return code;
} }
static int32_t pdcJoinSplitConstPrimEqCond(SOptimizeContext* pCxt, SJoinLogicNode* pJoin, SNode** ppCond) {
int32_t code = TSDB_CODE_SUCCESS;
SNode* pPrimKeyEqCond = NULL;
SNode* pJoinOnCond = NULL;
if (QUERY_NODE_LOGIC_CONDITION == nodeType(*ppCond) &&
LOGIC_COND_TYPE_AND == ((SLogicConditionNode*)*ppCond)->condType) {
code = pdcJoinSplitPrimInLogicCond(pJoin, ppCond, &pPrimKeyEqCond, &pJoinOnCond, true);
} else if (pdcJoinIsPrimEqualCond(pJoin, *ppCond, true) && (pJoin->leftConstPrimGot || pJoin->rightConstPrimGot)) {
pPrimKeyEqCond = *ppCond;
pJoinOnCond = NULL;
} else {
return TSDB_CODE_SUCCESS;
}
if (TSDB_CODE_SUCCESS == code) {
pJoin->pPrimKeyEqCond = pPrimKeyEqCond;
*ppCond = pJoinOnCond;
if (pJoin->rightConstPrimGot || pJoin->leftConstPrimGot) {
code = scalarConvertOpValueNodeTs((SOperatorNode*)pJoin->pPrimKeyEqCond);
}
} else {
nodesDestroyNode(pPrimKeyEqCond);
nodesDestroyNode(pJoinOnCond);
}
return code;
}
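
pdcJoinSplitConstPrimEqCond above lets a predicate that pins one side's primary timestamp to a constant stand in for the usual left.ts = right.ts condition. The sketch below shows why that is enough for a sort-merge style join: every surviving row on the pinned side carries the same timestamp, so the join only has to locate that value in the other side's ordered timestamps. Plain arrays and the sample values are stand-ins for data blocks.

#include <stdint.h>
#include <stdio.h>

int main(void) {
  int64_t constTs   = 1700000000000LL;     /* left.ts pinned by a constant condition */
  int64_t rightTs[] = {1699999999000LL, 1700000000000LL,
                       1700000000000LL, 1700000001000LL};
  int nRight = 4, matches = 0;
  for (int i = 0; i < nRight; ++i) {       /* right side is ordered by timestamp */
    if (rightTs[i] == constTs) matches++;
    if (rightTs[i] > constTs) break;       /* past the constant: nothing more can match */
  }
  printf("right rows joining each left row: %d\n", matches);
  return 0;
}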
static int32_t pdcJoinCheckAllCond(SOptimizeContext* pCxt, SJoinLogicNode* pJoin) { static int32_t pdcJoinCheckAllCond(SOptimizeContext* pCxt, SJoinLogicNode* pJoin) {
if (NULL == pJoin->pFullOnCond) { if (NULL == pJoin->pFullOnCond) {
if (IS_WINDOW_JOIN(pJoin->subType) || IS_ASOF_JOIN(pJoin->subType)) { if (IS_WINDOW_JOIN(pJoin->subType) || IS_ASOF_JOIN(pJoin->subType)) {
@ -1427,9 +1445,10 @@ static int32_t pdcJoinCheckAllCond(SOptimizeContext* pCxt, SJoinLogicNode* pJoin
} }
} }
SNode* pCond = pJoin->pFullOnCond ? pJoin->pFullOnCond : pJoin->node.pConditions; SNode** ppCond = pJoin->pFullOnCond ? &pJoin->pFullOnCond : &pJoin->node.pConditions;
bool errCond = false; bool errCond = false;
if (!pdcJoinHasPrimEqualCond(pJoin, pCond, &errCond)) { bool primCondGot = pdcJoinHasPrimEqualCond(pJoin, *ppCond, &errCond);
if (!primCondGot) {
if (errCond && !(IS_INNER_NONE_JOIN(pJoin->joinType, pJoin->subType) && NULL != pJoin->pFullOnCond && if (errCond && !(IS_INNER_NONE_JOIN(pJoin->joinType, pJoin->subType) && NULL != pJoin->pFullOnCond &&
NULL != pJoin->node.pConditions)) { NULL != pJoin->node.pConditions)) {
return generateUsageErrMsg(pCxt->pPlanCxt->pMsg, pCxt->pPlanCxt->msgLen, TSDB_CODE_PLAN_NOT_SUPPORT_JOIN_COND); return generateUsageErrMsg(pCxt->pPlanCxt->pMsg, pCxt->pPlanCxt->msgLen, TSDB_CODE_PLAN_NOT_SUPPORT_JOIN_COND);
@ -1437,7 +1456,8 @@ static int32_t pdcJoinCheckAllCond(SOptimizeContext* pCxt, SJoinLogicNode* pJoin
if (IS_INNER_NONE_JOIN(pJoin->joinType, pJoin->subType) && NULL != pJoin->pFullOnCond && if (IS_INNER_NONE_JOIN(pJoin->joinType, pJoin->subType) && NULL != pJoin->pFullOnCond &&
NULL != pJoin->node.pConditions) { NULL != pJoin->node.pConditions) {
if (pdcJoinHasPrimEqualCond(pJoin, pJoin->node.pConditions, &errCond)) { primCondGot = pdcJoinHasPrimEqualCond(pJoin, pJoin->node.pConditions, &errCond);
if (primCondGot) {
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
if (errCond) { if (errCond) {
@ -1448,6 +1468,22 @@ static int32_t pdcJoinCheckAllCond(SOptimizeContext* pCxt, SJoinLogicNode* pJoin
if (IS_WINDOW_JOIN(pJoin->subType) || IS_ASOF_JOIN(pJoin->subType)) { if (IS_WINDOW_JOIN(pJoin->subType) || IS_ASOF_JOIN(pJoin->subType)) {
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
}
if (pJoin->leftNoOrderedSubQuery || pJoin->rightNoOrderedSubQuery || !primCondGot) {
pJoin->noPrimKeyEqCond = true;
int32_t code = pdcJoinSplitConstPrimEqCond(pCxt, pJoin, ppCond);
if (code || (pJoin->pPrimKeyEqCond)) {
return code;
}
if (IS_INNER_NONE_JOIN(pJoin->joinType, pJoin->subType) && NULL != pJoin->pFullOnCond &&
NULL != pJoin->node.pConditions) {
code = pdcJoinSplitConstPrimEqCond(pCxt, pJoin, &pJoin->node.pConditions);
if (code || pJoin->pPrimKeyEqCond) {
return code;
}
}
return generateUsageErrMsg(pCxt->pPlanCxt->pMsg, pCxt->pPlanCxt->msgLen, TSDB_CODE_PLAN_EXPECTED_TS_EQUAL); return generateUsageErrMsg(pCxt->pPlanCxt->pMsg, pCxt->pPlanCxt->msgLen, TSDB_CODE_PLAN_EXPECTED_TS_EQUAL);
} }
@ -1813,6 +1849,7 @@ static int32_t pdcRewriteTypeBasedOnJoinRes(SOptimizeContext* pCxt, SJoinLogicNo
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
static int32_t pdcDealJoin(SOptimizeContext* pCxt, SJoinLogicNode* pJoin) { static int32_t pdcDealJoin(SOptimizeContext* pCxt, SJoinLogicNode* pJoin) {
if (OPTIMIZE_FLAG_TEST_MASK(pJoin->node.optimizedFlag, OPTIMIZE_FLAG_PUSH_DOWN_CONDE)) { if (OPTIMIZE_FLAG_TEST_MASK(pJoin->node.optimizedFlag, OPTIMIZE_FLAG_PUSH_DOWN_CONDE)) {
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
@ -1870,7 +1907,7 @@ static int32_t pdcDealJoin(SOptimizeContext* pCxt, SJoinLogicNode* pJoin) {
} }
if (TSDB_CODE_SUCCESS == code && NULL != pJoin->pFullOnCond && !IS_WINDOW_JOIN(pJoin->subType) && if (TSDB_CODE_SUCCESS == code && NULL != pJoin->pFullOnCond && !IS_WINDOW_JOIN(pJoin->subType) &&
NULL == pJoin->addPrimEqCond) { NULL == pJoin->addPrimEqCond && NULL == pJoin->pPrimKeyEqCond) {
code = pdcJoinSplitPrimEqCond(pCxt, pJoin); code = pdcJoinSplitPrimEqCond(pCxt, pJoin);
} }
@ -1993,7 +2030,7 @@ static int32_t partitionAggCond(SAggLogicNode* pAgg, SNode** ppAggFunCond, SNode
} }
static int32_t pushCondToAggCond(SOptimizeContext* pCxt, SAggLogicNode* pAgg, SNode** pAggFuncCond) { static int32_t pushCondToAggCond(SOptimizeContext* pCxt, SAggLogicNode* pAgg, SNode** pAggFuncCond) {
return pdcMergeConds(&pAgg->node.pConditions, pAggFuncCond); return nodesMergeNode(&pAgg->node.pConditions, pAggFuncCond);
} }
typedef struct SRewriteAggGroupKeyCondContext { typedef struct SRewriteAggGroupKeyCondContext {
@ -2118,12 +2155,15 @@ static int32_t pdcDealProject(SOptimizeContext* pCxt, SProjectLogicNode* pProjec
if (NULL != pProject->node.pLimit || NULL != pProject->node.pSlimit) { if (NULL != pProject->node.pLimit || NULL != pProject->node.pSlimit) {
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
SLogicNode* pChild = (SLogicNode*)nodesListGetNode(pProject->node.pChildren, 0);
if(pChild->pLimit != NULL) {
return TSDB_CODE_SUCCESS;
}
int32_t code = TSDB_CODE_SUCCESS; int32_t code = TSDB_CODE_SUCCESS;
SNode* pProjCond = NULL; SNode* pProjCond = NULL;
code = rewriteProjectCondForPushDown(pCxt, pProject, &pProjCond); code = rewriteProjectCondForPushDown(pCxt, pProject, &pProjCond);
if (TSDB_CODE_SUCCESS == code) { if (TSDB_CODE_SUCCESS == code) {
SLogicNode* pChild = (SLogicNode*)nodesListGetNode(pProject->node.pChildren, 0);
code = pdcPushDownCondToChild(pCxt, pChild, &pProjCond); code = pdcPushDownCondToChild(pCxt, pChild, &pProjCond);
} }
@ -2616,7 +2656,9 @@ static int32_t sortForJoinOptimizeImpl(SOptimizeContext* pCxt, SLogicSubplan* pL
bool res = false; bool res = false;
SOperatorNode* pOp = (SOperatorNode*)pJoin->pPrimKeyEqCond; SOperatorNode* pOp = (SOperatorNode*)pJoin->pPrimKeyEqCond;
if (QUERY_NODE_COLUMN != nodeType(pOp->pLeft) || QUERY_NODE_COLUMN != nodeType(pOp->pRight)) {
if ((QUERY_NODE_COLUMN != nodeType(pOp->pLeft) && QUERY_NODE_VALUE != nodeType(pOp->pLeft)) ||
(QUERY_NODE_COLUMN != nodeType(pOp->pRight) && QUERY_NODE_VALUE != nodeType(pOp->pRight))) {
return TSDB_CODE_PLAN_INTERNAL_ERROR; return TSDB_CODE_PLAN_INTERNAL_ERROR;
} }
@ -2626,11 +2668,13 @@ static int32_t sortForJoinOptimizeImpl(SOptimizeContext* pCxt, SLogicSubplan* pL
if (TSDB_CODE_SUCCESS != code) { if (TSDB_CODE_SUCCESS != code) {
return code; return code;
} }
if (NULL !=
tSimpleHashGet(pTables, ((SColumnNode*)pOp->pLeft)->tableAlias, strlen(((SColumnNode*)pOp->pLeft)->tableAlias))) { char* opLeftTable = (QUERY_NODE_COLUMN == nodeType(pOp->pLeft)) ? ((SColumnNode*)pOp->pLeft)->tableAlias : ((SValueNode*)pOp->pLeft)->node.srcTable;
char* opRightTable = (QUERY_NODE_COLUMN == nodeType(pOp->pRight)) ? ((SColumnNode*)pOp->pRight)->tableAlias : ((SValueNode*)pOp->pRight)->node.srcTable;
if (NULL != tSimpleHashGet(pTables, opLeftTable, strlen(opLeftTable))) {
pOrderByNode = pOp->pLeft; pOrderByNode = pOp->pLeft;
} else if (NULL != tSimpleHashGet(pTables, ((SColumnNode*)pOp->pRight)->tableAlias, } else if (NULL != tSimpleHashGet(pTables, opRightTable, strlen(opRightTable))) {
strlen(((SColumnNode*)pOp->pRight)->tableAlias))) {
pOrderByNode = pOp->pRight; pOrderByNode = pOp->pRight;
} }
@ -2683,7 +2727,6 @@ static int32_t sortForJoinOptimizeImpl(SOptimizeContext* pCxt, SLogicSubplan* pL
} }
*pChildPos = (SNode*)pSort; *pChildPos = (SNode*)pSort;
pSort->node.pParent = (SLogicNode*)pJoin; pSort->node.pParent = (SLogicNode*)pJoin;
;
_return: _return:

View File

@ -375,7 +375,7 @@ static EDealRes doSetSlotId(SNode* pNode, void* pContext) {
SSetSlotIdCxt* pCxt = (SSetSlotIdCxt*)pContext; SSetSlotIdCxt* pCxt = (SSetSlotIdCxt*)pContext;
char* name = NULL; char* name = NULL;
int32_t len = 0; int32_t len = 0;
pCxt->errCode = getSlotKey(pNode, NULL, &name, &len, 16); pCxt->errCode = getSlotKey(pNode, NULL, &name, &len, 64);
if (TSDB_CODE_SUCCESS != pCxt->errCode) { if (TSDB_CODE_SUCCESS != pCxt->errCode) {
return DEAL_RES_ERROR; return DEAL_RES_ERROR;
} }
@ -386,7 +386,10 @@ static EDealRes doSetSlotId(SNode* pNode, void* pContext) {
if (!pIndex) { if (!pIndex) {
pIndex = taosHashGet(pCxt->pRightProdIdxHash, name, strlen(name)); pIndex = taosHashGet(pCxt->pRightProdIdxHash, name, strlen(name));
} }
} else { }
if (NULL == pIndex) {
name[len] = 0;
pIndex = taosHashGet(pCxt->pLeftHash, name, len); pIndex = taosHashGet(pCxt->pLeftHash, name, len);
if (NULL == pIndex) { if (NULL == pIndex) {
pIndex = taosHashGet(pCxt->pRightHash, name, len); pIndex = taosHashGet(pCxt->pRightHash, name, len);
@ -924,7 +927,7 @@ static int32_t setColEqList(SNode* pEqCond, int16_t leftBlkId, int16_t rightBlkI
} }
static int32_t setMergeJoinPrimColEqCond(SNode* pEqCond, int32_t subType, int16_t leftBlkId, int16_t rightBlkId, static int32_t setMergeJoinPrimColEqCond(SNode* pEqCond, int32_t subType, int16_t leftBlkId, int16_t rightBlkId,
SSortMergeJoinPhysiNode* pJoin) { SSortMergeJoinPhysiNode* pJoin, SJoinLogicNode* pJoinLogicNode) {
int32_t code = 0; int32_t code = 0;
if (QUERY_NODE_OPERATOR == nodeType(pEqCond)) { if (QUERY_NODE_OPERATOR == nodeType(pEqCond)) {
SOperatorNode* pOp = (SOperatorNode*)pEqCond; SOperatorNode* pOp = (SOperatorNode*)pEqCond;
@ -947,6 +950,16 @@ static int32_t setMergeJoinPrimColEqCond(SNode* pEqCond, int32_t subType, int16_
} }
break; break;
} }
case QUERY_NODE_VALUE: {
if (pJoinLogicNode && pJoinLogicNode->leftConstPrimGot) {
pJoin->leftPrimExpr = NULL;
code = nodesCloneNode(pOp->pLeft, &pJoin->leftPrimExpr);
break;
}
planError("value node got in prim eq left cond, rightType:%d", pOp->pRight ? nodeType(pOp->pRight) : 0);
return TSDB_CODE_PLAN_INTERNAL_ERROR;
}
case QUERY_NODE_FUNCTION: { case QUERY_NODE_FUNCTION: {
SFunctionNode* pFunc = (SFunctionNode*)pOp->pLeft; SFunctionNode* pFunc = (SFunctionNode*)pOp->pLeft;
if (FUNCTION_TYPE_TIMETRUNCATE != pFunc->funcType) { if (FUNCTION_TYPE_TIMETRUNCATE != pFunc->funcType) {
@ -995,6 +1008,16 @@ static int32_t setMergeJoinPrimColEqCond(SNode* pEqCond, int32_t subType, int16_
} }
break; break;
} }
case QUERY_NODE_VALUE: {
if (pJoinLogicNode && pJoinLogicNode->rightConstPrimGot) {
pJoin->rightPrimExpr = NULL;
code = nodesCloneNode(pOp->pRight, &pJoin->rightPrimExpr);
break;
}
planError("value node got in prim eq right cond, leftType:%d", pOp->pLeft ? nodeType(pOp->pLeft) : 0);
return TSDB_CODE_PLAN_INTERNAL_ERROR;
}
case QUERY_NODE_FUNCTION: { case QUERY_NODE_FUNCTION: {
SFunctionNode* pFunc = (SFunctionNode*)pOp->pRight; SFunctionNode* pFunc = (SFunctionNode*)pOp->pRight;
if (FUNCTION_TYPE_TIMETRUNCATE != pFunc->funcType) { if (FUNCTION_TYPE_TIMETRUNCATE != pFunc->funcType) {
@ -1034,6 +1057,37 @@ static int32_t setMergeJoinPrimColEqCond(SNode* pEqCond, int32_t subType, int16_
return code; return code;
} }
static int32_t removePrimColFromJoinTargets(SNodeList* pTargets, SValueNode* pPrimExpr, SColumnNode** ppRemoved) {
int32_t code = TSDB_CODE_SUCCESS;
SNode* pNode = NULL;
FOREACH(pNode, pTargets) {
SColumnNode* pCol = (SColumnNode*)pNode;
if (0 == strcmp(pCol->tableAlias, pPrimExpr->node.srcTable) && 0 == strcmp(pCol->colName, pPrimExpr->node.aliasName)) {
code = nodesCloneNode(pNode, (SNode**)ppRemoved);
ERASE_NODE(pTargets);
break;
}
}
return code;
}
static int32_t appendPrimColToJoinTargets(SSortMergeJoinPhysiNode* pJoin, SColumnNode** ppTarget, STargetNode* primExpr, int16_t blkId) {
SColumnNode* pCol = *ppTarget;
if (TSDB_DATA_TYPE_TIMESTAMP != pCol->node.resType.type) {
planError("primary key output type is not ts, type:%d", pCol->node.resType.type);
return TSDB_CODE_PAR_PRIM_KEY_MUST_BE_TS;
}
pCol->dataBlockId = blkId;
pCol->slotId = primExpr->slotId;
int32_t code = nodesListMakeStrictAppend(&pJoin->pTargets, (SNode *)pCol);
if (TSDB_CODE_SUCCESS == code) {
*ppTarget = NULL;
}
return code;
}
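
removePrimColFromJoinTargets and appendPrimColToJoinTargets above take the pinned timestamp column out of the ordinary join targets and add it back bound to the slot that the constant primary expression writes into. A toy rebind of a target list is shown below; the Target struct, the slot numbers, and the column names are invented for the example.

#include <stdio.h>
#include <string.h>

typedef struct { char name[16]; int slotId; } Target;

int main(void) {
  Target targets[3] = {{"ts", 0}, {"v1", 1}, {"v2", 2}};
  int n = 3, primSlot = 7;                  /* slot filled by the constant prim expr */
  Target removed = {"", 0};
  for (int i = 0; i < n; ++i) {
    if (strcmp(targets[i].name, "ts") == 0) {
      removed = targets[i];                 /* remember the primary-key target */
      memmove(&targets[i], &targets[i + 1], (size_t)(n - i - 1) * sizeof(Target));
      --n;
      break;
    }
  }
  removed.slotId = primSlot;                /* rebind to the primary-expr slot */
  targets[n++] = removed;                   /* re-append at the end of the list */
  for (int i = 0; i < n; ++i) printf("%s -> slot %d\n", targets[i].name, targets[i].slotId);
  return 0;
}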
static int32_t createMergeJoinPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren, SJoinLogicNode* pJoinLogicNode, static int32_t createMergeJoinPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren, SJoinLogicNode* pJoinLogicNode,
SPhysiNode** pPhyNode) { SPhysiNode** pPhyNode) {
SSortMergeJoinPhysiNode* pJoin = SSortMergeJoinPhysiNode* pJoin =
@ -1068,7 +1122,7 @@ static int32_t createMergeJoinPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChi
&pJoin->pPrimKeyCond); &pJoin->pPrimKeyCond);
if (TSDB_CODE_SUCCESS == code) { if (TSDB_CODE_SUCCESS == code) {
code = setMergeJoinPrimColEqCond(pJoin->pPrimKeyCond, pJoin->subType, pLeftDesc->dataBlockId, code = setMergeJoinPrimColEqCond(pJoin->pPrimKeyCond, pJoin->subType, pLeftDesc->dataBlockId,
pRightDesc->dataBlockId, pJoin); pRightDesc->dataBlockId, pJoin, pJoinLogicNode);
} }
if (TSDB_CODE_SUCCESS == code && NULL != pJoin->leftPrimExpr) { if (TSDB_CODE_SUCCESS == code && NULL != pJoin->leftPrimExpr) {
code = addDataBlockSlot(pCxt, &pJoin->leftPrimExpr, pLeftDesc); code = addDataBlockSlot(pCxt, &pJoin->leftPrimExpr, pLeftDesc);
@ -1084,7 +1138,7 @@ static int32_t createMergeJoinPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChi
&pPrimKeyCond); &pPrimKeyCond);
if (TSDB_CODE_SUCCESS == code) { if (TSDB_CODE_SUCCESS == code) {
code = setMergeJoinPrimColEqCond(pPrimKeyCond, pJoin->subType, pLeftDesc->dataBlockId, pRightDesc->dataBlockId, code = setMergeJoinPrimColEqCond(pPrimKeyCond, pJoin->subType, pLeftDesc->dataBlockId, pRightDesc->dataBlockId,
pJoin); pJoin, NULL);
} }
if (TSDB_CODE_SUCCESS == code && NULL != pJoin->leftPrimExpr) { if (TSDB_CODE_SUCCESS == code && NULL != pJoin->leftPrimExpr) {
code = addDataBlockSlot(pCxt, &pJoin->leftPrimExpr, pLeftDesc); code = addDataBlockSlot(pCxt, &pJoin->leftPrimExpr, pLeftDesc);
@ -1095,11 +1149,32 @@ static int32_t createMergeJoinPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChi
nodesDestroyNode(pPrimKeyCond); nodesDestroyNode(pPrimKeyCond);
} }
SValueNode* pLeftPrimExpr = NULL, *pRightPrimExpr = NULL;
SColumnNode* pLeftTarget = NULL, *pRightTarget = NULL;
if (TSDB_CODE_SUCCESS == code && pJoinLogicNode->leftConstPrimGot && pJoin->leftPrimExpr
&& QUERY_NODE_VALUE == nodeType(((STargetNode*)pJoin->leftPrimExpr)->pExpr)) {
pLeftPrimExpr = (SValueNode*)((STargetNode*)pJoin->leftPrimExpr)->pExpr;
code = removePrimColFromJoinTargets(pJoinLogicNode->node.pTargets, pLeftPrimExpr, &pLeftTarget);
}
if (TSDB_CODE_SUCCESS == code && pJoinLogicNode->rightConstPrimGot && pJoin->rightPrimExpr
&& QUERY_NODE_VALUE == nodeType(((STargetNode*)pJoin->rightPrimExpr)->pExpr)) {
pRightPrimExpr = (SValueNode*)((STargetNode*)pJoin->rightPrimExpr)->pExpr;
code = removePrimColFromJoinTargets(pJoinLogicNode->node.pTargets, pRightPrimExpr, &pRightTarget);
}
if (TSDB_CODE_SUCCESS == code) { if (TSDB_CODE_SUCCESS == code) {
code = setListSlotId(pCxt, pLeftDesc->dataBlockId, pRightDesc->dataBlockId, pJoinLogicNode->node.pTargets, code = setListSlotId(pCxt, pLeftDesc->dataBlockId, pRightDesc->dataBlockId, pJoinLogicNode->node.pTargets,
&pJoin->pTargets); &pJoin->pTargets);
} }
if (TSDB_CODE_SUCCESS == code && pLeftPrimExpr && pLeftTarget) {
code = appendPrimColToJoinTargets(pJoin, &pLeftTarget, (STargetNode*)pJoin->leftPrimExpr, pLeftDesc->dataBlockId);
}
if (TSDB_CODE_SUCCESS == code && pRightPrimExpr && pRightTarget) {
code = appendPrimColToJoinTargets(pJoin, &pRightTarget, (STargetNode*)pJoin->rightPrimExpr, pRightDesc->dataBlockId);
}
if (TSDB_CODE_SUCCESS == code && NULL != pJoinLogicNode->pFullOnCond) { if (TSDB_CODE_SUCCESS == code && NULL != pJoinLogicNode->pFullOnCond) {
code = setNodeSlotId(pCxt, ((SPhysiNode*)pJoin)->pOutputDataBlockDesc->dataBlockId, -1, pJoinLogicNode->pFullOnCond, code = setNodeSlotId(pCxt, ((SPhysiNode*)pJoin)->pOutputDataBlockDesc->dataBlockId, -1, pJoinLogicNode->pFullOnCond,
&pJoin->pFullOnCond); &pJoin->pFullOnCond);
@ -1151,6 +1226,8 @@ static int32_t createMergeJoinPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChi
*pPhyNode = (SPhysiNode*)pJoin; *pPhyNode = (SPhysiNode*)pJoin;
} else { } else {
nodesDestroyNode((SNode*)pJoin); nodesDestroyNode((SNode*)pJoin);
nodesDestroyNode((SNode*)pLeftTarget);
nodesDestroyNode((SNode*)pRightTarget);
} }
return code; return code;

View File

@ -1133,29 +1133,7 @@ static uint8_t sclGetOpValueNodeTsPrecision(SNode *pLeft, SNode *pRight) {
return 0; return 0;
} }
int32_t sclConvertOpValueNodeTs(SOperatorNode *node) {
if (node->pLeft && SCL_IS_VAR_VALUE_NODE(node->pLeft)) {
if (node->pRight && (TSDB_DATA_TYPE_TIMESTAMP == ((SExprNode *)node->pRight)->resType.type)) {
SCL_ERR_RET(
sclConvertToTsValueNode(sclGetOpValueNodeTsPrecision(node->pLeft, node->pRight), (SValueNode *)node->pLeft));
}
} else if (node->pRight && SCL_IS_NOTNULL_CONST_NODE(node->pRight)) {
if (node->pLeft && (TSDB_DATA_TYPE_TIMESTAMP == ((SExprNode *)node->pLeft)->resType.type)) {
if (SCL_IS_VAR_VALUE_NODE(node->pRight)) {
SCL_ERR_RET(sclConvertToTsValueNode(sclGetOpValueNodeTsPrecision(node->pLeft, node->pRight),
(SValueNode *)node->pRight));
} else if (QUERY_NODE_NODE_LIST == node->pRight->type) {
SNode *pNode;
FOREACH(pNode, ((SNodeListNode *)node->pRight)->pNodeList) {
if (SCL_IS_VAR_VALUE_NODE(pNode)) {
SCL_ERR_RET(sclConvertToTsValueNode(sclGetOpValueNodeTsPrecision(node->pLeft, pNode), (SValueNode *)pNode));
}
}
}
}
}
return TSDB_CODE_SUCCESS;
}
int32_t sclConvertCaseWhenValueNodeTs(SCaseWhenNode *node) { int32_t sclConvertCaseWhenValueNodeTs(SCaseWhenNode *node) {
if (NULL == node->pCase) { if (NULL == node->pCase) {
@ -1344,7 +1322,7 @@ EDealRes sclRewriteLogic(SNode **pNode, SScalarCtx *ctx) {
EDealRes sclRewriteOperator(SNode **pNode, SScalarCtx *ctx) { EDealRes sclRewriteOperator(SNode **pNode, SScalarCtx *ctx) {
SOperatorNode *node = (SOperatorNode *)*pNode; SOperatorNode *node = (SOperatorNode *)*pNode;
ctx->code = sclConvertOpValueNodeTs(node); ctx->code = scalarConvertOpValueNodeTs(node);
if (ctx->code) { if (ctx->code) {
return DEAL_RES_ERROR; return DEAL_RES_ERROR;
} }
@ -1807,6 +1785,31 @@ static int32_t sclGetBitwiseOperatorResType(SOperatorNode *pOp) {
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
int32_t scalarConvertOpValueNodeTs(SOperatorNode *node) {
if (node->pLeft && SCL_IS_VAR_VALUE_NODE(node->pLeft)) {
if (node->pRight && (TSDB_DATA_TYPE_TIMESTAMP == ((SExprNode *)node->pRight)->resType.type)) {
SCL_ERR_RET(
sclConvertToTsValueNode(sclGetOpValueNodeTsPrecision(node->pLeft, node->pRight), (SValueNode *)node->pLeft));
}
} else if (node->pRight && SCL_IS_NOTNULL_CONST_NODE(node->pRight)) {
if (node->pLeft && (TSDB_DATA_TYPE_TIMESTAMP == ((SExprNode *)node->pLeft)->resType.type)) {
if (SCL_IS_VAR_VALUE_NODE(node->pRight)) {
SCL_ERR_RET(sclConvertToTsValueNode(sclGetOpValueNodeTsPrecision(node->pLeft, node->pRight),
(SValueNode *)node->pRight));
} else if (QUERY_NODE_NODE_LIST == node->pRight->type) {
SNode *pNode;
FOREACH(pNode, ((SNodeListNode *)node->pRight)->pNodeList) {
if (SCL_IS_VAR_VALUE_NODE(pNode)) {
SCL_ERR_RET(sclConvertToTsValueNode(sclGetOpValueNodeTsPrecision(node->pLeft, pNode), (SValueNode *)pNode));
}
}
}
}
}
return TSDB_CODE_SUCCESS;
}
int32_t scalarCalculateConstants(SNode *pNode, SNode **pRes) { return sclCalcConstants(pNode, false, pRes); } int32_t scalarCalculateConstants(SNode *pNode, SNode **pRes) { return sclCalcConstants(pNode, false, pRes); }
int32_t scalarCalculateConstantsFromDual(SNode *pNode, SNode **pRes) { return sclCalcConstants(pNode, true, pRes); } int32_t scalarCalculateConstantsFromDual(SNode *pNode, SNode **pRes) { return sclCalcConstants(pNode, true, pRes); }
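
scalarConvertOpValueNodeTs, moved into the public scalar API above so the planner can call it on the promoted constant condition, turns a string literal into a timestamp value when the opposite operand of the comparison is timestamp-typed. A rough POSIX-only illustration of that kind of conversion follows; strptime/mktime and the millisecond assumption are only for the sketch, as the engine uses its own timestamp parser and precision handling.

#define _XOPEN_SOURCE 700
#include <stdio.h>
#include <time.h>

int main(void) {
  struct tm tmv = {0};
  /* Parse the literal the way a constant like '2024-01-01 00:00:00' would be. */
  if (strptime("2024-01-01 00:00:00", "%Y-%m-%d %H:%M:%S", &tmv) == NULL) return 1;
  tmv.tm_isdst = -1;                               /* let mktime decide DST */
  long long ms = (long long)mktime(&tmv) * 1000LL; /* assume millisecond precision */
  printf("epoch ms (local time): %lld\n", ms);
  return 0;
}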

View File

@ -196,7 +196,7 @@ TEST(osFileTests, taosCopyFile) {
retsize = taosReadFromCFile(NULL, 0, 0, NULL); retsize = taosReadFromCFile(NULL, 0, 0, NULL);
EXPECT_EQ(retsize, 0); EXPECT_EQ(retsize, 0);
taosRemoveFile(from); taosRemoveFile(from);
} }
TEST(osFileTests, taosCreateFile) { TEST(osFileTests, taosCreateFile) {

View File

@ -389,7 +389,7 @@ TEST(osTest, osFile) {
  (void)taosThreadJoin(thread2, NULL);
  taosThreadClear(&thread2);
  // int ret = taosRemoveFile(fname);
  taosRemoveFile(fname);
  // ASSERT_EQ(ret, 0);
  // printf("remove file success");
}
@ -651,6 +651,8 @@ TEST(osTest, osFilePerformance) {
  taosMemoryFree(writeBuffer);
  taosMemoryFree(readBuffer);
  taosRemoveFile(fname);

  (void)printf("Test Write file %d times, cost: %" PRId64 "us\n", TESTTIMES, WriteFileCost);
  (void)printf("Test Read file %d times, cost: %" PRId64 "us\n", TESTTIMES, ReadFileCost);
  (void)printf("Test OpenForWrite & Close file %d times, cost: %" PRId64 "us\n", TESTTIMES, OpenForWriteCloseFileCost);


@ -757,9 +757,8 @@ TAOS_DEFINE_ERROR(TSDB_CODE_PAR_TRUE_FOR_NEGATIVE, "True_for duration c
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_TRUE_FOR_UNIT, "Cannot use 'year' or 'month' as true_for duration")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_COLS_FUNCTION, "Invalid cols function")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_COLS_SELECTFUNC, "cols function's first param must be a select function that output a single row")
TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_MULITI_COLS_FUNC, "Improper use of cols function with multiple output columns")
TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_COLS_ALIAS, "Invalid using alias for cols function")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_COLS_ALIAS, "Invalid using alias for cols function")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_PRIM_KEY_MUST_BE_TS, "Join primary key col must be timestmap type")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INTERNAL_ERROR, "Parser internal error")
//planner


@ -18,6 +18,8 @@ import datetime
import random
import copy
import json
import tempfile
import uuid
import frame.eos
import frame.etool
@ -477,3 +479,48 @@ class TBase:
        print(rlist)
        return rlist
# generate new json file
def genNewJson(self, jsonFile, modifyFunc=None):
try:
with open(jsonFile, 'r', encoding='utf-8') as f:
data = json.load(f)
except FileNotFoundError:
tdLog.info(f"the specified json file '{jsonFile}' was not found.")
return None
except Exception as e:
tdLog.info(f"error reading the json file: {e}")
return None
if callable(modifyFunc):
modifyFunc(data)
tempDir = os.path.join(tempfile.gettempdir(), 'json_templates')
try:
os.makedirs(tempDir, exist_ok=True)
except PermissionError:
tdLog.info(f"no sufficient permissions to create directory at '{tempDir}'.")
return None
except Exception as e:
tdLog.info(f"error creating temporary directory: {e}")
return None
tempPath = os.path.join(tempDir, f"temp_{uuid.uuid4().hex}.json")
try:
with open(tempPath, 'w', encoding='utf-8') as f:
json.dump(data, f, indent=2, ensure_ascii=False)
except Exception as e:
tdLog.info(f"error writing to temporary json file: {e}")
return None
tdLog.info(f"create temporary json file successfully, file: {tempPath}")
return tempPath
# delete file
def deleteFile(self, filename):
try:
if os.path.exists(filename):
os.remove(filename)
except Exception as err:
raise Exception(err)
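genNewJson copies a JSON template, lets the caller patch the parsed data in place through modifyFunc, and writes the result to a uniquely named file under the system temp directory; deleteFile is the matching cleanup. Below is a standalone sketch of the same read-modify-write pattern; the template content and key names are invented for illustration, and a real test would instead pass the path of the benchmark template it uses.

import json
import os
import tempfile
import uuid

# Invented in-memory template standing in for a taosBenchmark-style JSON config.
template = {"databases": [{"dbinfo": {"name": "db"}, "super_tables": [{"childtable_count": 10}]}]}

def bump_child_tables(data):
    # The modifyFunc hook: edit the parsed JSON in place before it is written back out.
    data["databases"][0]["super_tables"][0]["childtable_count"] = 100

bump_child_tables(template)

temp_dir = os.path.join(tempfile.gettempdir(), "json_templates")
os.makedirs(temp_dir, exist_ok=True)
temp_path = os.path.join(temp_dir, f"temp_{uuid.uuid4().hex}.json")
with open(temp_path, "w", encoding="utf-8") as f:
    json.dump(template, f, indent=2, ensure_ascii=False)

print(temp_path)   # a test would hand this path on, then clean it up as deleteFile does
os.remove(temp_path)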


@ -42,8 +42,8 @@ class TDTestCase(TBase):
tdSql.execute("insert into d0 file '%s'" % datafile) tdSql.execute("insert into d0 file '%s'" % datafile)
tdSql.execute("CREATE TABLE `n1` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, co NCHAR(10))") tdSql.execute("CREATE TABLE `n1` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, co NCHAR(10))")
tdSql.execute("insert into n1 values(now, 1, null, '23')") tdSql.execute("insert into n1 values(now, 1, null, '23')")
tdSql.execute("insert into n1 values(now, null, 3, '23')") tdSql.execute("insert into n1 values(now+1a, null, 3, '23')")
tdSql.execute("insert into n1 values(now, 5, 3, '23')") tdSql.execute("insert into n1 values(now+2a, 5, 3, '23')")
def test_normal_query_new(self, testCase): def test_normal_query_new(self, testCase):
# read sql from .sql file and execute # read sql from .sql file and execute
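The switch to now+1a and now+2a keeps the three n1 rows on distinct timestamps; with a repeated now, rows sharing a timestamp in the same table would typically be treated as an update of one row rather than three separate rows, throwing off the expected row counts. A small sketch of how the offsets line up ("a" is the millisecond unit; table and values are taken from the test above):

base = "insert into n1 values(now{offset}, {current}, {voltage}, '23')"
rows = [("", 1, "null"), ("+1a", "null", 3), ("+2a", 5, 3)]
for offset, current, voltage in rows:
    # each subsequent row is shifted by one more millisecond
    print(base.format(offset=offset, current=current, voltage=voltage))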

Some files were not shown because too many files have changed in this diff.